Merge tag 'for-5.15/libata-2021-08-30' of git://git.kernel.dk/linux-block
author Linus Torvalds <torvalds@linux-foundation.org>
Tue, 31 Aug 2021 02:09:45 +0000 (19:09 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Tue, 31 Aug 2021 02:09:45 +0000 (19:09 -0700)
Pull libata updates from Jens Axboe:
 "libata changes for the 5.15 release:

   - NCQ priority improvements (Damien, Niklas)

   - coccinelle warning fix (Jing)

   - dwc_460ex phy fix (Andy)"

* tag 'for-5.15/libata-2021-08-30' of git://git.kernel.dk/linux-block:
  include:libata: fix boolreturn.cocci warnings
  docs: sysfs-block-device: document ncq_prio_supported
  docs: sysfs-block-device: improve ncq_prio_enable documentation
  libata: Introduce ncq_prio_supported sysfs attribute
  libata: print feature list on device scan
  libata: fix ata_read_log_page() warning
  libata: cleanup NCQ priority handling
  libata: cleanup ata_dev_configure()
  libata: cleanup device sleep capability detection
  libata: simplify ata_scsi_rbuf_fill()
  libata: fix ata_host_start()
  ata: sata_dwc_460ex: No need to call phy_exit() before phy_init()

1968 files changed:
Documentation/ABI/testing/sysfs-block
Documentation/ABI/testing/sysfs-bus-event_source-devices-uncore [new file with mode: 0644]
Documentation/ABI/testing/sysfs-bus-platform
Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst
Documentation/RCU/Design/Requirements/Requirements.rst
Documentation/RCU/checklist.rst
Documentation/RCU/rcu_dereference.rst
Documentation/RCU/stallwarn.rst
Documentation/admin-guide/hw-vuln/index.rst
Documentation/admin-guide/hw-vuln/l1d_flush.rst [new file with mode: 0644]
Documentation/admin-guide/kernel-parameters.txt
Documentation/atomic_t.txt
Documentation/bpf/libbpf/libbpf_naming_convention.rst
Documentation/core-api/cpu_hotplug.rst
Documentation/core-api/irq/irq-domain.rst
Documentation/devicetree/bindings/fsi/ibm,fsi2spi.yaml
Documentation/devicetree/bindings/gpio/rockchip,gpio-bank.yaml
Documentation/devicetree/bindings/iio/st,st-sensors.yaml
Documentation/devicetree/bindings/power/supply/battery.yaml
Documentation/devicetree/bindings/power/supply/maxim,max17042.yaml
Documentation/devicetree/bindings/power/supply/mt6360_charger.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/power/supply/summit,smb347-charger.yaml
Documentation/devicetree/bindings/power/supply/x-powers,axp20x-ac-power-supply.yaml
Documentation/devicetree/bindings/power/supply/x-powers,axp20x-battery-power-supply.yaml
Documentation/devicetree/bindings/power/supply/x-powers,axp20x-usb-power-supply.yaml
Documentation/devicetree/bindings/regulator/richtek,rtq2134-regulator.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/regulator/richtek,rtq6752-regulator.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/regulator/socionext,uniphier-regulator.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/regulator/uniphier-regulator.txt [deleted file]
Documentation/devicetree/bindings/riscv/sifive-l2-cache.yaml
Documentation/devicetree/bindings/spi/omap-spi.txt [deleted file]
Documentation/devicetree/bindings/spi/omap-spi.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/spi/rockchip-sfc.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/spi/spi-mt65xx.txt
Documentation/devicetree/bindings/spi/spi-sprd-adi.txt [deleted file]
Documentation/devicetree/bindings/spi/sprd,spi-adi.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/timer/rockchip,rk-timer.txt [deleted file]
Documentation/devicetree/bindings/timer/rockchip,rk-timer.yaml [new file with mode: 0644]
Documentation/driver-api/index.rst
Documentation/driver-api/lightnvm-pblk.rst [deleted file]
Documentation/filesystems/locking.rst
Documentation/filesystems/mandatory-locking.rst [deleted file]
Documentation/gpu/rfc/i915_gem_lmem.rst
Documentation/i2c/index.rst
Documentation/networking/netdev-FAQ.rst
Documentation/networking/nf_conntrack-sysctl.rst
Documentation/networking/operstates.rst
Documentation/trace/ftrace.rst
Documentation/userspace-api/ioctl/ioctl-number.rst
Documentation/userspace-api/seccomp_filter.rst
Documentation/userspace-api/spec_ctrl.rst
Documentation/virt/kvm/api.rst
Documentation/virt/kvm/locking.rst
Documentation/x86/x86_64/boot-options.rst
MAINTAINERS
Makefile
arch/Kconfig
arch/alpha/Kconfig
arch/alpha/boot/bootp.c
arch/alpha/boot/bootpz.c
arch/alpha/boot/misc.c
arch/alpha/configs/defconfig
arch/alpha/include/asm/compiler.h
arch/alpha/include/asm/syscall.h
arch/alpha/kernel/osf_sys.c
arch/alpha/kernel/perf_event.c
arch/alpha/kernel/process.c
arch/alpha/kernel/setup.c
arch/alpha/kernel/smp.c
arch/alpha/kernel/sys_nautilus.c
arch/alpha/kernel/traps.c
arch/alpha/math-emu/math.c
arch/arc/Kconfig
arch/arc/include/asm/checksum.h
arch/arc/include/asm/perf_event.h
arch/arc/kernel/fpu.c
arch/arc/kernel/mcip.c
arch/arc/kernel/unwind.c
arch/arc/kernel/vmlinux.lds.S
arch/arm/Kconfig
arch/arm/boot/dts/am437x-l4.dtsi
arch/arm/boot/dts/am43x-epos-evm.dts
arch/arm/boot/dts/imx53-m53menlo.dts
arch/arm/boot/dts/imx6qdl-sr-som.dtsi
arch/arm/boot/dts/imx6ull-colibri-wifi.dtsi
arch/arm/boot/dts/omap5-board-common.dtsi
arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
arch/arm/boot/dts/stm32mp15xx-dhcom-pdk2.dtsi
arch/arm/boot/dts/stm32mp15xx-dhcom-som.dtsi
arch/arm/common/sa1111.c
arch/arm/configs/nhk8815_defconfig
arch/arm/crypto/curve25519-glue.c
arch/arm/include/asm/memory.h
arch/arm/kernel/head.S
arch/arm/mach-davinci/Kconfig
arch/arm/mach-imx/common.h
arch/arm/mach-imx/mmdc.c
arch/arm/mach-ixp4xx/Kconfig
arch/arm/mach-ixp4xx/include/mach/ixp4xx-regs.h
arch/arm/mach-omap2/omap_hwmod.c
arch/arm/mach-pxa/pxa_cplds_irqs.c
arch/arm/mach-rpc/riscpc.c
arch/arm/mach-s3c/irq-s3c24xx.c
arch/arm/mm/mmu.c
arch/arm/mm/pv-fixup-asm.S
arch/arm/net/bpf_jit_32.c
arch/arm64/Kconfig
arch/arm64/Makefile
arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var2.dts
arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
arch/arm64/boot/dts/freescale/imx8mp.dtsi
arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
arch/arm64/boot/dts/nvidia/tegra194.dtsi
arch/arm64/boot/dts/qcom/msm8992-bullhead-rev-101.dts
arch/arm64/boot/dts/qcom/msm8994-angler-rev-101.dts
arch/arm64/boot/dts/qcom/sc7280.dtsi
arch/arm64/boot/dts/qcom/sdm845-oneplus-common.dtsi
arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts
arch/arm64/crypto/Kconfig
arch/arm64/crypto/sm4-ce-glue.c
arch/arm64/include/asm/el2_setup.h
arch/arm64/include/asm/page.h
arch/arm64/include/asm/ptrace.h
arch/arm64/include/asm/stacktrace.h
arch/arm64/include/asm/syscall.h
arch/arm64/kernel/kaslr.c
arch/arm64/kernel/ptrace.c
arch/arm64/kernel/signal.c
arch/arm64/kernel/stacktrace.c
arch/arm64/kernel/syscall.c
arch/arm64/kvm/arm.c
arch/arm64/kvm/hyp/nvhe/mem_protect.c
arch/arm64/kvm/mmu.c
arch/arm64/mm/init.c
arch/arm64/net/bpf_jit_comp.c
arch/h8300/Kconfig.cpu
arch/ia64/Kconfig
arch/m68k/Kconfig
arch/m68k/Kconfig.cpu
arch/m68k/coldfire/m525x.c
arch/m68k/configs/amiga_defconfig
arch/m68k/configs/apollo_defconfig
arch/m68k/configs/atari_defconfig
arch/m68k/configs/bvme6000_defconfig
arch/m68k/configs/hp300_defconfig
arch/m68k/configs/mac_defconfig
arch/m68k/configs/multi_defconfig
arch/m68k/configs/mvme147_defconfig
arch/m68k/configs/mvme16x_defconfig
arch/m68k/configs/q40_defconfig
arch/m68k/configs/stmark2_defconfig
arch/m68k/configs/sun3_defconfig
arch/m68k/configs/sun3x_defconfig
arch/m68k/emu/nfeth.c
arch/m68k/include/asm/atomic.h
arch/mips/Kconfig
arch/mips/Makefile
arch/mips/ath25/ar2315.c
arch/mips/ath25/ar5312.c
arch/mips/include/asm/mach-rc32434/rb.h
arch/mips/include/asm/pgalloc.h
arch/mips/lantiq/irq.c
arch/mips/mti-malta/malta-platform.c
arch/mips/net/ebpf_jit.c
arch/mips/pci/pci-ar2315.c
arch/mips/pci/pci-rt3883.c
arch/mips/ralink/irq.c
arch/mips/sgi-ip27/ip27-irq.c
arch/mips/sgi-ip30/ip30-irq.c
arch/nios2/kernel/irq.c
arch/parisc/Kconfig
arch/parisc/include/asm/string.h
arch/parisc/kernel/parisc_ksyms.c
arch/parisc/lib/Makefile
arch/parisc/lib/memset.c [new file with mode: 0644]
arch/parisc/lib/string.S [deleted file]
arch/powerpc/Kconfig
arch/powerpc/include/asm/book3s/32/kup.h
arch/powerpc/include/asm/interrupt.h
arch/powerpc/include/asm/irq.h
arch/powerpc/include/asm/ptrace.h
arch/powerpc/kernel/asm-offsets.c
arch/powerpc/kernel/exceptions-64s.S
arch/powerpc/kernel/head_book3s_32.S
arch/powerpc/kernel/head_booke.h
arch/powerpc/kernel/irq.c
arch/powerpc/kernel/kprobes.c
arch/powerpc/kernel/sysfs.c
arch/powerpc/kernel/time.c
arch/powerpc/kernel/traps.c
arch/powerpc/kernel/vdso64/Makefile
arch/powerpc/mm/pageattr.c
arch/powerpc/net/bpf_jit_comp32.c
arch/powerpc/net/bpf_jit_comp64.c
arch/powerpc/platforms/4xx/uic.c
arch/powerpc/platforms/512x/mpc5121_ads_cpld.c
arch/powerpc/platforms/52xx/media5200.c
arch/powerpc/platforms/52xx/mpc52xx_gpt.c
arch/powerpc/platforms/82xx/pq2ads-pci-pic.c
arch/powerpc/platforms/Kconfig.cputype
arch/powerpc/platforms/cell/interrupt.c
arch/powerpc/platforms/cell/spider-pic.c
arch/powerpc/platforms/embedded6xx/hlwd-pic.c
arch/powerpc/platforms/powernv/opal-irqchip.c
arch/powerpc/platforms/pseries/setup.c
arch/powerpc/sysdev/fsl_mpic_err.c
arch/powerpc/sysdev/fsl_msi.c
arch/powerpc/sysdev/xive/common.c
arch/riscv/Kconfig
arch/riscv/boot/dts/microchip/microchip-mpfs-icicle-kit.dts
arch/riscv/boot/dts/microchip/microchip-mpfs.dtsi
arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts
arch/riscv/configs/defconfig
arch/riscv/configs/rv32_defconfig
arch/riscv/include/asm/page.h
arch/riscv/kernel/Makefile
arch/riscv/kernel/ptrace.c
arch/riscv/kernel/setup.c
arch/riscv/kernel/stacktrace.c
arch/riscv/mm/init.c
arch/riscv/net/bpf_jit_comp32.c
arch/riscv/net/bpf_jit_comp64.c
arch/s390/Kconfig
arch/s390/Makefile
arch/s390/boot/Makefile
arch/s390/boot/boot.h
arch/s390/boot/compressed/Makefile
arch/s390/boot/compressed/clz_ctz.c [new file with mode: 0644]
arch/s390/boot/compressed/decompressor.c
arch/s390/boot/compressed/decompressor.h
arch/s390/boot/compressed/vmlinux.lds.S
arch/s390/boot/head.S
arch/s390/boot/ipl_report.c
arch/s390/boot/kaslr.c
arch/s390/boot/mem_detect.c
arch/s390/boot/pgm_check_info.c
arch/s390/boot/sclp_early_core.c
arch/s390/boot/startup.c
arch/s390/boot/text_dma.S [deleted file]
arch/s390/boot/uv.c
arch/s390/boot/uv.h [new file with mode: 0644]
arch/s390/configs/debug_defconfig
arch/s390/configs/defconfig
arch/s390/hypfs/hypfs_diag0c.c
arch/s390/include/asm/cio.h
arch/s390/include/asm/cpacf.h
arch/s390/include/asm/cpufeature.h
arch/s390/include/asm/ctl_reg.h
arch/s390/include/asm/debug.h
arch/s390/include/asm/diag.h
arch/s390/include/asm/elf.h
arch/s390/include/asm/extable.h
arch/s390/include/asm/ftrace.h
arch/s390/include/asm/ftrace.lds.h [new file with mode: 0644]
arch/s390/include/asm/ipl.h
arch/s390/include/asm/kfence.h [new file with mode: 0644]
arch/s390/include/asm/kvm_host.h
arch/s390/include/asm/kvm_para.h
arch/s390/include/asm/linkage.h
arch/s390/include/asm/lowcore.h
arch/s390/include/asm/module.h
arch/s390/include/asm/page.h
arch/s390/include/asm/pci.h
arch/s390/include/asm/pci_dma.h
arch/s390/include/asm/pgtable.h
arch/s390/include/asm/processor.h
arch/s390/include/asm/qdio.h
arch/s390/include/asm/sclp.h
arch/s390/include/asm/sections.h
arch/s390/include/asm/set_memory.h
arch/s390/include/asm/setup.h
arch/s390/include/asm/syscall.h
arch/s390/include/asm/uv.h
arch/s390/include/asm/vdso/gettimeofday.h
arch/s390/kernel/Makefile
arch/s390/kernel/asm-offsets.c
arch/s390/kernel/crash_dump.c
arch/s390/kernel/debug.c
arch/s390/kernel/diag.c
arch/s390/kernel/dis.c
arch/s390/kernel/early.c
arch/s390/kernel/entry.S
arch/s390/kernel/entry.h
arch/s390/kernel/ftrace.c
arch/s390/kernel/ftrace.h [new file with mode: 0644]
arch/s390/kernel/head64.S
arch/s390/kernel/ipl.c
arch/s390/kernel/ipl_vmparm.c
arch/s390/kernel/irq.c
arch/s390/kernel/jump_label.c
arch/s390/kernel/machine_kexec.c
arch/s390/kernel/module.c
arch/s390/kernel/os_info.c
arch/s390/kernel/perf_cpum_cf.c
arch/s390/kernel/processor.c
arch/s390/kernel/setup.c
arch/s390/kernel/signal.c
arch/s390/kernel/smp.c
arch/s390/kernel/text_amode31.S [new file with mode: 0644]
arch/s390/kernel/topology.c
arch/s390/kernel/traps.c
arch/s390/kernel/uv.c
arch/s390/kernel/vdso32/Makefile
arch/s390/kernel/vdso32/vdso32.lds.S
arch/s390/kernel/vdso64/Makefile
arch/s390/kernel/vdso64/vdso64.lds.S
arch/s390/kernel/vmlinux.lds.S
arch/s390/kvm/diag.c
arch/s390/kvm/kvm-s390.c
arch/s390/lib/delay.c
arch/s390/mm/dump_pagetables.c
arch/s390/mm/fault.c
arch/s390/mm/init.c
arch/s390/mm/kasan_init.c
arch/s390/mm/maccess.c
arch/s390/mm/page-states.c
arch/s390/mm/pageattr.c
arch/s390/mm/vmem.c
arch/s390/net/bpf_jit_comp.c
arch/s390/pci/pci.c
arch/s390/pci/pci_bus.c
arch/s390/pci/pci_bus.h
arch/s390/pci/pci_clp.c
arch/s390/pci/pci_dma.c
arch/s390/pci/pci_event.c
arch/s390/pci/pci_irq.c
arch/s390/pci/pci_sysfs.c
arch/s390/purgatory/Makefile
arch/s390/tools/opcodes.txt
arch/sh/Kconfig
arch/sh/boards/mach-se/7343/irq.c
arch/sh/boards/mach-se/7722/irq.c
arch/sh/boards/mach-x3proto/gpio.c
arch/sparc/Kconfig
arch/sparc/net/bpf_jit_comp_64.c
arch/um/drivers/ubd_kern.c
arch/x86/Kconfig
arch/x86/Makefile
arch/x86/boot/Makefile
arch/x86/boot/compressed/efi_thunk_64.S
arch/x86/boot/compressed/head_64.S
arch/x86/boot/compressed/kaslr.c
arch/x86/crypto/Makefile
arch/x86/crypto/aesni-intel_glue.c
arch/x86/crypto/sm4-aesni-avx-asm_64.S [new file with mode: 0644]
arch/x86/crypto/sm4-aesni-avx2-asm_64.S [new file with mode: 0644]
arch/x86/crypto/sm4-avx.h [new file with mode: 0644]
arch/x86/crypto/sm4_aesni_avx2_glue.c [new file with mode: 0644]
arch/x86/crypto/sm4_aesni_avx_glue.c [new file with mode: 0644]
arch/x86/events/Kconfig
arch/x86/events/amd/Makefile
arch/x86/events/amd/ibs.c
arch/x86/events/amd/power.c
arch/x86/events/amd/uncore.c
arch/x86/events/core.c
arch/x86/events/intel/core.c
arch/x86/events/intel/pt.c
arch/x86/events/intel/uncore.c
arch/x86/events/intel/uncore.h
arch/x86/events/intel/uncore_discovery.c
arch/x86/events/intel/uncore_discovery.h
arch/x86/events/intel/uncore_snbep.c
arch/x86/events/perf_event.h
arch/x86/include/asm/amd-ibs.h [new file with mode: 0644]
arch/x86/include/asm/i8259.h
arch/x86/include/asm/kfence.h
arch/x86/include/asm/kvm_host.h
arch/x86/include/asm/mce.h
arch/x86/include/asm/nospec-branch.h
arch/x86/include/asm/pc-conf-reg.h [new file with mode: 0644]
arch/x86/include/asm/processor-cyrix.h
arch/x86/include/asm/processor.h
arch/x86/include/asm/svm.h
arch/x86/include/asm/thread_info.h
arch/x86/include/asm/tlbflush.h
arch/x86/kernel/acpi/boot.c
arch/x86/kernel/apic/apic.c
arch/x86/kernel/apic/io_apic.c
arch/x86/kernel/apic/msi.c
arch/x86/kernel/apic/vector.c
arch/x86/kernel/cpu/amd.c
arch/x86/kernel/cpu/bugs.c
arch/x86/kernel/cpu/common.c
arch/x86/kernel/cpu/mce/core.c
arch/x86/kernel/cpu/mce/inject.c
arch/x86/kernel/cpu/microcode/core.c
arch/x86/kernel/cpu/mtrr/mtrr.c
arch/x86/kernel/cpu/resctrl/core.c
arch/x86/kernel/cpu/resctrl/ctrlmondata.c
arch/x86/kernel/cpu/resctrl/internal.h
arch/x86/kernel/cpu/resctrl/monitor.c
arch/x86/kernel/cpu/resctrl/pseudo_lock.c
arch/x86/kernel/cpu/resctrl/rdtgroup.c
arch/x86/kernel/hpet.c
arch/x86/kernel/i8259.c
arch/x86/kernel/mpparse.c
arch/x86/kernel/reboot.c
arch/x86/kernel/smpboot.c
arch/x86/kvm/cpuid.c
arch/x86/kvm/hyperv.c
arch/x86/kvm/i8259.c
arch/x86/kvm/ioapic.c
arch/x86/kvm/ioapic.h
arch/x86/kvm/irq.h
arch/x86/kvm/mmu/mmu.c
arch/x86/kvm/mmu/tdp_mmu.c
arch/x86/kvm/svm/avic.c
arch/x86/kvm/svm/nested.c
arch/x86/kvm/svm/sev.c
arch/x86/kvm/svm/svm.c
arch/x86/kvm/svm/svm.h
arch/x86/kvm/svm/svm_onhyperv.h
arch/x86/kvm/trace.h
arch/x86/kvm/vmx/nested.c
arch/x86/kvm/vmx/vmx.h
arch/x86/kvm/x86.c
arch/x86/lib/Makefile
arch/x86/lib/pc-conf-reg.c [new file with mode: 0644]
arch/x86/mm/mmio-mod.c
arch/x86/mm/tlb.c
arch/x86/net/bpf_jit_comp.c
arch/x86/net/bpf_jit_comp32.c
arch/x86/pci/irq.c
arch/x86/power/cpu.c
arch/x86/tools/chkobjdump.awk
arch/x86/tools/relocs.c
arch/x86/tools/relocs.h
arch/xtensa/Kconfig
arch/xtensa/kernel/irq.c
block/Kconfig
block/Kconfig.iosched
block/Makefile
block/bfq-iosched.c
block/bfq-iosched.h
block/bfq-wf2q.c
block/bio-integrity.c
block/bio.c
block/blk-cgroup.c
block/blk-core.c
block/blk-crypto.c
block/blk-flush.c
block/blk-integrity.c
block/blk-iocost.c
block/blk-iolatency.c
block/blk-map.c
block/blk-merge.c
block/blk-mq-sched.c
block/blk-mq-sysfs.c
block/blk-mq.c
block/blk-settings.c
block/blk-sysfs.c
block/blk-throttle.c
block/blk-wbt.c
block/blk-zoned.c
block/blk.h
block/bounce.c
block/cmdline-parser.c [deleted file]
block/disk-events.c
block/elevator.c
block/genhd.c
block/holder.c [new file with mode: 0644]
block/ioctl.c
block/ioprio.c
block/kyber-iosched.c
block/mq-deadline-cgroup.c [deleted file]
block/mq-deadline-cgroup.h [deleted file]
block/mq-deadline-main.c [deleted file]
block/mq-deadline.c [new file with mode: 0644]
block/partitions/Kconfig
block/partitions/acorn.c
block/partitions/aix.c
block/partitions/amiga.c
block/partitions/atari.c
block/partitions/check.h
block/partitions/cmdline.c
block/partitions/core.c
block/partitions/efi.c
block/partitions/ibm.c
block/partitions/ldm.c
block/partitions/mac.c
block/partitions/msdos.c
block/partitions/sgi.c
block/partitions/sun.c
block/t10-pi.c
certs/Kconfig
certs/Makefile
crypto/Kconfig
crypto/Makefile
crypto/asymmetric_keys/pkcs7_parser.c
crypto/ecc.h
crypto/sha512_generic.c
crypto/skcipher.c
crypto/sm4_generic.c
crypto/tcrypt.c
crypto/testmgr.c
crypto/testmgr.h
crypto/wp512.c
drivers/Kconfig
drivers/Makefile
drivers/acpi/acpica/nsrepair2.c
drivers/acpi/dptf/dptf_pch_fivr.c
drivers/acpi/nfit/core.c
drivers/acpi/prmt.c
drivers/acpi/resource.c
drivers/acpi/x86/s2idle.c
drivers/ata/libata-sff.c
drivers/base/core.c
drivers/base/dd.c
drivers/base/firmware_loader/fallback.c
drivers/base/firmware_loader/firmware.h
drivers/base/firmware_loader/main.c
drivers/base/platform-msi.c
drivers/base/power/domain.c
drivers/base/regmap/internal.h
drivers/base/regmap/regmap-debugfs.c
drivers/base/regmap/regmap-mmio.c
drivers/base/regmap/regmap.c
drivers/block/Kconfig
drivers/block/brd.c
drivers/block/cryptoloop.c
drivers/block/drbd/drbd_nl.c
drivers/block/drbd/drbd_req.c
drivers/block/floppy.c
drivers/block/loop.c
drivers/block/n64cart.c
drivers/block/nbd.c
drivers/block/null_blk/main.c
drivers/block/paride/pd.c
drivers/block/pktcdvd.c
drivers/block/ps3disk.c
drivers/block/ps3vram.c
drivers/block/rbd.c
drivers/block/rnbd/rnbd-clt-sysfs.c
drivers/block/rnbd/rnbd-clt.c
drivers/block/rnbd/rnbd-srv-sysfs.c
drivers/block/sx8.c
drivers/block/virtio_blk.c
drivers/block/xen-blkfront.c
drivers/bus/ti-sysc.c
drivers/char/hw_random/Kconfig
drivers/char/hw_random/Makefile
drivers/char/hw_random/amd-rng.c
drivers/char/hw_random/arm_smccc_trng.c [new file with mode: 0644]
drivers/char/hw_random/geode-rng.c
drivers/char/hw_random/intel-rng.c
drivers/char/hw_random/via-rng.c
drivers/char/tpm/Kconfig
drivers/char/tpm/tpm_ftpm_tee.c
drivers/char/tpm/tpm_ibmvtpm.c
drivers/char/tpm/tpm_ibmvtpm.h
drivers/char/tpm/tpm_tis_i2c_cr50.c
drivers/clk/clk-devres.c
drivers/clk/clk-stm32f4.c
drivers/clk/hisilicon/Kconfig
drivers/clk/imx/clk-imx6q.c
drivers/clk/qcom/clk-smd-rpm.c
drivers/clk/qcom/gdsc.c
drivers/clk/renesas/rcar-usb2-clock-sel.c
drivers/clk/tegra/clk-sdmmc-mux.c
drivers/clocksource/exynos_mct.c
drivers/clocksource/ingenic-sysost.c
drivers/clocksource/sh_cmt.c
drivers/clocksource/timer-fttmr010.c
drivers/clocksource/timer-mediatek.c
drivers/cpufreq/armada-37xx-cpufreq.c
drivers/cpufreq/cpufreq-dt-platdev.c
drivers/cpufreq/scmi-cpufreq.c
drivers/cpuidle/governors/teo.c
drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c
drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c
drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c
drivers/crypto/atmel-aes.c
drivers/crypto/atmel-tdes.c
drivers/crypto/ccp/sev-dev.c
drivers/crypto/ccp/sp-pci.c
drivers/crypto/hisilicon/hpre/hpre_main.c
drivers/crypto/hisilicon/qm.c
drivers/crypto/hisilicon/qm.h
drivers/crypto/hisilicon/sec2/sec.h
drivers/crypto/hisilicon/sec2/sec_main.c
drivers/crypto/hisilicon/zip/zip_main.c
drivers/crypto/mxs-dcp.c
drivers/crypto/omap-aes.c
drivers/crypto/omap-crypto.c
drivers/crypto/omap-des.c
drivers/crypto/omap-sham.c
drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c
drivers/crypto/qat/qat_4xxx/adf_drv.c
drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c
drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h
drivers/crypto/qat/qat_c3xxx/adf_drv.c
drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c
drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.h
drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c
drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.h
drivers/crypto/qat/qat_c62x/adf_drv.c
drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c
drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.h
drivers/crypto/qat/qat_c62xvf/adf_drv.c
drivers/crypto/qat/qat_common/adf_accel_devices.h
drivers/crypto/qat/qat_common/adf_aer.c
drivers/crypto/qat/qat_common/adf_common_drv.h
drivers/crypto/qat/qat_common/adf_init.c
drivers/crypto/qat/qat_common/adf_isr.c
drivers/crypto/qat/qat_common/adf_pf2vf_msg.c
drivers/crypto/qat/qat_common/adf_pf2vf_msg.h
drivers/crypto/qat/qat_common/adf_sriov.c
drivers/crypto/qat/qat_common/adf_vf2pf_msg.c
drivers/crypto/qat/qat_common/adf_vf_isr.c
drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
drivers/crypto/qat/qat_dh895xcc/adf_drv.c
drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h
drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
drivers/crypto/virtio/virtio_crypto_core.c
drivers/dax/super.c
drivers/dma/idxd/idxd.h
drivers/dma/idxd/init.c
drivers/dma/idxd/irq.c
drivers/dma/idxd/submit.c
drivers/dma/idxd/sysfs.c
drivers/dma/imx-dma.c
drivers/dma/of-dma.c
drivers/dma/sh/usb-dmac.c
drivers/dma/stm32-dma.c
drivers/dma/stm32-dmamux.c
drivers/dma/uniphier-xdmac.c
drivers/dma/xilinx/xilinx_dma.c
drivers/edac/altera_edac.c
drivers/edac/amd64_edac.c
drivers/edac/edac_mc.c
drivers/edac/i10nm_base.c
drivers/edac/mce_amd.c
drivers/edac/skx_base.c
drivers/edac/skx_common.c
drivers/edac/skx_common.h
drivers/firmware/broadcom/tee_bnxt_fw.c
drivers/firmware/efi/cper.c
drivers/firmware/efi/libstub/arm64-stub.c
drivers/firmware/efi/libstub/randomalloc.c
drivers/firmware/smccc/smccc.c
drivers/fpga/dfl-fme-perf.c
drivers/gpio/Kconfig
drivers/gpio/Makefile
drivers/gpio/gpio-104-dio-48e.c
drivers/gpio/gpio-104-idi-48.c
drivers/gpio/gpio-104-idio-16.c
drivers/gpio/gpio-altera.c
drivers/gpio/gpio-aspeed-sgpio.c
drivers/gpio/gpio-aspeed.c
drivers/gpio/gpio-ath79.c
drivers/gpio/gpio-bcm-kona.c
drivers/gpio/gpio-brcmstb.c
drivers/gpio/gpio-cadence.c
drivers/gpio/gpio-davinci.c
drivers/gpio/gpio-dln2.c
drivers/gpio/gpio-em.c
drivers/gpio/gpio-ep93xx.c
drivers/gpio/gpio-ftgpio010.c
drivers/gpio/gpio-hisi.c
drivers/gpio/gpio-hlwd.c
drivers/gpio/gpio-merrifield.c
drivers/gpio/gpio-mpc8xxx.c
drivers/gpio/gpio-mt7621.c
drivers/gpio/gpio-mxc.c
drivers/gpio/gpio-mxs.c
drivers/gpio/gpio-omap.c
drivers/gpio/gpio-pci-idio-16.c
drivers/gpio/gpio-pcie-idio-24.c
drivers/gpio/gpio-pl061.c
drivers/gpio/gpio-pxa.c
drivers/gpio/gpio-rcar.c
drivers/gpio/gpio-rda.c
drivers/gpio/gpio-realtek-otto.c
drivers/gpio/gpio-rockchip.c [new file with mode: 0644]
drivers/gpio/gpio-sch.c
drivers/gpio/gpio-sodaville.c
drivers/gpio/gpio-sprd.c
drivers/gpio/gpio-tb10x.c
drivers/gpio/gpio-tegra.c
drivers/gpio/gpio-tegra186.c
drivers/gpio/gpio-tqmx86.c
drivers/gpio/gpio-vf610.c
drivers/gpio/gpio-ws16c48.c
drivers/gpio/gpio-xgs-iproc.c
drivers/gpio/gpio-xilinx.c
drivers/gpio/gpio-xlp.c
drivers/gpio/gpio-zynq.c
drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
drivers/gpu/drm/amd/display/dc/core/dc.c
drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c
drivers/gpu/drm/amd/display/dc/dc.h
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
drivers/gpu/drm/amd/include/atomfirmware.h
drivers/gpu/drm/amd/pm/inc/smu_v13_0.h
drivers/gpu/drm/amd/pm/inc/smu_v13_0_1_pmfw.h
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
drivers/gpu/drm/drm_ioc32.c
drivers/gpu/drm/i915/display/intel_bios.c
drivers/gpu/drm/i915/display/intel_ddi.c
drivers/gpu/drm/i915/display/intel_display.c
drivers/gpu/drm/i915/display/intel_display_power.c
drivers/gpu/drm/i915/display/intel_dp.c
drivers/gpu/drm/i915/display/intel_dp_link_training.c
drivers/gpu/drm/i915/gt/intel_timeline.c
drivers/gpu/drm/i915/gvt/handlers.c
drivers/gpu/drm/i915/gvt/mmio_context.c
drivers/gpu/drm/i915/i915_globals.c
drivers/gpu/drm/i915/i915_gpu_error.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_pci.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/intel_device_info.c
drivers/gpu/drm/imx/ipuv3-plane.c
drivers/gpu/drm/kmb/kmb_drv.c
drivers/gpu/drm/kmb/kmb_drv.h
drivers/gpu/drm/kmb/kmb_plane.c
drivers/gpu/drm/mediatek/mtk_disp_color.c
drivers/gpu/drm/mediatek/mtk_disp_ovl.c
drivers/gpu/drm/mediatek/mtk_dpi.c
drivers/gpu/drm/mediatek/mtk_drm_crtc.c
drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
drivers/gpu/drm/mediatek/mtk_drm_plane.c
drivers/gpu/drm/meson/meson_registers.h
drivers/gpu/drm/meson/meson_viu.c
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c
drivers/gpu/drm/msm/dp/dp_catalog.c
drivers/gpu/drm/msm/dp/dp_ctrl.c
drivers/gpu/drm/msm/dp/dp_display.c
drivers/gpu/drm/msm/msm_iommu.c
drivers/gpu/drm/nouveau/dispnv50/disp.c
drivers/gpu/drm/nouveau/dispnv50/head.c
drivers/gpu/drm/nouveau/dispnv50/head.h
drivers/gpu/drm/nouveau/include/nvif/cl0080.h
drivers/gpu/drm/nouveau/include/nvif/class.h
drivers/gpu/drm/nouveau/include/nvif/client.h
drivers/gpu/drm/nouveau/include/nvif/driver.h
drivers/gpu/drm/nouveau/include/nvkm/core/client.h
drivers/gpu/drm/nouveau/include/nvkm/core/ioctl.h
drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
drivers/gpu/drm/nouveau/nouveau_abi16.c
drivers/gpu/drm/nouveau/nouveau_chan.c
drivers/gpu/drm/nouveau/nouveau_drm.c
drivers/gpu/drm/nouveau/nouveau_mem.c
drivers/gpu/drm/nouveau/nouveau_nvif.c
drivers/gpu/drm/nouveau/nouveau_svm.c
drivers/gpu/drm/nouveau/nouveau_usif.c
drivers/gpu/drm/nouveau/nvif/client.c
drivers/gpu/drm/nouveau/nvif/object.c
drivers/gpu/drm/nouveau/nvkm/core/ioctl.c
drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.h
drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
drivers/gpu/drm/nouveau/nvkm/engine/dma/user.c
drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h
drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmag84.c [deleted file]
drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv50.c [deleted file]
drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogv100.c
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifotu102.c
drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.h
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
drivers/gpu/drm/panel/panel-simple.c
drivers/gpu/drm/ttm/ttm_device.c
drivers/gpu/drm/ttm/ttm_module.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
drivers/gpu/ipu-v3/ipu-common.c
drivers/gpu/ipu-v3/ipu-cpmem.c
drivers/hid/Kconfig
drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
drivers/hid/hid-apple.c
drivers/hid/hid-asus.c
drivers/hid/hid-ft260.c
drivers/hid/intel-ish-hid/ishtp-hid-client.c
drivers/hid/intel-ish-hid/ishtp-hid.h
drivers/hid/intel-ish-hid/ishtp/bus.c
drivers/hid/usbhid/Kconfig
drivers/hid/wacom_wac.c
drivers/i2c/busses/i2c-bcm-iproc.c
drivers/i2c/i2c-dev.c
drivers/iio/accel/Kconfig
drivers/iio/accel/fxls8962af-core.c
drivers/iio/adc/palmas_gpadc.c
drivers/iio/adc/rn5t618-adc.c
drivers/iio/adc/ti-ads7950.c
drivers/iio/humidity/hdc100x.c
drivers/iio/imu/adis.c
drivers/infiniband/core/cma.c
drivers/infiniband/core/uverbs_std_types_mr.c
drivers/infiniband/hw/bnxt_re/ib_verbs.c
drivers/infiniband/hw/bnxt_re/main.c
drivers/infiniband/hw/cxgb4/cq.c
drivers/infiniband/hw/cxgb4/ev.c
drivers/infiniband/hw/cxgb4/iw_cxgb4.h
drivers/infiniband/hw/efa/efa_main.c
drivers/infiniband/hw/hfi1/sdma.c
drivers/infiniband/hw/hns/hns_roce_cmd.c
drivers/infiniband/hw/hns/hns_roce_main.c
drivers/infiniband/hw/irdma/Kconfig
drivers/infiniband/hw/mlx5/cq.c
drivers/infiniband/hw/mlx5/devx.c
drivers/infiniband/hw/mlx5/main.c
drivers/infiniband/hw/mlx5/mr.c
drivers/infiniband/sw/rxe/rxe_mcast.c
drivers/infiniband/sw/rxe/rxe_net.c
drivers/infiniband/sw/rxe/rxe_queue.c
drivers/infiniband/sw/rxe/rxe_resp.c
drivers/interconnect/core.c
drivers/interconnect/qcom/icc-rpmh.c
drivers/iommu/dma-iommu.c
drivers/iommu/intel/pasid.c
drivers/iommu/intel/pasid.h
drivers/iommu/intel/svm.c
drivers/iommu/iommu.c
drivers/iommu/s390-iommu.c
drivers/ipack/carriers/tpci200.c
drivers/irqchip/irq-alpine-msi.c
drivers/irqchip/irq-apple-aic.c
drivers/irqchip/irq-gic-v2m.c
drivers/irqchip/irq-gic-v3-its.c
drivers/irqchip/irq-gic-v3-mbi.c
drivers/irqchip/irq-gic-v3.c
drivers/irqchip/irq-loongson-pch-pic.c
drivers/irqchip/irq-ls-scfg-msi.c
drivers/irqchip/irq-mtk-sysirq.c
drivers/irqchip/irq-mvebu-gicp.c
drivers/irqchip/irq-mvebu-odmi.c
drivers/irqchip/irq-partition-percpu.c
drivers/irqchip/qcom-pdc.c
drivers/lightnvm/Kconfig [deleted file]
drivers/lightnvm/Makefile [deleted file]
drivers/lightnvm/core.c [deleted file]
drivers/lightnvm/pblk-cache.c [deleted file]
drivers/lightnvm/pblk-core.c [deleted file]
drivers/lightnvm/pblk-gc.c [deleted file]
drivers/lightnvm/pblk-init.c [deleted file]
drivers/lightnvm/pblk-map.c [deleted file]
drivers/lightnvm/pblk-rb.c [deleted file]
drivers/lightnvm/pblk-read.c [deleted file]
drivers/lightnvm/pblk-recovery.c [deleted file]
drivers/lightnvm/pblk-rl.c [deleted file]
drivers/lightnvm/pblk-sysfs.c [deleted file]
drivers/lightnvm/pblk-trace.h [deleted file]
drivers/lightnvm/pblk-write.c [deleted file]
drivers/lightnvm/pblk.h [deleted file]
drivers/md/Kconfig
drivers/md/bcache/Kconfig
drivers/md/bcache/btree.c
drivers/md/bcache/super.c
drivers/md/bcache/util.h
drivers/md/dm-ebs-target.c
drivers/md/dm-integrity.c
drivers/md/dm-ioctl.c
drivers/md/dm-rq.c
drivers/md/dm-table.c
drivers/md/dm-writecache.c
drivers/md/dm.c
drivers/md/md.h
drivers/md/raid1.c
drivers/md/raid10.c
drivers/md/raid5.c
drivers/media/common/videobuf2/videobuf2-core.c
drivers/media/pci/intel/ipu3/cio2-bridge.c
drivers/media/platform/atmel/Kconfig
drivers/media/platform/atmel/Makefile
drivers/media/platform/atmel/atmel-isc-base.c
drivers/media/usb/dvb-usb-v2/rtl28xxu.c
drivers/mfd/db8500-prcmu.c
drivers/mfd/fsl-imx25-tsadc.c
drivers/mfd/ioc3.c
drivers/mfd/qcom-pm8xxx.c
drivers/mmc/core/block.c
drivers/mmc/core/core.c
drivers/mmc/core/core.h
drivers/mmc/core/mmc.c
drivers/mmc/host/dw_mmc.c
drivers/mmc/host/mmci_stm32_sdmmc.c
drivers/mmc/host/sdhci-iproc.c
drivers/mmc/host/sdhci-msm.c
drivers/mmc/host/sdhci-tegra.c
drivers/mtd/chips/cfi_cmdset_0002.c
drivers/mtd/devices/mchp48l640.c
drivers/mtd/mtd_blkdevs.c
drivers/mtd/mtdcore.c
drivers/mtd/nand/raw/nand_base.c
drivers/net/bareudp.c
drivers/net/can/m_can/m_can.c
drivers/net/can/spi/hi311x.c
drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
drivers/net/can/usb/ems_usb.c
drivers/net/can/usb/esd_usb2.c
drivers/net/can/usb/mcba_usb.c
drivers/net/can/usb/peak_usb/pcan_usb.c
drivers/net/can/usb/usb_8dev.c
drivers/net/dsa/hirschmann/hellcreek.c
drivers/net/dsa/lan9303-core.c
drivers/net/dsa/lantiq_gswip.c
drivers/net/dsa/microchip/ksz8795.c
drivers/net/dsa/microchip/ksz8795_reg.h
drivers/net/dsa/microchip/ksz_common.h
drivers/net/dsa/mt7530.c
drivers/net/dsa/mv88e6xxx/chip.c
drivers/net/dsa/mv88e6xxx/serdes.c
drivers/net/dsa/qca/ar9331.c
drivers/net/dsa/sja1105/sja1105_dynamic_config.c
drivers/net/dsa/sja1105/sja1105_main.c
drivers/net/dsa/sja1105/sja1105_mdio.c
drivers/net/ethernet/apm/xgene-v2/main.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt.h
drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
drivers/net/ethernet/cadence/macb_ptp.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/dec/tulip/winbond-840.c
drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
drivers/net/ethernet/intel/e1000e/ich8lan.c
drivers/net/ethernet/intel/e1000e/ich8lan.h
drivers/net/ethernet/intel/i40e/i40e_ethtool.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/i40e/i40e_txrx.c
drivers/net/ethernet/intel/i40e/i40e_txrx.h
drivers/net/ethernet/intel/iavf/iavf.h
drivers/net/ethernet/intel/iavf/iavf_main.c
drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
drivers/net/ethernet/intel/ice/ice.h
drivers/net/ethernet/intel/ice/ice_devlink.c
drivers/net/ethernet/intel/ice/ice_main.c
drivers/net/ethernet/intel/ice/ice_ptp.c
drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
drivers/net/ethernet/intel/igc/igc_main.c
drivers/net/ethernet/intel/igc/igc_ptp.c
drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
drivers/net/ethernet/marvell/mvneta.c
drivers/net/ethernet/marvell/mvpp2/mvpp2.h
drivers/net/ethernet/marvell/octeontx2/af/cgx.c
drivers/net/ethernet/marvell/octeontx2/af/npc.h
drivers/net/ethernet/marvell/octeontx2/af/rvu.c
drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c
drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
drivers/net/ethernet/marvell/prestera/prestera_devlink.c
drivers/net/ethernet/marvell/prestera/prestera_switchdev.c
drivers/net/ethernet/mellanox/mlx4/main.c
drivers/net/ethernet/mellanox/mlx5/core/cq.c
drivers/net/ethernet/mellanox/mlx5/core/dev.c
drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
drivers/net/ethernet/mellanox/mlx5/core/en/params.c
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/eq.c
drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
drivers/net/ethernet/mellanox/mlx5/core/esw/sample.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
drivers/net/ethernet/mellanox/mlx5/core/health.c
drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c
drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c
drivers/net/ethernet/mscc/ocelot.c
drivers/net/ethernet/mscc/ocelot_io.c
drivers/net/ethernet/natsemi/natsemi.c
drivers/net/ethernet/neterion/vxge/vxge-main.c
drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
drivers/net/ethernet/pensando/ionic/ionic_lif.c
drivers/net/ethernet/pensando/ionic/ionic_lif.h
drivers/net/ethernet/pensando/ionic/ionic_phc.c
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
drivers/net/ethernet/qlogic/qed/qed_ll2.c
drivers/net/ethernet/qlogic/qed/qed_main.c
drivers/net/ethernet/qlogic/qed/qed_rdma.c
drivers/net/ethernet/qlogic/qede/qede.h
drivers/net/ethernet/qlogic/qede/qede_filter.c
drivers/net/ethernet/qlogic/qede/qede_main.c
drivers/net/ethernet/qlogic/qla3xxx.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
drivers/net/ethernet/realtek/r8169_main.c
drivers/net/ethernet/rocker/rocker_main.c
drivers/net/ethernet/rocker/rocker_ofdpa.c
drivers/net/ethernet/sis/sis900.c
drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
drivers/net/ethernet/stmicro/stmmac/stmmac.h
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c
drivers/net/ethernet/sun/niu.c
drivers/net/ethernet/ti/am65-cpsw-nuss.c
drivers/net/ethernet/ti/am65-cpsw-switchdev.c
drivers/net/ethernet/ti/cpsw_new.c
drivers/net/ethernet/ti/cpsw_priv.h
drivers/net/ethernet/ti/cpsw_switchdev.c
drivers/net/hamradio/6pack.c
drivers/net/ieee802154/mac802154_hwsim.c
drivers/net/mdio/mdio-mux.c
drivers/net/pcs/pcs-xpcs.c
drivers/net/phy/broadcom.c
drivers/net/phy/mediatek-ge.c
drivers/net/phy/micrel.c
drivers/net/ppp/ppp_generic.c
drivers/net/usb/asix.h
drivers/net/usb/asix_common.c
drivers/net/usb/asix_devices.c
drivers/net/usb/lan78xx.c
drivers/net/usb/pegasus.c
drivers/net/usb/r8152.c
drivers/net/virtio_net.c
drivers/net/vrf.c
drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
drivers/net/wireless/intel/iwlwifi/pcie/drv.c
drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
drivers/net/wireless/mediatek/mt76/mt7921/mcu.h
drivers/net/wireless/virt_wifi.c
drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.c
drivers/net/wwan/iosm/iosm_ipc_mmio.h
drivers/net/wwan/iosm/iosm_ipc_mux_codec.c
drivers/net/wwan/iosm/iosm_ipc_mux_codec.h
drivers/net/wwan/iosm/iosm_ipc_protocol_ops.c
drivers/net/wwan/iosm/iosm_ipc_wwan.c
drivers/net/wwan/mhi_wwan_ctrl.c
drivers/net/wwan/wwan_core.c
drivers/nfc/nfcsim.c
drivers/nfc/s3fwrn5/firmware.c
drivers/nvdimm/namespace_devs.c
drivers/nvme/host/Kconfig
drivers/nvme/host/Makefile
drivers/nvme/host/core.c
drivers/nvme/host/fabrics.c
drivers/nvme/host/ioctl.c
drivers/nvme/host/lightnvm.c [deleted file]
drivers/nvme/host/multipath.c
drivers/nvme/host/nvme.h
drivers/nvme/host/pci.c
drivers/nvme/host/rdma.c
drivers/nvme/host/tcp.c
drivers/nvme/host/trace.c
drivers/nvme/target/Kconfig
drivers/nvme/target/core.c
drivers/nvme/target/fabrics-cmd.c
drivers/nvme/target/loop.c
drivers/nvme/target/trace.c
drivers/nvme/target/zns.c
drivers/opp/core.c
drivers/opp/of.c
drivers/pci/controller/pci-ixp4xx.c
drivers/pci/msi.c
drivers/pci/pci-sysfs.c
drivers/pci/quirks.c
drivers/pcmcia/i82092.c
drivers/pinctrl/actions/pinctrl-owl.c
drivers/pinctrl/bcm/pinctrl-bcm2835.c
drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
drivers/pinctrl/bcm/pinctrl-nsp-gpio.c
drivers/pinctrl/intel/pinctrl-baytrail.c
drivers/pinctrl/intel/pinctrl-cherryview.c
drivers/pinctrl/intel/pinctrl-lynxpoint.c
drivers/pinctrl/intel/pinctrl-tigerlake.c
drivers/pinctrl/mediatek/mtk-eint.c
drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
drivers/pinctrl/nomadik/pinctrl-nomadik.c
drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
drivers/pinctrl/pinctrl-amd.c
drivers/pinctrl/pinctrl-at91.c
drivers/pinctrl/pinctrl-equilibrium.c
drivers/pinctrl/pinctrl-ingenic.c
drivers/pinctrl/pinctrl-k210.c
drivers/pinctrl/pinctrl-microchip-sgpio.c
drivers/pinctrl/pinctrl-ocelot.c
drivers/pinctrl/pinctrl-oxnas.c
drivers/pinctrl/pinctrl-pic32.c
drivers/pinctrl/pinctrl-pistachio.c
drivers/pinctrl/pinctrl-rockchip.c
drivers/pinctrl/pinctrl-rockchip.h [new file with mode: 0644]
drivers/pinctrl/pinctrl-single.c
drivers/pinctrl/pinctrl-st.c
drivers/pinctrl/qcom/Kconfig
drivers/pinctrl/qcom/pinctrl-msm.c
drivers/pinctrl/samsung/pinctrl-exynos.c
drivers/pinctrl/samsung/pinctrl-s3c24xx.c
drivers/pinctrl/samsung/pinctrl-s3c64xx.c
drivers/pinctrl/spear/pinctrl-plgpio.c
drivers/pinctrl/sunxi/pinctrl-sunxi.c
drivers/platform/x86/Kconfig
drivers/platform/x86/amd-pmc.c
drivers/platform/x86/asus-nb-wmi.c
drivers/platform/x86/dual_accel_detect.h [new file with mode: 0644]
drivers/platform/x86/gigabyte-wmi.c
drivers/platform/x86/intel-hid.c
drivers/platform/x86/intel-vbtn.c
drivers/platform/x86/pcengines-apuv2.c
drivers/platform/x86/think-lmi.c
drivers/platform/x86/think-lmi.h
drivers/platform/x86/thinkpad_acpi.c
drivers/platform/x86/wireless-hotkey.c
drivers/power/reset/Kconfig
drivers/power/reset/Makefile
drivers/power/reset/linkstation-poweroff.c
drivers/power/reset/tps65086-restart.c [new file with mode: 0644]
drivers/power/supply/Kconfig
drivers/power/supply/Makefile
drivers/power/supply/ab8500-bm.h
drivers/power/supply/ab8500_bmdata.c
drivers/power/supply/ab8500_btemp.c
drivers/power/supply/ab8500_chargalg.c [new file with mode: 0644]
drivers/power/supply/ab8500_charger.c
drivers/power/supply/ab8500_fg.c
drivers/power/supply/abx500_chargalg.c [deleted file]
drivers/power/supply/axp288_charger.c
drivers/power/supply/axp288_fuel_gauge.c
drivers/power/supply/bq24735-charger.c
drivers/power/supply/cros_peripheral_charger.c [new file with mode: 0644]
drivers/power/supply/cw2015_battery.c
drivers/power/supply/max17042_battery.c
drivers/power/supply/mt6360_charger.c [new file with mode: 0644]
drivers/power/supply/power_supply_core.c
drivers/power/supply/qcom_smbb.c
drivers/power/supply/rn5t618_power.c
drivers/power/supply/sbs-battery.c
drivers/power/supply/sc27xx_fuel_gauge.c
drivers/power/supply/smb347-charger.c
drivers/ptp/Kconfig
drivers/ptp/ptp_sysfs.c
drivers/regulator/Kconfig
drivers/regulator/Makefile
drivers/regulator/bd718x7-regulator.c
drivers/regulator/da9063-regulator.c
drivers/regulator/dbx500-prcmu.c
drivers/regulator/devres.c
drivers/regulator/fixed.c
drivers/regulator/hi6421v600-regulator.c
drivers/regulator/irq_helpers.c
drivers/regulator/mt6358-regulator.c
drivers/regulator/mt6359-regulator.c
drivers/regulator/mt6397-regulator.c
drivers/regulator/rt5033-regulator.c
drivers/regulator/rt6245-regulator.c
drivers/regulator/rtq2134-regulator.c [new file with mode: 0644]
drivers/regulator/rtq6752-regulator.c [new file with mode: 0644]
drivers/regulator/sy7636a-regulator.c
drivers/regulator/sy8824x.c
drivers/regulator/sy8827n.c
drivers/regulator/tps65910-regulator.c
drivers/regulator/vctrl-regulator.c
drivers/reset/Kconfig
drivers/reset/reset-zynqmp.c
drivers/s390/block/dasd_diag.c
drivers/s390/block/dasd_eckd.c
drivers/s390/block/dasd_fba.c
drivers/s390/block/dasd_genhd.c
drivers/s390/block/dasd_ioctl.c
drivers/s390/block/dcssblk.c
drivers/s390/char/sclp.c
drivers/s390/char/sclp.h
drivers/s390/char/sclp_cmd.c
drivers/s390/char/sclp_config.c
drivers/s390/char/sclp_early_core.c
drivers/s390/char/zcore.c
drivers/s390/cio/css.c
drivers/s390/cio/qdio.h
drivers/s390/cio/qdio_debug.c
drivers/s390/cio/qdio_main.c
drivers/s390/cio/qdio_setup.c
drivers/s390/crypto/ap_bus.c
drivers/s390/crypto/ap_bus.h
drivers/s390/crypto/ap_queue.c
drivers/s390/crypto/vfio_ap_ops.c
drivers/s390/crypto/zcrypt_api.c
drivers/s390/crypto/zcrypt_card.c
drivers/s390/crypto/zcrypt_ccamisc.c
drivers/s390/crypto/zcrypt_cex2a.c
drivers/s390/crypto/zcrypt_cex2c.c
drivers/s390/crypto/zcrypt_cex4.c
drivers/s390/crypto/zcrypt_queue.c
drivers/s390/net/qeth_core_main.c
drivers/s390/net/qeth_l2_main.c
drivers/s390/scsi/zfcp_qdio.c
drivers/scsi/arm/acornscsi.c
drivers/scsi/arm/fas216.c
drivers/scsi/device_handler/scsi_dh_rdac.c
drivers/scsi/ibmvscsi/ibmvfc.c
drivers/scsi/ibmvscsi/ibmvfc.h
drivers/scsi/lpfc/lpfc_init.c
drivers/scsi/megaraid/megaraid_mm.c
drivers/scsi/mpt3sas/mpt3sas_base.c
drivers/scsi/pm8001/pm8001_sas.c
drivers/scsi/scsi_scan.c
drivers/scsi/scsi_sysfs.c
drivers/scsi/sd.c
drivers/scsi/sg.c
drivers/scsi/sr.c
drivers/scsi/st.c
drivers/scsi/st.h
drivers/scsi/storvsc_drv.c
drivers/slimbus/messaging.c
drivers/slimbus/qcom-ngd-ctrl.c
drivers/soc/Makefile
drivers/soc/fsl/qe/qe_ic.c
drivers/soc/imx/soc-imx8m.c
drivers/soc/ixp4xx/ixp4xx-npe.c
drivers/soc/ixp4xx/ixp4xx-qmgr.c
drivers/soc/tegra/Kconfig
drivers/spi/Kconfig
drivers/spi/Makefile
drivers/spi/spi-bcm2835aux.c
drivers/spi/spi-cadence-quadspi.c
drivers/spi/spi-coldfire-qspi.c
drivers/spi/spi-davinci.c
drivers/spi/spi-ep93xx.c
drivers/spi/spi-fsi.c
drivers/spi/spi-fsl-dspi.c
drivers/spi/spi-geni-qcom.c
drivers/spi/spi-imx.c
drivers/spi/spi-meson-spicc.c
drivers/spi/spi-mt65xx.c
drivers/spi/spi-mux.c
drivers/spi/spi-mxic.c
drivers/spi/spi-orion.c
drivers/spi/spi-pic32.c
drivers/spi/spi-pxa2xx.c
drivers/spi/spi-rockchip-sfc.c [new file with mode: 0644]
drivers/spi/spi-sprd-adi.c
drivers/spi/spi-stm32.c
drivers/spi/spi-tegra114.c
drivers/spi/spi-tegra20-slink.c
drivers/spi/spi-zynq-qspi.c
drivers/spi/spi.c
drivers/staging/media/atomisp/pci/atomisp_ioctl.c
drivers/staging/media/av7110/audio.h [deleted file]
drivers/staging/media/av7110/av7110.h
drivers/staging/media/av7110/osd.h [deleted file]
drivers/staging/media/av7110/video.h [deleted file]
drivers/staging/mt7621-pci/pci-mt7621.c
drivers/staging/rtl8712/hal_init.c
drivers/staging/rtl8712/rtl8712_led.c
drivers/staging/rtl8712/rtl871x_led.h
drivers/staging/rtl8712/rtl871x_pwrctrl.c
drivers/staging/rtl8712/rtl871x_pwrctrl.h
drivers/staging/rtl8712/usb_intf.c
drivers/staging/rtl8723bs/Kconfig
drivers/staging/rtl8723bs/hal/sdio_ops.c
drivers/tee/optee/call.c
drivers/tee/optee/core.c
drivers/tee/optee/optee_private.h
drivers/tee/optee/rpc.c
drivers/tee/optee/shm_pool.c
drivers/tee/tee_shm.c
drivers/thunderbolt/switch.c
drivers/tty/serial/8250/8250_aspeed_vuart.c
drivers/tty/serial/8250/8250_fsl.c
drivers/tty/serial/8250/8250_mtk.c
drivers/tty/serial/8250/8250_pci.c
drivers/tty/serial/8250/8250_port.c
drivers/tty/serial/fsl_lpuart.c
drivers/tty/serial/max310x.c
drivers/tty/serial/serial-tegra.c
drivers/tty/vt/vt.c
drivers/tty/vt/vt_ioctl.c
drivers/usb/cdns3/cdns3-ep0.c
drivers/usb/cdns3/cdnsp-gadget.c
drivers/usb/cdns3/cdnsp-gadget.h
drivers/usb/cdns3/cdnsp-ring.c
drivers/usb/class/usbtmc.c
drivers/usb/common/usb-otg-fsm.c
drivers/usb/dwc3/gadget.c
drivers/usb/gadget/function/f_hid.c
drivers/usb/gadget/function/u_audio.c
drivers/usb/gadget/udc/max3420_udc.c
drivers/usb/host/ohci-at91.c
drivers/usb/host/xhci-pci-renesas.c
drivers/usb/musb/omap2430.c
drivers/usb/serial/ftdi_sio.c
drivers/usb/serial/ftdi_sio_ids.h
drivers/usb/serial/option.c
drivers/usb/serial/pl2303.c
drivers/usb/typec/tcpm/tcpm.c
drivers/vdpa/ifcvf/ifcvf_main.c
drivers/vdpa/mlx5/core/mr.c
drivers/vdpa/mlx5/net/mlx5_vnet.c
drivers/vdpa/vdpa_sim/vdpa_sim.c
drivers/vdpa/virtio_pci/vp_vdpa.c
drivers/vhost/vdpa.c
drivers/vhost/vhost.c
drivers/vhost/vringh.c
drivers/virt/acrn/vm.c
drivers/virtio/virtio.c
drivers/virtio/virtio_mem.c
drivers/virtio/virtio_pci_common.c
drivers/virtio/virtio_ring.c
drivers/virtio/virtio_vdpa.c
drivers/xen/events/events_base.c
fs/9p/vfs_file.c
fs/Kconfig
fs/Kconfig.binfmt
fs/Makefile
fs/afs/flock.c
fs/aio.c
fs/binfmt_em86.c [deleted file]
fs/block_dev.c
fs/btrfs/compression.c
fs/btrfs/disk-io.c
fs/btrfs/inode.c
fs/btrfs/tree-log.c
fs/btrfs/volumes.c
fs/ceph/addr.c
fs/ceph/caps.c
fs/ceph/file.c
fs/ceph/locks.c
fs/ceph/mds_client.c
fs/ceph/mdsmap.c
fs/ceph/snap.c
fs/ceph/super.h
fs/cifs/cifsglob.h
fs/cifs/dir.c
fs/cifs/file.c
fs/cifs/fs_context.c
fs/cifs/inode.c
fs/cifs/misc.c
fs/cifs/smb2ops.c
fs/cifs/smb2pdu.c
fs/configfs/file.c
fs/dax.c
fs/eventfd.c
fs/ext2/Kconfig
fs/ext2/dir.c
fs/ext2/ext2.h
fs/ext2/file.c
fs/ext2/inode.c
fs/ext2/namei.c
fs/ext2/super.c
fs/ext4/ext4.h
fs/ext4/ext4_jbd2.c
fs/ext4/extents.c
fs/ext4/file.c
fs/ext4/inode.c
fs/ext4/ioctl.c
fs/ext4/mmp.c
fs/ext4/namei.c
fs/ext4/super.c
fs/ext4/truncate.h
fs/f2fs/data.c
fs/f2fs/f2fs.h
fs/f2fs/file.c
fs/f2fs/super.c
fs/f2fs/sysfs.c
fs/fat/fatent.c
fs/fcntl.c
fs/fuse/dax.c
fs/fuse/dir.c
fs/fuse/file.c
fs/fuse/fuse_i.h
fs/fuse/inode.c
fs/gfs2/file.c
fs/hpfs/Kconfig
fs/hpfs/file.c
fs/inode.c
fs/io-wq.c
fs/io_uring.c
fs/ioctl.c
fs/isofs/inode.c
fs/isofs/isofs.h
fs/isofs/joliet.c
fs/locks.c
fs/namei.c
fs/namespace.c
fs/nfs/file.c
fs/nfsd/nfs4state.c
fs/nfsd/vfs.c
fs/nilfs2/super.c
fs/notify/fanotify/fanotify_user.c
fs/notify/fsnotify.c
fs/notify/fsnotify.h
fs/notify/inotify/inotify_user.c
fs/notify/mark.c
fs/ocfs2/file.c
fs/ocfs2/locks.c
fs/open.c
fs/overlayfs/export.c
fs/overlayfs/file.c
fs/overlayfs/readdir.c
fs/pipe.c
fs/read_write.c
fs/reiserfs/stree.c
fs/reiserfs/super.c
fs/remap_range.c
fs/squashfs/block.c
fs/squashfs/lz4_wrapper.c
fs/squashfs/lzo_wrapper.c
fs/squashfs/xz_wrapper.c
fs/squashfs/zlib_wrapper.c
fs/squashfs/zstd_wrapper.c
fs/super.c
fs/timerfd.c
fs/udf/dir.c
fs/udf/ecma_167.h
fs/udf/inode.c
fs/udf/misc.c
fs/udf/namei.c
fs/udf/osta_udf.h
fs/udf/super.c
fs/udf/udf_sb.h
fs/udf/udfdecl.h
fs/udf/unicode.c
fs/xfs/libxfs/xfs_log_format.h
fs/xfs/xfs_bmap_util.c
fs/xfs/xfs_buf.c
fs/xfs/xfs_buf_item_recover.c
fs/xfs/xfs_file.c
fs/xfs/xfs_inode.c
fs/xfs/xfs_inode.h
fs/xfs/xfs_inode_item_recover.c
fs/xfs/xfs_log.c
fs/xfs/xfs_log_cil.c
fs/xfs/xfs_log_priv.h
fs/xfs/xfs_super.c
fs/xfs/xfs_trace.h
fs/zonefs/super.c
fs/zonefs/zonefs.h
include/asm-generic/atomic-instrumented.h [deleted file]
include/asm-generic/atomic-long.h [deleted file]
include/asm-generic/bitops/atomic.h
include/asm-generic/bitops/lock.h
include/asm-generic/bitops/non-atomic.h
include/asm-generic/vmlinux.lds.h
include/crypto/public_key.h
include/crypto/sm4.h
include/dt-bindings/clock/ingenic,sysost.h
include/dt-bindings/power/summit,smb347-charger.h
include/linux/atomic-arch-fallback.h [deleted file]
include/linux/atomic.h
include/linux/atomic/atomic-arch-fallback.h [new file with mode: 0644]
include/linux/atomic/atomic-instrumented.h [new file with mode: 0644]
include/linux/atomic/atomic-long.h [new file with mode: 0644]
include/linux/backing-dev.h
include/linux/bio.h
include/linux/blk-cgroup.h
include/linux/blk-mq.h
include/linux/blk_types.h
include/linux/blkdev.h
include/linux/bpf-cgroup.h
include/linux/bpf_types.h
include/linux/bpf_verifier.h
include/linux/bvec.h
include/linux/cmdline-parser.h [deleted file]
include/linux/cpuhotplug.h
include/linux/cpuset.h
include/linux/debug_locks.h
include/linux/device-mapper.h
include/linux/device.h
include/linux/edac.h
include/linux/eventfd.h
include/linux/fanotify.h
include/linux/fiemap.h
include/linux/filter.h
include/linux/fs.h
include/linux/fsnotify.h
include/linux/ftrace.h
include/linux/genhd.h
include/linux/hrtimer.h
include/linux/inetdevice.h
include/linux/intel-ish-client-if.h
include/linux/interrupt.h
include/linux/ioprio.h
include/linux/irq.h
include/linux/kfence.h
include/linux/lightnvm.h [deleted file]
include/linux/linear_range.h
include/linux/local_lock_internal.h
include/linux/memcontrol.h
include/linux/mfd/rt5033-private.h
include/linux/mlx5/driver.h
include/linux/mlx5/mlx5_ifc_vdpa.h
include/linux/mmc/card.h
include/linux/mmc/host.h
include/linux/mmu_context.h
include/linux/moduleparam.h
include/linux/msi.h
include/linux/mutex.h
include/linux/netfilter/ipset/ip_set.h
include/linux/once.h
include/linux/padata.h
include/linux/pci_ids.h
include/linux/pid.h
include/linux/pipe_fs_i.h
include/linux/platform_data/cros_ec_commands.h
include/linux/platform_data/spi-mt65xx.h
include/linux/posix-timers.h
include/linux/power/max17042_battery.h
include/linux/power_supply.h
include/linux/preempt.h
include/linux/rbtree.h
include/linux/rbtree_types.h [new file with mode: 0644]
include/linux/rculist.h
include/linux/rcupdate.h
include/linux/rcutiny.h
include/linux/regmap.h
include/linux/regulator/consumer.h
include/linux/regulator/driver.h
include/linux/regulator/machine.h
include/linux/resctrl.h
include/linux/rtmutex.h
include/linux/rwbase_rt.h [new file with mode: 0644]
include/linux/rwlock_rt.h [new file with mode: 0644]
include/linux/rwlock_types.h
include/linux/rwsem.h
include/linux/sched.h
include/linux/sched/signal.h
include/linux/sched/sysctl.h
include/linux/sched/wake_q.h
include/linux/security.h
include/linux/serial_core.h
include/linux/skmsg.h
include/linux/spi/spi.h
include/linux/spinlock.h
include/linux/spinlock_api_smp.h
include/linux/spinlock_rt.h [new file with mode: 0644]
include/linux/spinlock_types.h
include/linux/spinlock_types_raw.h [new file with mode: 0644]
include/linux/srcutiny.h
include/linux/static_call.h
include/linux/tee_drv.h
include/linux/usb/otg-fsm.h
include/linux/vdpa.h
include/linux/virtio.h
include/linux/vringh.h
include/linux/wait.h
include/linux/writeback.h
include/linux/ww_mutex.h
include/net/bluetooth/hci_core.h
include/net/flow_offload.h
include/net/ip6_fib.h
include/net/ip6_route.h
include/net/llc_pdu.h
include/net/netns/conntrack.h
include/net/netns/xfrm.h
include/net/pkt_cls.h
include/net/psample.h
include/net/sctp/structs.h
include/trace/events/kyber.h
include/trace/events/mmflags.h
include/uapi/linux/dvb/audio.h [new file with mode: 0644]
include/uapi/linux/dvb/osd.h [new file with mode: 0644]
include/uapi/linux/dvb/video.h [new file with mode: 0644]
include/uapi/linux/fanotify.h
include/uapi/linux/fs.h
include/uapi/linux/idxd.h
include/uapi/linux/ioprio.h [new file with mode: 0644]
include/uapi/linux/lightnvm.h [deleted file]
include/uapi/linux/neighbour.h
include/uapi/linux/netfilter/nfnetlink_hook.h
include/uapi/linux/prctl.h
init/do_mounts.c
init/init_task.c
init/main.c
kernel/Kconfig.locks
kernel/bpf/core.c
kernel/bpf/disasm.c
kernel/bpf/hashtab.c
kernel/bpf/helpers.c
kernel/bpf/verifier.c
kernel/cfi.c
kernel/cgroup/cpuset.c
kernel/cgroup/rstat.c
kernel/cpu.c
kernel/cred.c
kernel/events/core.c
kernel/events/hw_breakpoint.c
kernel/fork.c
kernel/futex.c
kernel/irq/affinity.c
kernel/irq/chip.c
kernel/irq/cpuhotplug.c
kernel/irq/generic-chip.c
kernel/irq/ipi.c
kernel/irq/irqdesc.c
kernel/irq/irqdomain.c
kernel/irq/manage.c
kernel/irq/matrix.c
kernel/irq/msi.c
kernel/irq/pm.c
kernel/irq/proc.c
kernel/irq/timings.c
kernel/kcsan/debugfs.c
kernel/locking/Makefile
kernel/locking/locktorture.c
kernel/locking/mutex-debug.c
kernel/locking/mutex-debug.h [deleted file]
kernel/locking/mutex.c
kernel/locking/mutex.h
kernel/locking/rtmutex.c
kernel/locking/rtmutex_api.c [new file with mode: 0644]
kernel/locking/rtmutex_common.h
kernel/locking/rwbase_rt.c [new file with mode: 0644]
kernel/locking/rwsem.c
kernel/locking/semaphore.c
kernel/locking/spinlock.c
kernel/locking/spinlock_debug.c
kernel/locking/spinlock_rt.c [new file with mode: 0644]
kernel/locking/ww_mutex.h [new file with mode: 0644]
kernel/locking/ww_rt_mutex.c [new file with mode: 0644]
kernel/padata.c
kernel/params.c
kernel/pid.c
kernel/rcu/rcuscale.c
kernel/rcu/rcutorture.c
kernel/rcu/refscale.c
kernel/rcu/srcutiny.c
kernel/rcu/tasks.h
kernel/rcu/tree.c
kernel/rcu/tree_nocb.h [new file with mode: 0644]
kernel/rcu/tree_plugin.h
kernel/rcu/tree_stall.h
kernel/scftorture.c
kernel/sched/core.c
kernel/sched/deadline.c
kernel/sched/debug.c
kernel/sched/fair.c
kernel/sched/sched.h
kernel/sched/topology.c
kernel/seccomp.c
kernel/signal.c
kernel/smp.c
kernel/smpboot.c
kernel/softirq.c
kernel/time/clocksource-wdtest.c
kernel/time/clocksource.c
kernel/time/hrtimer.c
kernel/time/jiffies.c
kernel/time/posix-cpu-timers.c
kernel/time/posix-timers.c
kernel/time/tick-common.c
kernel/time/tick-internal.h
kernel/time/timekeeping.c
kernel/time/timer.c
kernel/torture.c
kernel/trace/Kconfig
kernel/trace/bpf_trace.c
kernel/trace/ftrace.c
kernel/trace/trace.c
kernel/trace/trace.h
kernel/trace/trace_events_hist.c
kernel/trace/trace_hwlat.c
kernel/trace/trace_osnoise.c
kernel/tracepoint.c
kernel/ucount.c
lib/Kconfig
lib/Kconfig.debug
lib/crypto/Kconfig
lib/crypto/Makefile
lib/crypto/blake2s.c
lib/crypto/chacha20poly1305.c
lib/crypto/curve25519.c
lib/crypto/sm4.c [new file with mode: 0644]
lib/debugobjects.c
lib/devmem_is_allowed.c
lib/linear_ranges.c
lib/mpi/mpiutil.c
lib/once.c
lib/string.c
lib/test_lockup.c
mm/backing-dev.c
mm/filemap.c
mm/gup.c
mm/hugetlb.c
mm/kfence/kfence_test.c
mm/kmemleak.c
mm/madvise.c
mm/memcontrol.c
mm/memory-failure.c
mm/memory_hotplug.c
mm/migrate.c
mm/mmap.c
mm/nommu.c
mm/page-writeback.c
mm/page_alloc.c
mm/readahead.c
mm/rmap.c
mm/shmem.c
mm/slab.h
mm/slub.c
mm/swap_slots.c
mm/swap_state.c
mm/truncate.c
mm/vmscan.c
mm/vmstat.c
net/bluetooth/hci_core.c
net/bluetooth/hci_sock.c
net/bluetooth/hci_sysfs.c
net/bpf/test_run.c
net/bridge/br_fdb.c
net/bridge/br_if.c
net/bridge/netfilter/nf_conntrack_bridge.c
net/can/j1939/transport.c
net/can/raw.c
net/core/devlink.c
net/core/flow_dissector.c
net/core/link_watch.c
net/core/page_pool.c
net/core/rtnetlink.c
net/core/skmsg.c
net/dccp/dccp.h
net/dsa/slave.c
net/ieee802154/socket.c
net/ipv4/cipso_ipv4.c
net/ipv4/igmp.c
net/ipv4/ip_gre.c
net/ipv4/ip_tunnel.c
net/ipv4/route.c
net/ipv4/tcp_bbr.c
net/ipv4/tcp_offload.c
net/ipv4/udp_offload.c
net/ipv6/ip6_fib.c
net/ipv6/ip6_gre.c
net/ipv6/ip6_output.c
net/ipv6/route.c
net/llc/af_llc.c
net/llc/llc_s_ac.c
net/mac80211/cfg.c
net/mac80211/ieee80211_i.h
net/mac80211/main.c
net/mac80211/mlme.c
net/mac80211/rx.c
net/mac80211/tx.c
net/mptcp/options.c
net/mptcp/pm_netlink.c
net/netfilter/ipset/ip_set_hash_ip.c
net/netfilter/ipset/ip_set_hash_ipmark.c
net/netfilter/ipset/ip_set_hash_ipport.c
net/netfilter/ipset/ip_set_hash_ipportip.c
net/netfilter/ipset/ip_set_hash_ipportnet.c
net/netfilter/ipset/ip_set_hash_net.c
net/netfilter/ipset/ip_set_hash_netiface.c
net/netfilter/ipset/ip_set_hash_netnet.c
net/netfilter/ipset/ip_set_hash_netport.c
net/netfilter/ipset/ip_set_hash_netportnet.c
net/netfilter/nf_conntrack_core.c
net/netfilter/nf_conntrack_proto_tcp.c
net/netfilter/nf_conntrack_proto_udp.c
net/netfilter/nf_conntrack_standalone.c
net/netfilter/nf_flow_table_core.c
net/netfilter/nf_tables_api.c
net/netfilter/nfnetlink_hook.c
net/netfilter/nft_last.c
net/netfilter/nft_nat.c
net/openvswitch/flow.c
net/openvswitch/vport.c
net/qrtr/qrtr.c
net/rds/ib_frmr.c
net/sched/act_mirred.c
net/sched/sch_cake.c
net/sched/sch_ets.c
net/sched/sch_generic.c
net/sched/sch_taprio.c
net/sctp/auth.c
net/sctp/input.c
net/sctp/ipv6.c
net/sctp/sm_statefuns.c
net/sctp/transport.c
net/smc/af_smc.c
net/smc/smc_core.c
net/smc/smc_core.h
net/smc/smc_llc.c
net/smc/smc_tx.c
net/smc/smc_wr.c
net/sunrpc/svc_xprt.c
net/sunrpc/xprtsock.c
net/tipc/crypto.c
net/tipc/link.c
net/tipc/socket.c
net/unix/af_unix.c
net/vmw_vsock/virtio_transport.c
net/vmw_vsock/virtio_transport_common.c
net/wireless/nl80211.c
net/wireless/scan.c
net/xfrm/xfrm_compat.c
net/xfrm/xfrm_ipcomp.c
net/xfrm/xfrm_policy.c
net/xfrm/xfrm_user.c
scripts/atomic/check-atomics.sh
scripts/atomic/fallbacks/acquire
scripts/atomic/fallbacks/add_negative
scripts/atomic/fallbacks/add_unless
scripts/atomic/fallbacks/andnot
scripts/atomic/fallbacks/dec
scripts/atomic/fallbacks/dec_and_test
scripts/atomic/fallbacks/dec_if_positive
scripts/atomic/fallbacks/dec_unless_positive
scripts/atomic/fallbacks/fence
scripts/atomic/fallbacks/fetch_add_unless
scripts/atomic/fallbacks/inc
scripts/atomic/fallbacks/inc_and_test
scripts/atomic/fallbacks/inc_not_zero
scripts/atomic/fallbacks/inc_unless_negative
scripts/atomic/fallbacks/read_acquire
scripts/atomic/fallbacks/release
scripts/atomic/fallbacks/set_release
scripts/atomic/fallbacks/sub_and_test
scripts/atomic/fallbacks/try_cmpxchg
scripts/atomic/gen-atomic-fallback.sh
scripts/atomic/gen-atomic-instrumented.sh
scripts/atomic/gen-atomic-long.sh
scripts/atomic/gen-atomics.sh
scripts/checkversion.pl
scripts/recordmcount.pl
scripts/tracing/draw_functrace.py
security/integrity/platform_certs/efi_parser.c
security/security.c
security/selinux/ss/policydb.c
sound/core/memalloc.c
sound/core/pcm_native.c
sound/core/seq/seq_ports.c
sound/firewire/oxfw/oxfw-stream.c
sound/firewire/oxfw/oxfw.c
sound/firewire/oxfw/oxfw.h
sound/pci/hda/hda_generic.c
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_realtek.c
sound/pci/hda/patch_via.c
sound/soc/Kconfig
sound/soc/amd/acp-da7219-max98357a.c
sound/soc/amd/acp-pcm-dma.c
sound/soc/amd/raven/acp3x-pcm-dma.c
sound/soc/amd/renoir/acp3x-pdm-dma.c
sound/soc/amd/renoir/rn-pci-acp3x.c
sound/soc/codecs/Kconfig
sound/soc/codecs/Makefile
sound/soc/codecs/cs42l42.c
sound/soc/codecs/cs42l42.h
sound/soc/codecs/nau8824.c
sound/soc/codecs/rt5682.c
sound/soc/codecs/tlv320aic31xx.c
sound/soc/codecs/tlv320aic32x4.c
sound/soc/codecs/wm_adsp.c
sound/soc/intel/atom/sst-mfld-platform-pcm.c
sound/soc/intel/boards/sof_da7219_max98373.c
sound/soc/kirkwood/kirkwood-dma.c
sound/soc/soc-component.c
sound/soc/sof/intel/Kconfig
sound/soc/sof/intel/hda-ipc.c
sound/soc/sof/intel/hda.c
sound/soc/uniphier/aio-dma.c
sound/soc/xilinx/xlnx_formatter_pcm.c
sound/usb/card.c
sound/usb/clock.c
sound/usb/mixer.c
sound/usb/mixer_scarlett_gen2.c
sound/usb/quirks.c
tools/include/nolibc/nolibc.h
tools/io_uring/io_uring-cp.c
tools/lib/bpf/btf.c
tools/lib/bpf/libbpf_probes.c
tools/perf/util/cs-etm.c
tools/perf/util/map.c
tools/perf/util/pmu.c
tools/testing/nvdimm/test/nfit.c
tools/testing/selftests/bpf/verifier/dead_code.c
tools/testing/selftests/bpf/verifier/value_ptr_arith.c
tools/testing/selftests/kvm/.gitignore
tools/testing/selftests/kvm/Makefile
tools/testing/selftests/kvm/aarch64/get-reg-list.c
tools/testing/selftests/kvm/access_tracking_perf_test.c [new file with mode: 0644]
tools/testing/selftests/kvm/dirty_log_perf_test.c
tools/testing/selftests/kvm/include/x86_64/hyperv.h
tools/testing/selftests/kvm/steal_time.c
tools/testing/selftests/kvm/x86_64/hyperv_clock.c
tools/testing/selftests/kvm/x86_64/hyperv_features.c
tools/testing/selftests/net/ipsec.c
tools/testing/selftests/rcutorture/bin/jitter.sh
tools/testing/selftests/rcutorture/bin/kcsan-collapse.sh
tools/testing/selftests/rcutorture/bin/kvm-again.sh
tools/testing/selftests/rcutorture/bin/kvm-assign-cpus.sh [new file with mode: 0755]
tools/testing/selftests/rcutorture/bin/kvm-get-cpus-script.sh [new file with mode: 0755]
tools/testing/selftests/rcutorture/bin/kvm-recheck-lock.sh
tools/testing/selftests/rcutorture/bin/kvm-recheck-scf.sh
tools/testing/selftests/rcutorture/bin/kvm-recheck.sh
tools/testing/selftests/rcutorture/bin/kvm-remote-noreap.sh [new file with mode: 0755]
tools/testing/selftests/rcutorture/bin/kvm-remote.sh
tools/testing/selftests/rcutorture/bin/kvm-test-1-run-batch.sh
tools/testing/selftests/rcutorture/bin/kvm-test-1-run-qemu.sh
tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh
tools/testing/selftests/rcutorture/bin/kvm.sh
tools/testing/selftests/rcutorture/bin/torture.sh
tools/testing/selftests/rcutorture/configs/rcu/RUDE01
tools/testing/selftests/rcutorture/configs/rcu/TASKS01
tools/testing/selftests/rcutorture/configs/rcu/TASKS03
tools/testing/selftests/sgx/sigstruct.c
tools/virtio/Makefile
tools/virtio/linux/spinlock.h [new file with mode: 0644]
tools/virtio/linux/virtio.h
virt/kvm/kvm_main.c

index e34cdee..a0ed873 100644 (file)
@@ -28,6 +28,18 @@ Description:
                For more details refer Documentation/admin-guide/iostats.rst
 
 
+What:          /sys/block/<disk>/diskseq
+Date:          February 2021
+Contact:       Matteo Croce <mcroce@microsoft.com>
+Description:
+               The /sys/block/<disk>/diskseq files reports the disk
+               sequence number, which is a monotonically increasing
+               number assigned to every drive.
+               Some devices, like the loop device, refresh such number
+               every time the backing file is changed.
+               The value type is 64 bit unsigned.
+
+
 What:          /sys/block/<disk>/<part>/stat
 Date:          February 2008
 Contact:       Jerome Marchand <jmarchan@redhat.com>
diff --git a/Documentation/ABI/testing/sysfs-bus-event_source-devices-uncore b/Documentation/ABI/testing/sysfs-bus-event_source-devices-uncore
new file mode 100644 (file)
index 0000000..b56e8f0
--- /dev/null
@@ -0,0 +1,13 @@
+What:          /sys/bus/event_source/devices/uncore_*/alias
+Date:          June 2021
+KernelVersion: 5.15
+Contact:       Linux kernel mailing list <linux-kernel@vger.kernel.org>
+Description:   Read-only.  An attribute to describe the alias name of
+               the uncore PMU if an alias exists on some platforms.
+               The 'perf(1)' tool should treat both names the same.
+               They both can be used to access the uncore PMU.
+
+               Example:
+
+               $ cat /sys/devices/uncore_cha_2/alias
+               uncore_type_0_2
index 194ca70..ff30728 100644 (file)
@@ -28,3 +28,17 @@ Description:
                value comes from an ACPI _PXM method or a similar firmware
                source. Initial users for this file would be devices like
                arm smmu which are populated by arm64 acpi_iort.
+
+What:          /sys/bus/platform/devices/.../msi_irqs/
+Date:          August 2021
+Contact:       Barry Song <song.bao.hua@hisilicon.com>
+Description:
+               The /sys/devices/.../msi_irqs directory contains a variable set
+               of files, with each file being named after a corresponding msi
+               irq vector allocated to that device.
+
+What:          /sys/bus/platform/devices/.../msi_irqs/<N>
+Date:          August 2021
+Contact:       Barry Song <song.bao.hua@hisilicon.com>
+Description:
+               This attribute will show "msi" if <N> is a valid msi irq
index 11cdab0..eeb3512 100644 (file)
@@ -112,6 +112,35 @@ on PowerPC.
 The ``smp_mb__after_unlock_lock()`` invocations prevent this
 ``WARN_ON()`` from triggering.
 
++-----------------------------------------------------------------------+
+| **Quick Quiz**:                                                       |
++-----------------------------------------------------------------------+
+| But the chain of rcu_node-structure lock acquisitions guarantees      |
+| that new readers will see all of the updater's pre-grace-period       |
+| accesses and also guarantees that the updater's post-grace-period     |
+| accesses will see all of the old reader's accesses.  So why do we     |
+| need all of those calls to smp_mb__after_unlock_lock()?               |
++-----------------------------------------------------------------------+
+| **Answer**:                                                           |
++-----------------------------------------------------------------------+
+| Because we must provide ordering for RCU's polling grace-period       |
+| primitives, for example, get_state_synchronize_rcu() and              |
+| poll_state_synchronize_rcu().  Consider this code::                   |
+|                                                                       |
+|  CPU 0                                     CPU 1                      |
+|  ----                                      ----                       |
+|  WRITE_ONCE(X, 1)                          WRITE_ONCE(Y, 1)           |
+|  g = get_state_synchronize_rcu()           smp_mb()                   |
+|  while (!poll_state_synchronize_rcu(g))    r1 = READ_ONCE(X)          |
+|          continue;                                                    |
+|  r0 = READ_ONCE(Y)                                                    |
+|                                                                       |
+| RCU guarantees that the outcome r0 == 0 && r1 == 0 will not           |
+| happen, even if CPU 1 is in an RCU extended quiescent state           |
+| (idle or offline) and thus won't interact directly with the RCU       |
+| core processing at all.                                               |
++-----------------------------------------------------------------------+
+
 This approach must be extended to include idle CPUs, which need
 RCU's grace-period memory ordering guarantee to extend to any
 RCU read-side critical sections preceding and following the current
index 38a3947..45278e2 100644 (file)
@@ -362,9 +362,8 @@ do_something_gp() uses rcu_dereference() to fetch from ``gp``:
       12 }
 
 The rcu_dereference() uses volatile casts and (for DEC Alpha) memory
-barriers in the Linux kernel. Should a `high-quality implementation of
-C11 ``memory_order_consume``
-[PDF] <http://www.rdrop.com/users/paulmck/RCU/consume.2015.07.13a.pdf>`__
+barriers in the Linux kernel. Should a |high-quality implementation of
+C11 memory_order_consume [PDF]|_
 ever appear, then rcu_dereference() could be implemented as a
 ``memory_order_consume`` load. Regardless of the exact implementation, a
 pointer fetched by rcu_dereference() may not be used outside of the
@@ -374,6 +373,9 @@ element has been passed from RCU to some other synchronization
 mechanism, most commonly locking or `reference
 counting <https://www.kernel.org/doc/Documentation/RCU/rcuref.txt>`__.
 
+.. |high-quality implementation of C11 memory_order_consume [PDF]| replace:: high-quality implementation of C11 ``memory_order_consume`` [PDF]
+.. _high-quality implementation of C11 memory_order_consume [PDF]: http://www.rdrop.com/users/paulmck/RCU/consume.2015.07.13a.pdf
+
 In short, updaters use rcu_assign_pointer() and readers use
 rcu_dereference(), and these two RCU API elements work together to
 ensure that readers have a consistent view of newly added data elements.
index 01cc21f..f4545b7 100644 (file)
@@ -37,7 +37,7 @@ over a rather long period of time, but improvements are always welcome!
 
 1.     Does the update code have proper mutual exclusion?
 
-       RCU does allow -readers- to run (almost) naked, but -writers- must
+       RCU does allow *readers* to run (almost) naked, but *writers* must
        still use some sort of mutual exclusion, such as:
 
        a.      locking,
@@ -73,7 +73,7 @@ over a rather long period of time, but improvements are always welcome!
        critical section is every bit as bad as letting them leak out
        from under a lock.  Unless, of course, you have arranged some
        other means of protection, such as a lock or a reference count
-       -before- letting them out of the RCU read-side critical section.
+       *before* letting them out of the RCU read-side critical section.
 
 3.     Does the update code tolerate concurrent accesses?
 
@@ -101,7 +101,7 @@ over a rather long period of time, but improvements are always welcome!
        c.      Make updates appear atomic to readers.  For example,
                pointer updates to properly aligned fields will
                appear atomic, as will individual atomic primitives.
-               Sequences of operations performed under a lock will -not-
+               Sequences of operations performed under a lock will *not*
                appear to be atomic to RCU readers, nor will sequences
                of multiple atomic primitives.
 
@@ -333,7 +333,7 @@ over a rather long period of time, but improvements are always welcome!
        for example) may be omitted.
 
 10.    Conversely, if you are in an RCU read-side critical section,
-       and you don't hold the appropriate update-side lock, you -must-
+       and you don't hold the appropriate update-side lock, you *must*
        use the "_rcu()" variants of the list macros.  Failing to do so
        will break Alpha, cause aggressive compilers to generate bad code,
        and confuse people trying to read your code.
@@ -359,12 +359,12 @@ over a rather long period of time, but improvements are always welcome!
        callback pending, then that RCU callback will execute on some
        surviving CPU.  (If this was not the case, a self-spawning RCU
        callback would prevent the victim CPU from ever going offline.)
-       Furthermore, CPUs designated by rcu_nocbs= might well -always-
+       Furthermore, CPUs designated by rcu_nocbs= might well *always*
        have their RCU callbacks executed on some other CPUs, in fact,
        for some  real-time workloads, this is the whole point of using
        the rcu_nocbs= kernel boot parameter.
 
-13.    Unlike other forms of RCU, it -is- permissible to block in an
+13.    Unlike other forms of RCU, it *is* permissible to block in an
        SRCU read-side critical section (demarked by srcu_read_lock()
        and srcu_read_unlock()), hence the "SRCU": "sleepable RCU".
        Please note that if you don't need to sleep in read-side critical
@@ -411,16 +411,16 @@ over a rather long period of time, but improvements are always welcome!
 14.    The whole point of call_rcu(), synchronize_rcu(), and friends
        is to wait until all pre-existing readers have finished before
        carrying out some otherwise-destructive operation.  It is
-       therefore critically important to -first- remove any path
+       therefore critically important to *first* remove any path
        that readers can follow that could be affected by the
-       destructive operation, and -only- -then- invoke call_rcu(),
+       destructive operation, and *only then* invoke call_rcu(),
        synchronize_rcu(), or friends.
 
        Because these primitives only wait for pre-existing readers, it
        is the caller's responsibility to guarantee that any subsequent
        readers will execute safely.
 
-15.    The various RCU read-side primitives do -not- necessarily contain
+15.    The various RCU read-side primitives do *not* necessarily contain
        memory barriers.  You should therefore plan for the CPU
        and the compiler to freely reorder code into and out of RCU
        read-side critical sections.  It is the responsibility of the
@@ -459,8 +459,8 @@ over a rather long period of time, but improvements are always welcome!
        pass in a function defined within a loadable module, then it in
        necessary to wait for all pending callbacks to be invoked after
        the last invocation and before unloading that module.  Note that
-       it is absolutely -not- sufficient to wait for a grace period!
-       The current (say) synchronize_rcu() implementation is -not-
+       it is absolutely *not* sufficient to wait for a grace period!
+       The current (say) synchronize_rcu() implementation is *not*
        guaranteed to wait for callbacks registered on other CPUs.
        Or even on the current CPU if that CPU recently went offline
        and came back online.
@@ -470,7 +470,7 @@ over a rather long period of time, but improvements are always welcome!
        -       call_rcu() -> rcu_barrier()
        -       call_srcu() -> srcu_barrier()
 
-       However, these barrier functions are absolutely -not- guaranteed
+       However, these barrier functions are absolutely *not* guaranteed
        to wait for a grace period.  In fact, if there are no call_rcu()
        callbacks waiting anywhere in the system, rcu_barrier() is within
        its rights to return immediately.
index f3e587a..0b418a5 100644 (file)
@@ -43,7 +43,7 @@ Follow these rules to keep your RCU code working properly:
        -       Set bits and clear bits down in the must-be-zero low-order
                bits of that pointer.  This clearly means that the pointer
                must have alignment constraints, for example, this does
-               -not- work in general for char* pointers.
+               *not* work in general for char* pointers.
 
        -       XOR bits to translate pointers, as is done in some
                classic buddy-allocator algorithms.
@@ -174,7 +174,7 @@ Follow these rules to keep your RCU code working properly:
                Please see the "CONTROL DEPENDENCIES" section of
                Documentation/memory-barriers.txt for more details.
 
-       -       The pointers are not equal -and- the compiler does
+       -       The pointers are not equal *and* the compiler does
                not have enough information to deduce the value of the
                pointer.  Note that the volatile cast in rcu_dereference()
                will normally prevent the compiler from knowing too much.
@@ -360,7 +360,7 @@ in turn destroying the ordering between this load and the loads of the
 return values.  This can result in "p->b" returning pre-initialization
 garbage values.
 
-In short, rcu_dereference() is -not- optional when you are going to
+In short, rcu_dereference() is *not* optional when you are going to
 dereference the resulting pointer.
 
 
index 7148e9b..5036df2 100644 (file)
@@ -32,7 +32,7 @@ warnings:
 
 -      Booting Linux using a console connection that is too slow to
        keep up with the boot-time console-message rate.  For example,
-       a 115Kbaud serial console can be -way- too slow to keep up
+       a 115Kbaud serial console can be *way* too slow to keep up
        with boot-time message rates, and will frequently result in
        RCU CPU stall warning messages.  Especially if you have added
        debug printk()s.
@@ -105,7 +105,7 @@ warnings:
        leading the realization that the CPU had failed.
 
 The RCU, RCU-sched, and RCU-tasks implementations have CPU stall warning.
-Note that SRCU does -not- have CPU stall warnings.  Please note that
+Note that SRCU does *not* have CPU stall warnings.  Please note that
 RCU only detects CPU stalls when there is a grace period in progress.
 No grace period, no CPU stall warnings.
 
@@ -145,7 +145,7 @@ CONFIG_RCU_CPU_STALL_TIMEOUT
        this parameter is checked only at the beginning of a cycle.
        So if you are 10 seconds into a 40-second stall, setting this
        sysfs parameter to (say) five will shorten the timeout for the
-       -next- stall, or the following warning for the current stall
+       *next* stall, or the following warning for the current stall
        (assuming the stall lasts long enough).  It will not affect the
        timing of the next warning for the current stall.
 
@@ -189,8 +189,8 @@ rcupdate.rcu_task_stall_timeout
 Interpreting RCU's CPU Stall-Detector "Splats"
 ==============================================
 
-For non-RCU-tasks flavors of RCU, when a CPU detects that it is stalling,
-it will print a message similar to the following::
+For non-RCU-tasks flavors of RCU, when a CPU detects that some other
+CPU is stalling, it will print a message similar to the following::
 
        INFO: rcu_sched detected stalls on CPUs/tasks:
        2-...: (3 GPs behind) idle=06c/0/0 softirq=1453/1455 fqs=0
@@ -202,8 +202,10 @@ causing stalls, and that the stall was affecting RCU-sched.  This message
 will normally be followed by stack dumps for each CPU.  Please note that
 PREEMPT_RCU builds can be stalled by tasks as well as by CPUs, and that
 the tasks will be indicated by PID, for example, "P3421".  It is even
-possible for an rcu_state stall to be caused by both CPUs -and- tasks,
+possible for an rcu_state stall to be caused by both CPUs *and* tasks,
 in which case the offending CPUs and tasks will all be called out in the list.
+In some cases, CPUs will detect themselves stalling, which will result
+in a self-detected stall.
 
 CPU 2's "(3 GPs behind)" indicates that this CPU has not interacted with
 the RCU core for the past three grace periods.  In contrast, CPU 16's "(0
@@ -224,7 +226,7 @@ is the number that had executed since boot at the time that this CPU
 last noted the beginning of a grace period, which might be the current
 (stalled) grace period, or it might be some earlier grace period (for
 example, if the CPU might have been in dyntick-idle mode for an extended
-time period.  The number after the "/" is the number that have executed
+time period).  The number after the "/" is the number that have executed
 since boot until the current time.  If this latter number stays constant
 across repeated stall-warning messages, it is possible that RCU's softirq
 handlers are no longer able to execute on this CPU.  This can happen if
@@ -283,7 +285,8 @@ If the relevant grace-period kthread has been unable to run prior to
 the stall warning, as was the case in the "All QSes seen" line above,
 the following additional line is printed::
 
-       kthread starved for 23807 jiffies! g7075 f0x0 RCU_GP_WAIT_FQS(3) ->state=0x1 ->cpu=5
+       rcu_sched kthread starved for 23807 jiffies! g7075 f0x0 RCU_GP_WAIT_FQS(3) ->state=0x1 ->cpu=5
+       Unless rcu_sched kthread gets sufficient CPU time, OOM is now expected behavior.
 
 Starving the grace-period kthreads of CPU time can of course result
 in RCU CPU stall warnings even when all CPUs and tasks have passed
@@ -313,15 +316,21 @@ is the current ``TIMER_SOFTIRQ`` count on cpu 4.  If this value does not
 change on successive RCU CPU stall warnings, there is further reason to
 suspect a timer problem.
 
+These messages are usually followed by stack dumps of the CPUs and tasks
+involved in the stall.  These stack traces can help you locate the cause
+of the stall, keeping in mind that the CPU detecting the stall will have
+an interrupt frame that is mainly devoted to detecting the stall.
+
 
 Multiple Warnings From One Stall
 ================================
 
-If a stall lasts long enough, multiple stall-warning messages will be
-printed for it.  The second and subsequent messages are printed at
+If a stall lasts long enough, multiple stall-warning messages will
+be printed for it.  The second and subsequent messages are printed at
 longer intervals, so that the time between (say) the first and second
 message will be about three times the interval between the beginning
-of the stall and the first message.
+of the stall and the first message.  It can be helpful to compare the
+stack dumps for the different messages for the same stalled grace period.
 
 
 Stall Warnings for Expedited Grace Periods
index f12cda5..8cbc711 100644 (file)
@@ -16,3 +16,4 @@ are configurable at compile, boot or run time.
    multihit.rst
    special-register-buffer-data-sampling.rst
    core-scheduling.rst
+   l1d_flush.rst
diff --git a/Documentation/admin-guide/hw-vuln/l1d_flush.rst b/Documentation/admin-guide/hw-vuln/l1d_flush.rst
new file mode 100644 (file)
index 0000000..210020b
--- /dev/null
@@ -0,0 +1,69 @@
+L1D Flushing
+============
+
+With an increasing number of vulnerabilities being reported around data
+leaks from the Level 1 Data cache (L1D) the kernel provides an opt-in
+mechanism to flush the L1D cache on context switch.
+
+This mechanism can be used to address e.g. CVE-2020-0550. For applications
+the mechanism keeps them safe from vulnerabilities, related to leaks
+(snooping of) from the L1D cache.
+
+
+Related CVEs
+------------
+The following CVEs can be addressed by this
+mechanism
+
+    =============       ========================     ==================
+    CVE-2020-0550       Improper Data Forwarding     OS related aspects
+    =============       ========================     ==================
+
+Usage Guidelines
+----------------
+
+Please see document: :ref:`Documentation/userspace-api/spec_ctrl.rst
+<set_spec_ctrl>` for details.
+
+**NOTE**: The feature is disabled by default, applications need to
+specifically opt into the feature to enable it.
+
+Mitigation
+----------
+
+When PR_SET_L1D_FLUSH is enabled for a task a flush of the L1D cache is
+performed when the task is scheduled out and the incoming task belongs to a
+different process and therefore to a different address space.
+
+If the underlying CPU supports L1D flushing in hardware, the hardware
+mechanism is used, software fallback for the mitigation, is not supported.
+
+Mitigation control on the kernel command line
+---------------------------------------------
+
+The kernel command line allows to control the L1D flush mitigations at boot
+time with the option "l1d_flush=". The valid arguments for this option are:
+
+  ============  =============================================================
+  on            Enables the prctl interface, applications trying to use
+                the prctl() will fail with an error if l1d_flush is not
+                enabled
+  ============  =============================================================
+
+By default the mechanism is disabled.
+
+Limitations
+-----------
+
+The mechanism does not mitigate L1D data leaks between tasks belonging to
+different processes which are concurrently executing on sibling threads of
+a physical CPU core when SMT is enabled on the system.
+
+This can be addressed by controlled placement of processes on physical CPU
+cores or by disabling SMT. See the relevant chapter in the L1TF mitigation
+document: :ref:`Documentation/admin-guide/hw-vuln/l1tf.rst <smt_control>`.
+
+**NOTE** : The opt-in of a task for L1D flushing works only when the task's
+affinity is limited to cores running in non-SMT mode. If a task which
+requested L1D flushing is scheduled on a SMT-enabled core the kernel sends
+a SIGBUS to the task.
index bdb2200..56bd70e 100644 (file)
                        feature (tagged TLBs) on capable Intel chips.
                        Default is 1 (enabled)
 
+       l1d_flush=      [X86,INTEL]
+                       Control mitigation for L1D based snooping vulnerability.
+
+                       Certain CPUs are vulnerable to an exploit against CPU
+                       internal buffers which can forward information to a
+                       disclosure gadget under certain conditions.
+
+                       In vulnerable processors, the speculatively
+                       forwarded data can be used in a cache side channel
+                       attack, to access data to which the attacker does
+                       not have direct access.
+
+                       This parameter controls the mitigation. The
+                       options are:
+
+                       on         - enable the interface for the mitigation
+
        l1tf=           [X86] Control mitigation of the L1TF vulnerability on
                              affected CPUs
 
 
        reboot=         [KNL]
                        Format (x86 or x86_64):
-                               [w[arm] | c[old] | h[ard] | s[oft] | g[pio]] \
+                               [w[arm] | c[old] | h[ard] | s[oft] | g[pio]] | d[efault] \
                                [[,]s[mp]#### \
                                [[,]b[ios] | a[cpi] | k[bd] | t[riple] | e[fi] | p[ci]] \
                                [[,]f[orce]
index 0f1fded..0f1ffa0 100644 (file)
@@ -271,3 +271,97 @@ WRITE_ONCE.  Thus:
                        SC *y, t;
 
 is allowed.
+
+
+CMPXCHG vs TRY_CMPXCHG
+----------------------
+
+  int atomic_cmpxchg(atomic_t *ptr, int old, int new);
+  bool atomic_try_cmpxchg(atomic_t *ptr, int *oldp, int new);
+
+Both provide the same functionality, but try_cmpxchg() can lead to more
+compact code. The functions relate like:
+
+  bool atomic_try_cmpxchg(atomic_t *ptr, int *oldp, int new)
+  {
+    int ret, old = *oldp;
+    ret = atomic_cmpxchg(ptr, old, new);
+    if (ret != old)
+      *oldp = ret;
+    return ret == old;
+  }
+
+and:
+
+  int atomic_cmpxchg(atomic_t *ptr, int old, int new)
+  {
+    (void)atomic_try_cmpxchg(ptr, &old, new);
+    return old;
+  }
+
+Usage:
+
+  old = atomic_read(&v);                       old = atomic_read(&v);
+  for (;;) {                                   do {
+    new = func(old);                             new = func(old);
+    tmp = atomic_cmpxchg(&v, old, new);                } while (!atomic_try_cmpxchg(&v, &old, new));
+    if (tmp == old)
+      break;
+    old = tmp;
+  }
+
+NB. try_cmpxchg() also generates better code on some platforms (notably x86)
+where the function more closely matches the hardware instruction.
+
+
+FORWARD PROGRESS
+----------------
+
+In general strong forward progress is expected of all unconditional atomic
+operations -- those in the Arithmetic and Bitwise classes and xchg(). However
+a fair amount of code also requires forward progress from the conditional
+atomic operations.
+
+Specifically 'simple' cmpxchg() loops are expected to not starve one another
+indefinitely. However, this is not evident on LL/SC architectures, because
+while an LL/SC architecure 'can/should/must' provide forward progress
+guarantees between competing LL/SC sections, such a guarantee does not
+transfer to cmpxchg() implemented using LL/SC. Consider:
+
+  old = atomic_read(&v);
+  do {
+    new = func(old);
+  } while (!atomic_try_cmpxchg(&v, &old, new));
+
+which on LL/SC becomes something like:
+
+  old = atomic_read(&v);
+  do {
+    new = func(old);
+  } while (!({
+    volatile asm ("1: LL  %[oldval], %[v]\n"
+                  "   CMP %[oldval], %[old]\n"
+                  "   BNE 2f\n"
+                  "   SC  %[new], %[v]\n"
+                  "   BNE 1b\n"
+                  "2:\n"
+                  : [oldval] "=&r" (oldval), [v] "m" (v)
+                 : [old] "r" (old), [new] "r" (new)
+                  : "memory");
+    success = (oldval == old);
+    if (!success)
+      old = oldval;
+    success; }));
+
+However, even the forward branch from the failed compare can cause the LL/SC
+to fail on some architectures, let alone whatever the compiler makes of the C
+loop body. As a result there is no guarantee what so ever the cacheline
+containing @v will stay on the local CPU and progress is made.
+
+Even native CAS architectures can fail to provide forward progress for their
+primitive (See Sparc64 for an example).
+
+Such implementations are strongly encouraged to add exponential backoff loops
+to a failed CAS in order to ensure some progress. Affected architectures are
+also strongly encouraged to inspect/audit the atomic fallbacks, refcount_t and
+their locking primitives.
index 3de1d51..6bf9c5a 100644 (file)
@@ -108,7 +108,7 @@ This bump in ABI version is at most once per kernel development cycle.
 
 For example, if current state of ``libbpf.map`` is:
 
-.. code-block:: c
+.. code-block:: none
 
         LIBBPF_0.0.1 {
                global:
@@ -121,7 +121,7 @@ For example, if current state of ``libbpf.map`` is:
 , and a new symbol ``bpf_func_c`` is being introduced, then
 ``libbpf.map`` should be changed like this:
 
-.. code-block:: c
+.. code-block:: none
 
         LIBBPF_0.0.1 {
                global:
index a2c96be..1122cd3 100644 (file)
@@ -220,7 +220,7 @@ goes online (offline) and during initial setup (shutdown) of the driver. However
 each registration and removal function is also available with a ``_nocalls``
 suffix which does not invoke the provided callbacks if the invocation of the
 callbacks is not desired. During the manual setup (or teardown) the functions
-``get_online_cpus()`` and ``put_online_cpus()`` should be used to inhibit CPU
+``cpus_read_lock()`` and ``cpus_read_unlock()`` should be used to inhibit CPU
 hotplug operations.
 
 
index 53283b3..6979b4a 100644 (file)
@@ -55,8 +55,24 @@ exist then it will allocate a new Linux irq_desc, associate it with
 the hwirq, and call the .map() callback so the driver can perform any
 required hardware setup.
 
-When an interrupt is received, irq_find_mapping() function should
-be used to find the Linux IRQ number from the hwirq number.
+Once a mapping has been established, it can be retrieved or used via a
+variety of methods:
+
+- irq_resolve_mapping() returns a pointer to the irq_desc structure
+  for a given domain and hwirq number, and NULL if there was no
+  mapping.
+- irq_find_mapping() returns a Linux IRQ number for a given domain and
+  hwirq number, and 0 if there was no mapping
+- irq_linear_revmap() is now identical to irq_find_mapping(), and is
+  deprecated
+- generic_handle_domain_irq() handles an interrupt described by a
+  domain and a hwirq number
+- handle_domain_irq() does the same thing for root interrupt
+  controllers and deals with the set_irq_reg()/irq_enter() sequences
+  that most architecture requires
+
+Note that irq domain lookups must happen in contexts that are
+compatible with a RCU read-side critical section.
 
 The irq_create_mapping() function must be called *atleast once*
 before any call to irq_find_mapping(), lest the descriptor will not
@@ -137,7 +153,9 @@ required.  Calling irq_create_direct_mapping() will allocate a Linux
 IRQ number and call the .map() callback so that driver can program the
 Linux IRQ number into the hardware.
 
-Most drivers cannot use this mapping.
+Most drivers cannot use this mapping, and it is now gated on the
+CONFIG_IRQ_DOMAIN_NOMAP option. Please refrain from introducing new
+users of this API.
 
 Legacy
 ------
@@ -157,6 +175,10 @@ for IRQ numbers that are passed to struct device registrations.  In that
 case the Linux IRQ numbers cannot be dynamically assigned and the legacy
 mapping should be used.
 
+As the name implies, the *_legacy() functions are deprecated and only
+exist to ease the support of ancient platforms. No new users should be
+added.
+
 The legacy map assumes a contiguous range of IRQ numbers has already
 been allocated for the controller and that the IRQ number can be
 calculated by adding a fixed offset to the hwirq number, and
index e425278..e2ca0b0 100644 (file)
@@ -19,7 +19,6 @@ properties:
   compatible:
     enum:
       - ibm,fsi2spi
-      - ibm,fsi2spi-restricted
 
   reg:
     items:
index d993e00..0d62c28 100644 (file)
@@ -22,7 +22,10 @@ properties:
     maxItems: 1
 
   clocks:
-    maxItems: 1
+    minItems: 1
+    items:
+      - description: APB interface clock source
+      - description: GPIO debounce reference clock source
 
   gpio-controller: true
 
index b2a1e42..71de563 100644 (file)
@@ -152,47 +152,6 @@ allOf:
           maxItems: 1
         st,drdy-int-pin: false
 
-  - if:
-      properties:
-        compatible:
-          enum:
-            # Two intertial interrupts i.e. accelerometer/gyro interrupts
-            - st,h3lis331dl-accel
-            - st,l3g4200d-gyro
-            - st,l3g4is-gyro
-            - st,l3gd20-gyro
-            - st,l3gd20h-gyro
-            - st,lis2de12
-            - st,lis2dw12
-            - st,lis2hh12
-            - st,lis2dh12-accel
-            - st,lis331dl-accel
-            - st,lis331dlh-accel
-            - st,lis3de
-            - st,lis3dh-accel
-            - st,lis3dhh
-            - st,lis3mdl-magn
-            - st,lng2dm-accel
-            - st,lps331ap-press
-            - st,lsm303agr-accel
-            - st,lsm303dlh-accel
-            - st,lsm303dlhc-accel
-            - st,lsm303dlm-accel
-            - st,lsm330-accel
-            - st,lsm330-gyro
-            - st,lsm330d-accel
-            - st,lsm330d-gyro
-            - st,lsm330dl-accel
-            - st,lsm330dl-gyro
-            - st,lsm330dlc-accel
-            - st,lsm330dlc-gyro
-            - st,lsm9ds0-gyro
-            - st,lsm9ds1-magn
-    then:
-      properties:
-        interrupts:
-          maxItems: 2
-
 required:
   - compatible
   - reg
index c3b4b75..d56ac48 100644 (file)
@@ -31,6 +31,20 @@ properties:
   compatible:
     const: simple-battery
 
+  device-chemistry:
+    description: This describes the chemical technology of the battery.
+    oneOf:
+      - const: nickel-cadmium
+      - const: nickel-metal-hydride
+      - const: lithium-ion
+        description: This is a blanket type for all lithium-ion batteries,
+          including those below. If possible, a precise compatible string
+          from below should be used, but sometimes it is unknown which specific
+          lithium ion battery is employed and this wide compatible can be used.
+      - const: lithium-ion-polymer
+      - const: lithium-ion-iron-phosphate
+      - const: lithium-ion-manganese-oxide
+
   over-voltage-threshold-microvolt:
     description: battery over-voltage limit
 
index c70f05e..971b53c 100644 (file)
@@ -19,12 +19,15 @@ properties:
       - maxim,max17047
       - maxim,max17050
       - maxim,max17055
+      - maxim,max77849-battery
 
   reg:
     maxItems: 1
 
   interrupts:
     maxItems: 1
+    description: |
+      The ALRT pin, an open-drain interrupt.
 
   maxim,rsns-microohm:
     $ref: /schemas/types.yaml#/definitions/uint32
diff --git a/Documentation/devicetree/bindings/power/supply/mt6360_charger.yaml b/Documentation/devicetree/bindings/power/supply/mt6360_charger.yaml
new file mode 100644 (file)
index 0000000..b89b15a
--- /dev/null
@@ -0,0 +1,48 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/power/supply/mt6360_charger.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Battery charger driver for MT6360 PMIC from MediaTek Integrated.
+
+maintainers:
+  - Gene Chen <gene_chen@richtek.com>
+
+description: |
+  This module is part of the MT6360 MFD device.
+  Provides Battery Charger, Boost for OTG devices and BC1.2 detection.
+
+properties:
+  compatible:
+    const: mediatek,mt6360-chg
+
+  richtek,vinovp-microvolt:
+    description: Maximum CHGIN regulation voltage in uV.
+    enum: [ 5500000, 6500000, 11000000, 14500000 ]
+
+
+  usb-otg-vbus-regulator:
+    type: object
+    description: OTG boost regulator.
+    $ref: /schemas/regulator/regulator.yaml#
+
+required:
+  - compatible
+
+additionalProperties: false
+
+examples:
+  - |
+    mt6360_charger: charger {
+      compatible = "mediatek,mt6360-chg";
+      richtek,vinovp-microvolt = <14500000>;
+
+      otg_vbus_regulator: usb-otg-vbus-regulator {
+        regulator-compatible = "usb-otg-vbus";
+        regulator-name = "usb-otg-vbus";
+        regulator-min-microvolt = <4425000>;
+        regulator-max-microvolt = <5825000>;
+      };
+    };
+...
index 983fc21..20862cd 100644 (file)
@@ -73,6 +73,26 @@ properties:
       - 1 # SMB3XX_SOFT_TEMP_COMPENSATE_CURRENT Current compensation
       - 2 # SMB3XX_SOFT_TEMP_COMPENSATE_VOLTAGE Voltage compensation
 
+  summit,inok-polarity:
+    description: |
+      Polarity of INOK signal indicating presence of external power supply.
+    $ref: /schemas/types.yaml#/definitions/uint32
+    enum:
+      - 0 # SMB3XX_SYSOK_INOK_ACTIVE_LOW
+      - 1 # SMB3XX_SYSOK_INOK_ACTIVE_HIGH
+
+  usb-vbus:
+    $ref: "../../regulator/regulator.yaml#"
+    type: object
+
+    properties:
+      summit,needs-inok-toggle:
+        type: boolean
+        description: INOK signal is fixed and polarity needs to be toggled
+                     in order to enable/disable output mode.
+
+    unevaluatedProperties: false
+
 allOf:
   - if:
       properties:
@@ -134,6 +154,7 @@ examples:
             reg = <0x7f>;
 
             summit,enable-charge-control = <SMB3XX_CHG_ENABLE_PIN_ACTIVE_HIGH>;
+            summit,inok-polarity = <SMB3XX_SYSOK_INOK_ACTIVE_LOW>;
             summit,chip-temperature-threshold-celsius = <110>;
             summit,mains-current-limit-microamp = <2000000>;
             summit,usb-current-limit-microamp = <500000>;
@@ -141,6 +162,15 @@ examples:
             summit,enable-mains-charging;
 
             monitored-battery = <&battery>;
+
+            usb-vbus {
+                regulator-name = "usb_vbus";
+                regulator-min-microvolt = <5000000>;
+                regulator-max-microvolt = <5000000>;
+                regulator-min-microamp = <750000>;
+                regulator-max-microamp = <750000>;
+                summit,needs-inok-toggle;
+            };
         };
     };
 
index dcda666..de6a23a 100644 (file)
@@ -21,10 +21,13 @@ allOf:
 
 properties:
   compatible:
-    enum:
-      - x-powers,axp202-ac-power-supply
-      - x-powers,axp221-ac-power-supply
-      - x-powers,axp813-ac-power-supply
+    oneOf:
+      - const: x-powers,axp202-ac-power-supply
+      - const: x-powers,axp221-ac-power-supply
+      - items:
+          - const: x-powers,axp803-ac-power-supply
+          - const: x-powers,axp813-ac-power-supply
+      - const: x-powers,axp813-ac-power-supply
 
 required:
   - compatible
index 86e8a71..d055428 100644 (file)
@@ -19,10 +19,14 @@ allOf:
 
 properties:
   compatible:
-    enum:
-      - x-powers,axp209-battery-power-supply
-      - x-powers,axp221-battery-power-supply
-      - x-powers,axp813-battery-power-supply
+    oneOf:
+      - const: x-powers,axp202-battery-power-supply
+      - const: x-powers,axp209-battery-power-supply
+      - const: x-powers,axp221-battery-power-supply
+      - items:
+          - const: x-powers,axp803-battery-power-supply
+          - const: x-powers,axp813-battery-power-supply
+      - const: x-powers,axp813-battery-power-supply
 
 required:
   - compatible
index 61f1b32..0c371b5 100644 (file)
@@ -20,11 +20,15 @@ allOf:
 
 properties:
   compatible:
-    enum:
-      - x-powers,axp202-usb-power-supply
-      - x-powers,axp221-usb-power-supply
-      - x-powers,axp223-usb-power-supply
-      - x-powers,axp813-usb-power-supply
+    oneOf:
+      - enum:
+          - x-powers,axp202-usb-power-supply
+          - x-powers,axp221-usb-power-supply
+          - x-powers,axp223-usb-power-supply
+          - x-powers,axp813-usb-power-supply
+      - items:
+          - const: x-powers,axp803-usb-power-supply
+          - const: x-powers,axp813-usb-power-supply
 
 
 required:
diff --git a/Documentation/devicetree/bindings/regulator/richtek,rtq2134-regulator.yaml b/Documentation/devicetree/bindings/regulator/richtek,rtq2134-regulator.yaml
new file mode 100644 (file)
index 0000000..3f47e8e
--- /dev/null
@@ -0,0 +1,106 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/regulator/richtek,rtq2134-regulator.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Richtek RTQ2134 SubPMIC Regulator
+
+maintainers:
+  - ChiYuan Huang <cy_huang@richtek.com>
+
+description: |
+  The RTQ2134 is a multi-phase, programmable power management IC that
+  integrates with four high efficient, synchronous step-down converter cores.
+
+  Datasheet is available at
+  https://www.richtek.com/assets/product_file/RTQ2134-QA/DSQ2134-QA-01.pdf
+
+properties:
+  compatible:
+    enum:
+      - richtek,rtq2134
+
+  reg:
+    maxItems: 1
+
+  regulators:
+    type: object
+
+    patternProperties:
+      "^buck[1-3]$":
+        type: object
+        $ref: regulator.yaml#
+        description: |
+          regulator description for buck[1-3].
+
+        properties:
+          richtek,use-vsel-dvs:
+            type: boolean
+            description: |
+              If specified, buck will listen to 'vsel' pin for dvs config.
+              Else, use dvs0 voltage by default.
+
+          richtek,uv-shutdown:
+            type: boolean
+            description: |
+              If specified, use shutdown as UV action. Else, hiccup by default.
+
+        unevaluatedProperties: false
+
+    additionalProperties: false
+
+required:
+  - compatible
+  - reg
+  - regulators
+
+additionalProperties: false
+
+examples:
+  - |
+    i2c {
+      #address-cells = <1>;
+      #size-cells = <0>;
+
+      rtq2134@18 {
+        compatible = "richtek,rtq2134";
+        reg = <0x18>;
+
+        regulators {
+          buck1 {
+            regulator-name = "rtq2134-buck1";
+            regulator-min-microvolt = <300000>;
+            regulator-max-microvolt = <1850000>;
+            regulator-always-on;
+            richtek,use-vsel-dvs;
+            regulator-state-mem {
+              regulator-suspend-min-microvolt = <550000>;
+              regulator-suspend-max-microvolt = <550000>;
+            };
+          };
+          buck2 {
+            regulator-name = "rtq2134-buck2";
+            regulator-min-microvolt = <1120000>;
+            regulator-max-microvolt = <1120000>;
+            regulator-always-on;
+            richtek,use-vsel-dvs;
+            regulator-state-mem {
+              regulator-suspend-min-microvolt = <1120000>;
+              regulator-suspend-max-microvolt = <1120000>;
+            };
+          };
+          buck3 {
+            regulator-name = "rtq2134-buck3";
+            regulator-min-microvolt = <600000>;
+            regulator-max-microvolt = <600000>;
+            regulator-always-on;
+            richtek,use-vsel-dvs;
+            regulator-state-mem {
+              regulator-suspend-min-microvolt = <600000>;
+              regulator-suspend-max-microvolt = <600000>;
+            };
+          };
+        };
+      };
+    };
diff --git a/Documentation/devicetree/bindings/regulator/richtek,rtq6752-regulator.yaml b/Documentation/devicetree/bindings/regulator/richtek,rtq6752-regulator.yaml
new file mode 100644 (file)
index 0000000..e6e5a9a
--- /dev/null
@@ -0,0 +1,76 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/regulator/richtek,rtq6752-regulator.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Richtek RTQ6752 TFT LCD Voltage Regulator
+
+maintainers:
+  - ChiYuan Huang <cy_huang@richtek.com>
+
+description: |
+  The RTQ6752 is an I2C interface programmable power management IC. It includes
+  two synchronous boost converters for PAVDD, and one synchronous NAVDD
+  buck-boost. The device is suitable for automotive TFT-LCD panels.
+
+properties:
+  compatible:
+    enum:
+      - richtek,rtq6752
+
+  reg:
+    maxItems: 1
+
+  enable-gpios:
+    description: |
+      A connection of the chip 'enable' gpio line. If not provided, treat it as
+      external pull up.
+    maxItems: 1
+
+  regulators:
+    type: object
+
+    patternProperties:
+      "^(p|n)avdd$":
+        type: object
+        $ref: regulator.yaml#
+        description: |
+          regulator description for pavdd and navdd.
+
+    additionalProperties: false
+
+required:
+  - compatible
+  - reg
+  - regulators
+
+additionalProperties: false
+
+examples:
+  - |
+    i2c {
+      #address-cells = <1>;
+      #size-cells = <0>;
+
+      rtq6752@6b {
+        compatible = "richtek,rtq6752";
+        reg = <0x6b>;
+        enable-gpios = <&gpio26 2 0>;
+
+        regulators {
+          pavdd {
+            regulator-name = "rtq6752-pavdd";
+            regulator-min-microvolt = <5000000>;
+            regulator-max-microvolt = <7300000>;
+            regulator-boot-on;
+          };
+          navdd {
+            regulator-name = "rtq6752-navdd";
+            regulator-min-microvolt = <5000000>;
+            regulator-max-microvolt = <7300000>;
+            regulator-boot-on;
+          };
+        };
+      };
+    };
diff --git a/Documentation/devicetree/bindings/regulator/socionext,uniphier-regulator.yaml b/Documentation/devicetree/bindings/regulator/socionext,uniphier-regulator.yaml
new file mode 100644 (file)
index 0000000..861d5f3
--- /dev/null
@@ -0,0 +1,85 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/regulator/socionext,uniphier-regulator.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Socionext UniPhier regulator controller
+
+description: |
+  This regulator controls VBUS and belongs to USB3 glue layer. Before using
+  the regulator, it is necessary to control the clocks and resets to enable
+  this layer. These clocks and resets should be described in each property.
+
+maintainers:
+  - Kunihiko Hayashi <hayashi.kunihiko@socionext.com>
+
+allOf:
+  - $ref: "regulator.yaml#"
+
+# USB3 Controller
+
+properties:
+  compatible:
+    enum:
+      - socionext,uniphier-pro4-usb3-regulator
+      - socionext,uniphier-pro5-usb3-regulator
+      - socionext,uniphier-pxs2-usb3-regulator
+      - socionext,uniphier-ld20-usb3-regulator
+      - socionext,uniphier-pxs3-usb3-regulator
+
+  reg:
+    maxItems: 1
+
+  clocks:
+    minItems: 1
+    maxItems: 2
+
+  clock-names:
+    oneOf:
+      - items:          # for Pro4, Pro5
+          - const: gio
+          - const: link
+      - items:          # for others
+          - const: link
+
+  resets:
+    minItems: 1
+    maxItems: 2
+
+  reset-names:
+    oneOf:
+      - items:          # for Pro4, Pro5
+          - const: gio
+          - const: link
+      - items:
+          - const: link
+
+additionalProperties: false
+
+required:
+  - compatible
+  - reg
+  - clocks
+  - clock-names
+  - resets
+  - reset-names
+
+examples:
+  - |
+    usb-glue@65b00000 {
+        compatible = "simple-mfd";
+        #address-cells = <1>;
+        #size-cells = <1>;
+        ranges = <0 0x65b00000 0x400>;
+
+        usb_vbus0: regulators@100 {
+            compatible = "socionext,uniphier-ld20-usb3-regulator";
+            reg = <0x100 0x10>;
+            clock-names = "link";
+            clocks = <&sys_clk 14>;
+            reset-names = "link";
+            resets = <&sys_rst 14>;
+        };
+    };
+
diff --git a/Documentation/devicetree/bindings/regulator/uniphier-regulator.txt b/Documentation/devicetree/bindings/regulator/uniphier-regulator.txt
deleted file mode 100644 (file)
index 94fd38b..0000000
+++ /dev/null
@@ -1,58 +0,0 @@
-Socionext UniPhier Regulator Controller
-
-This describes the devicetree bindings for regulator controller implemented
-on Socionext UniPhier SoCs.
-
-USB3 Controller
----------------
-
-This regulator controls VBUS and belongs to USB3 glue layer. Before using
-the regulator, it is necessary to control the clocks and resets to enable
-this layer. These clocks and resets should be described in each property.
-
-Required properties:
-- compatible: Should be
-    "socionext,uniphier-pro4-usb3-regulator" - for Pro4 SoC
-    "socionext,uniphier-pro5-usb3-regulator" - for Pro5 SoC
-    "socionext,uniphier-pxs2-usb3-regulator" - for PXs2 SoC
-    "socionext,uniphier-ld20-usb3-regulator" - for LD20 SoC
-    "socionext,uniphier-pxs3-usb3-regulator" - for PXs3 SoC
-- reg: Specifies offset and length of the register set for the device.
-- clocks: A list of phandles to the clock gate for USB3 glue layer.
-       According to the clock-names, appropriate clocks are required.
-- clock-names: Should contain
-    "gio", "link" - for Pro4 and Pro5 SoCs
-    "link"        - for others
-- resets: A list of phandles to the reset control for USB3 glue layer.
-       According to the reset-names, appropriate resets are required.
-- reset-names: Should contain
-    "gio", "link" - for Pro4 and Pro5 SoCs
-    "link"        - for others
-
-See Documentation/devicetree/bindings/regulator/regulator.txt
-for more details about the regulator properties.
-
-Example:
-
-       usb-glue@65b00000 {
-               compatible = "socionext,uniphier-ld20-dwc3-glue",
-                            "simple-mfd";
-               #address-cells = <1>;
-               #size-cells = <1>;
-               ranges = <0 0x65b00000 0x400>;
-
-               usb_vbus0: regulators@100 {
-                       compatible = "socionext,uniphier-ld20-usb3-regulator";
-                       reg = <0x100 0x10>;
-                       clock-names = "link";
-                       clocks = <&sys_clk 14>;
-                       reset-names = "link";
-                       resets = <&sys_rst 14>;
-               };
-
-               phy {
-                       ...
-                       phy-supply = <&usb_vbus0>;
-               };
-               ...
-       };
index 1d38ff7..2b1f916 100644 (file)
@@ -24,10 +24,10 @@ allOf:
 select:
   properties:
     compatible:
-      items:
-        enum:
-            - sifive,fu540-c000-ccache
-            - sifive,fu740-c000-ccache
+      contains:
+        enum:
+          - sifive,fu540-c000-ccache
+          - sifive,fu740-c000-ccache
 
   required:
     - compatible
diff --git a/Documentation/devicetree/bindings/spi/omap-spi.txt b/Documentation/devicetree/bindings/spi/omap-spi.txt
deleted file mode 100644 (file)
index 487208c..0000000
+++ /dev/null
@@ -1,48 +0,0 @@
-OMAP2+ McSPI device
-
-Required properties:
-- compatible :
-  - "ti,am654-mcspi" for AM654.
-  - "ti,omap2-mcspi" for OMAP2 & OMAP3.
-  - "ti,omap4-mcspi" for OMAP4+.
-- ti,spi-num-cs : Number of chipselect supported  by the instance.
-- ti,hwmods: Name of the hwmod associated to the McSPI
-- ti,pindir-d0-out-d1-in: Select the D0 pin as output and D1 as
-                         input. The default is D0 as input and
-                         D1 as output.
-
-Optional properties:
-- dmas: List of DMA specifiers with the controller specific format
-       as described in the generic DMA client binding. A tx and rx
-       specifier is required for each chip select.
-- dma-names: List of DMA request names. These strings correspond
-       1:1 with the DMA specifiers listed in dmas. The string naming
-       is to be "rxN" and "txN" for RX and TX requests,
-       respectively, where N equals the chip select number.
-
-Examples:
-
-[hwmod populated DMA resources]
-
-mcspi1: mcspi@1 {
-    #address-cells = <1>;
-    #size-cells = <0>;
-    compatible = "ti,omap4-mcspi";
-    ti,hwmods = "mcspi1";
-    ti,spi-num-cs = <4>;
-};
-
-[generic DMA request binding]
-
-mcspi1: mcspi@1 {
-    #address-cells = <1>;
-    #size-cells = <0>;
-    compatible = "ti,omap4-mcspi";
-    ti,hwmods = "mcspi1";
-    ti,spi-num-cs = <2>;
-    dmas = <&edma 42
-           &edma 43
-           &edma 44
-           &edma 45>;
-    dma-names = "tx0", "rx0", "tx1", "rx1";
-};
diff --git a/Documentation/devicetree/bindings/spi/omap-spi.yaml b/Documentation/devicetree/bindings/spi/omap-spi.yaml
new file mode 100644 (file)
index 0000000..e555381
--- /dev/null
@@ -0,0 +1,117 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/spi/omap-spi.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: SPI controller bindings for OMAP and K3 SoCs
+
+maintainers:
+  - Aswath Govindraju <a-govindraju@ti.com>
+
+allOf:
+  - $ref: spi-controller.yaml#
+
+properties:
+  compatible:
+    oneOf:
+      - items:
+          - enum:
+              - ti,am654-mcspi
+              - ti,am4372-mcspi
+          - const: ti,omap4-mcspi
+      - items:
+          - enum:
+              - ti,omap2-mcspi
+              - ti,omap4-mcspi
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    maxItems: 1
+
+  clocks:
+    maxItems: 1
+
+  power-domains:
+    maxItems: 1
+
+  ti,spi-num-cs:
+    $ref: /schemas/types.yaml#/definitions/uint32
+    description: Number of chipselects supported by the instance.
+    minimum: 1
+    maximum: 4
+
+  ti,hwmods:
+    $ref: /schemas/types.yaml#/definitions/string
+    description:
+      Must be "mcspi<n>", n being the instance number (1-based).
+      This property is applicable only on legacy platforms mainly omap2/3
+      and ti81xx and should not be used on other platforms.
+    deprecated: true
+
+  ti,pindir-d0-out-d1-in:
+    description:
+      Select the D0 pin as output and D1 as input. The default is D0
+      as input and D1 as output.
+    type: boolean
+
+  dmas:
+    description:
+      List of DMA specifiers with the controller specific format as
+      described in the generic DMA client binding. A tx and rx
+      specifier is required for each chip select.
+    minItems: 1
+    maxItems: 8
+
+  dma-names:
+    description:
+      List of DMA request names. These strings correspond 1:1 with
+      the DMA specifiers listed in dmas. The string naming is to be
+      "rxN" and "txN" for RX and TX requests, respectively, where N
+      is the chip select number.
+    minItems: 1
+    maxItems: 8
+
+required:
+  - compatible
+  - reg
+  - interrupts
+
+unevaluatedProperties: false
+
+if:
+  properties:
+    compatible:
+      oneOf:
+        - const: ti,omap2-mcspi
+        - const: ti,omap4-mcspi
+
+then:
+  properties:
+    ti,hwmods:
+      items:
+        - pattern: "^mcspi([1-9])$"
+
+else:
+  properties:
+    ti,hwmods: false
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/irq.h>
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+    #include <dt-bindings/soc/ti,sci_pm_domain.h>
+
+    spi@2100000 {
+      compatible = "ti,am654-mcspi","ti,omap4-mcspi";
+      reg = <0x2100000 0x400>;
+      interrupts = <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>;
+      clocks = <&k3_clks 137 1>;
+      power-domains = <&k3_pds 137 TI_SCI_PD_EXCLUSIVE>;
+      #address-cells = <1>;
+      #size-cells = <0>;
+      dmas = <&main_udmap 0xc500>, <&main_udmap 0x4500>;
+      dma-names = "tx0", "rx0";
+    };
diff --git a/Documentation/devicetree/bindings/spi/rockchip-sfc.yaml b/Documentation/devicetree/bindings/spi/rockchip-sfc.yaml
new file mode 100644 (file)
index 0000000..339fb39
--- /dev/null
@@ -0,0 +1,91 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/spi/rockchip-sfc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Rockchip Serial Flash Controller (SFC)
+
+maintainers:
+  - Heiko Stuebner <heiko@sntech.de>
+  - Chris Morgan <macromorgan@hotmail.com>
+
+allOf:
+  - $ref: spi-controller.yaml#
+
+properties:
+  compatible:
+    const: rockchip,sfc
+    description:
+      The rockchip sfc controller is a standalone IP with version register,
+      and the driver can handle all the feature difference inside the IP
+      depending on the version register.
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    maxItems: 1
+
+  clocks:
+    items:
+      - description: Bus Clock
+      - description: Module Clock
+
+  clock-names:
+    items:
+      - const: clk_sfc
+      - const: hclk_sfc
+
+  power-domains:
+    maxItems: 1
+
+  rockchip,sfc-no-dma:
+    description: Disable DMA and utilize FIFO mode only
+    type: boolean
+
+patternProperties:
+  "^flash@[0-3]$":
+    type: object
+    properties:
+      reg:
+        minimum: 0
+        maximum: 3
+
+required:
+  - compatible
+  - reg
+  - interrupts
+  - clocks
+  - clock-names
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/clock/px30-cru.h>
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+    #include <dt-bindings/power/px30-power.h>
+
+    sfc: spi@ff3a0000 {
+        compatible = "rockchip,sfc";
+        reg = <0xff3a0000 0x4000>;
+        interrupts = <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>;
+        clocks = <&cru SCLK_SFC>, <&cru HCLK_SFC>;
+        clock-names = "clk_sfc", "hclk_sfc";
+        pinctrl-0 = <&sfc_clk &sfc_cs &sfc_bus2>;
+        pinctrl-names = "default";
+        power-domains = <&power PX30_PD_MMC_NAND>;
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        flash@0 {
+            compatible = "jedec,spi-nor";
+            reg = <0>;
+            spi-max-frequency = <108000000>;
+            spi-rx-bus-width = <2>;
+            spi-tx-bus-width = <2>;
+        };
+    };
+
+...
index 4d0e4c1..2a24969 100644 (file)
@@ -11,6 +11,7 @@ Required properties:
     - mediatek,mt8135-spi: for mt8135 platforms
     - mediatek,mt8173-spi: for mt8173 platforms
     - mediatek,mt8183-spi: for mt8183 platforms
+    - mediatek,mt6893-spi: for mt6893 platforms
     - "mediatek,mt8192-spi", "mediatek,mt6765-spi": for mt8192 platforms
     - "mediatek,mt8195-spi", "mediatek,mt6765-spi": for mt8195 platforms
     - "mediatek,mt8516-spi", "mediatek,mt2712-spi": for mt8516 platforms
diff --git a/Documentation/devicetree/bindings/spi/spi-sprd-adi.txt b/Documentation/devicetree/bindings/spi/spi-sprd-adi.txt
deleted file mode 100644 (file)
index 2567c82..0000000
+++ /dev/null
@@ -1,63 +0,0 @@
-Spreadtrum ADI controller
-
-ADI is the abbreviation of Anolog-Digital interface, which is used to access
-analog chip (such as PMIC) from digital chip. ADI controller follows the SPI
-framework for its hardware implementation is alike to SPI bus and its timing
-is compatile to SPI timing.
-
-ADI controller has 50 channels including 2 software read/write channels and
-48 hardware channels to access analog chip. For 2 software read/write channels,
-users should set ADI registers to access analog chip. For hardware channels,
-we can configure them to allow other hardware components to use it independently,
-which means we can just link one analog chip address to one hardware channel,
-then users can access the mapped analog chip address by this hardware channel
-triggered by hardware components instead of ADI software channels.
-
-Thus we introduce one property named "sprd,hw-channels" to configure hardware
-channels, the first value specifies the hardware channel id which is used to
-transfer data triggered by hardware automatically, and the second value specifies
-the analog chip address where user want to access by hardware components.
-
-Since we have multi-subsystems will use unique ADI to access analog chip, when
-one system is reading/writing data by ADI software channels, that should be under
-one hardware spinlock protection to prevent other systems from reading/writing
-data by ADI software channels at the same time, or two parallel routine of setting
-ADI registers will make ADI controller registers chaos to lead incorrect results.
-Then we need one hardware spinlock to synchronize between the multiple subsystems.
-
-The new version ADI controller supplies multiple master channels for different
-subsystem accessing, that means no need to add hardware spinlock to synchronize,
-thus change the hardware spinlock support to be optional to keep backward
-compatibility.
-
-Required properties:
-- compatible: Should be "sprd,sc9860-adi".
-- reg: Offset and length of ADI-SPI controller register space.
-- #address-cells: Number of cells required to define a chip select address
-       on the ADI-SPI bus. Should be set to 1.
-- #size-cells: Size of cells required to define a chip select address size
-       on the ADI-SPI bus. Should be set to 0.
-
-Optional properties:
-- hwlocks: Reference to a phandle of a hwlock provider node.
-- hwlock-names: Reference to hwlock name strings defined in the same order
-       as the hwlocks, should be "adi".
-- sprd,hw-channels: This is an array of channel values up to 49 channels.
-       The first value specifies the hardware channel id which is used to
-       transfer data triggered by hardware automatically, and the second
-       value specifies the analog chip address where user want to access
-       by hardware components.
-
-SPI slave nodes must be children of the SPI controller node and can contain
-properties described in Documentation/devicetree/bindings/spi/spi-bus.txt.
-
-Example:
-       adi_bus: spi@40030000 {
-               compatible = "sprd,sc9860-adi";
-               reg = <0 0x40030000 0 0x10000>;
-               hwlocks = <&hwlock1 0>;
-               hwlock-names = "adi";
-               #address-cells = <1>;
-               #size-cells = <0>;
-               sprd,hw-channels = <30 0x8c20>;
-       };
diff --git a/Documentation/devicetree/bindings/spi/sprd,spi-adi.yaml b/Documentation/devicetree/bindings/spi/sprd,spi-adi.yaml
new file mode 100644 (file)
index 0000000..fe01402
--- /dev/null
@@ -0,0 +1,104 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/spi/sprd,spi-adi.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Spreadtrum ADI controller
+
+maintainers:
+  - Orson Zhai <orsonzhai@gmail.com>
+  - Baolin Wang <baolin.wang7@gmail.com>
+  - Chunyan Zhang <zhang.lyra@gmail.com>
+
+description: |
+  ADI is the abbreviation of Analog-Digital interface, which is used to access
+  analog chip (such as PMIC) from digital chip. ADI controller follows the SPI
+  framework as its hardware implementation is similar to SPI bus and its timing
+  is compatible with SPI timing.
+
+  ADI controller has 50 channels including 2 software read/write channels and
+  48 hardware channels to access analog chip. For 2 software read/write channels,
+  users should set ADI registers to access analog chip. For hardware channels,
+  we can configure them to allow other hardware components to use it independently,
+  which means we can just link one analog chip address to one hardware channel,
+  then users can access the mapped analog chip address by this hardware channel
+  triggered by hardware components instead of ADI software channels.
+
+  Thus we introduce one property named "sprd,hw-channels" to configure hardware
+  channels, the first value specifies the hardware channel id which is used to
+  transfer data triggered by hardware automatically, and the second value specifies
+  the analog chip address that the user wants to access via hardware components.
+
+  Since we have multi-subsystems will use unique ADI to access analog chip, when
+  one system is reading/writing data by ADI software channels, that should be under
+  one hardware spinlock protection to prevent other systems from reading/writing
+  data by ADI software channels at the same time, or two parallel routine of setting
+  ADI registers will make ADI controller registers chaos to lead incorrect results.
+  Then we need one hardware spinlock to synchronize between the multiple subsystems.
+
+  The new version ADI controller supplies multiple master channels for different
+  subsystem accessing, that means no need to add hardware spinlock to synchronize,
+  thus change the hardware spinlock support to be optional to keep backward
+  compatibility.
+
+allOf:
+  - $ref: /spi/spi-controller.yaml#
+
+properties:
+  compatible:
+    enum:
+      - sprd,sc9860-adi
+      - sprd,sc9863-adi
+      - sprd,ums512-adi
+
+  reg:
+    maxItems: 1
+
+  hwlocks:
+    maxItems: 1
+
+  hwlock-names:
+    const: adi
+
+  sprd,hw-channels:
+    $ref: /schemas/types.yaml#/definitions/uint32-matrix
+    description: A list of hardware channels
+    minItems: 1
+    maxItems: 48
+    items:
+      items:
+        - description: The hardware channel id which is used to transfer data
+            triggered by hardware automatically, channel id 0-1 are for software
+            use, 2-49 are hardware channels.
+          minimum: 2
+          maximum: 49
+        - description: The analog chip address that the user wants to access
+            via hardware components.
+
+required:
+  - compatible
+  - reg
+  - '#address-cells'
+  - '#size-cells'
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    aon {
+        #address-cells = <2>;
+        #size-cells = <2>;
+
+        adi_bus: spi@40030000 {
+            compatible = "sprd,sc9860-adi";
+            reg = <0 0x40030000 0 0x10000>;
+            hwlocks = <&hwlock1 0>;
+            hwlock-names = "adi";
+            #address-cells = <1>;
+            #size-cells = <0>;
+            sprd,hw-channels = <30 0x8c20>;
+        };
+    };
+...
diff --git a/Documentation/devicetree/bindings/timer/rockchip,rk-timer.txt b/Documentation/devicetree/bindings/timer/rockchip,rk-timer.txt
deleted file mode 100644 (file)
index d65fdce..0000000
+++ /dev/null
@@ -1,27 +0,0 @@
-Rockchip rk timer
-
-Required properties:
-- compatible: should be:
-  "rockchip,rv1108-timer", "rockchip,rk3288-timer": for Rockchip RV1108
-  "rockchip,rk3036-timer", "rockchip,rk3288-timer": for Rockchip RK3036
-  "rockchip,rk3066-timer", "rockchip,rk3288-timer": for Rockchip RK3066
-  "rockchip,rk3188-timer", "rockchip,rk3288-timer": for Rockchip RK3188
-  "rockchip,rk3228-timer", "rockchip,rk3288-timer": for Rockchip RK3228
-  "rockchip,rk3229-timer", "rockchip,rk3288-timer": for Rockchip RK3229
-  "rockchip,rk3288-timer": for Rockchip RK3288
-  "rockchip,rk3368-timer", "rockchip,rk3288-timer": for Rockchip RK3368
-  "rockchip,rk3399-timer": for Rockchip RK3399
-- reg: base address of the timer register starting with TIMERS CONTROL register
-- interrupts: should contain the interrupts for Timer0
-- clocks : must contain an entry for each entry in clock-names
-- clock-names : must include the following entries:
-  "timer", "pclk"
-
-Example:
-       timer: timer@ff810000 {
-               compatible = "rockchip,rk3288-timer";
-               reg = <0xff810000 0x20>;
-               interrupts = <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&xin24m>, <&cru PCLK_TIMER>;
-               clock-names = "timer", "pclk";
-       };
diff --git a/Documentation/devicetree/bindings/timer/rockchip,rk-timer.yaml b/Documentation/devicetree/bindings/timer/rockchip,rk-timer.yaml
new file mode 100644 (file)
index 0000000..e26ecb5
--- /dev/null
@@ -0,0 +1,64 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/timer/rockchip,rk-timer.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Rockchip Timer Device Tree Bindings
+
+maintainers:
+  - Daniel Lezcano <daniel.lezcano@linaro.org>
+
+properties:
+  compatible:
+    oneOf:
+      - const: rockchip,rk3288-timer
+      - const: rockchip,rk3399-timer
+      - items:
+          - enum:
+              - rockchip,rv1108-timer
+              - rockchip,rk3036-timer
+              - rockchip,rk3066-timer
+              - rockchip,rk3188-timer
+              - rockchip,rk3228-timer
+              - rockchip,rk3229-timer
+              - rockchip,rk3288-timer
+              - rockchip,rk3368-timer
+              - rockchip,px30-timer
+          - const: rockchip,rk3288-timer
+  reg:
+    maxItems: 1
+
+  interrupts:
+    maxItems: 1
+
+  clocks:
+    minItems: 2
+    maxItems: 2
+
+  clock-names:
+    items:
+      - const: pclk
+      - const: timer
+
+required:
+  - compatible
+  - reg
+  - interrupts
+  - clocks
+  - clock-names
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+    #include <dt-bindings/clock/rk3288-cru.h>
+
+    timer: timer@ff810000 {
+        compatible = "rockchip,rk3288-timer";
+        reg = <0xff810000 0x20>;
+        interrupts = <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>;
+        clocks = <&cru PCLK_TIMER>, <&xin24m>;
+        clock-names = "pclk", "timer";
+    };
index f5a3207..c57c609 100644 (file)
@@ -85,7 +85,6 @@ available subsections can be seen below.
    io-mapping
    io_ordering
    generic-counter
-   lightnvm-pblk
    memory-devices/index
    men-chameleon-bus
    ntb
diff --git a/Documentation/driver-api/lightnvm-pblk.rst b/Documentation/driver-api/lightnvm-pblk.rst
deleted file mode 100644 (file)
index 1040ed1..0000000
+++ /dev/null
@@ -1,21 +0,0 @@
-pblk: Physical Block Device Target
-==================================
-
-pblk implements a fully associative, host-based FTL that exposes a traditional
-block I/O interface. Its primary responsibilities are:
-
-  - Map logical addresses onto physical addresses (4KB granularity) in a
-    logical-to-physical (L2P) table.
-  - Maintain the integrity and consistency of the L2P table as well as its
-    recovery from normal tear down and power outage.
-  - Deal with controller- and media-specific constrains.
-  - Handle I/O errors.
-  - Implement garbage collection.
-  - Maintain consistency across the I/O stack during synchronization points.
-
-For more information please refer to:
-
-  http://lightnvm.io
-
-which maintains updated FAQs, manual pages, technical documentation, tools,
-contacts, etc.
index 2183fd8..2a75dd5 100644 (file)
@@ -271,19 +271,19 @@ prototypes::
 locking rules:
        All except set_page_dirty and freepage may block
 
-====================== ======================== =========
-ops                    PageLocked(page)         i_rwsem
-====================== ======================== =========
+====================== ======================== =========      ===============
+ops                    PageLocked(page)         i_rwsem        invalidate_lock
+====================== ======================== =========      ===============
 writepage:             yes, unlocks (see below)
-readpage:              yes, unlocks
+readpage:              yes, unlocks                            shared
 writepages:
 set_page_dirty         no
-readahead:             yes, unlocks
-readpages:             no
+readahead:             yes, unlocks                            shared
+readpages:             no                                      shared
 write_begin:           locks the page           exclusive
 write_end:             yes, unlocks             exclusive
 bmap:
-invalidatepage:                yes
+invalidatepage:                yes                                     exclusive
 releasepage:           yes
 freepage:              yes
 direct_IO:
@@ -295,7 +295,7 @@ is_partially_uptodate:      yes
 error_remove_page:     yes
 swap_activate:         no
 swap_deactivate:       no
-====================== ======================== =========
+====================== ======================== =========      ===============
 
 ->write_begin(), ->write_end() and ->readpage() may be called from
 the request handler (/dev/loop).
@@ -378,7 +378,10 @@ keep it that way and don't breed new callers.
 ->invalidatepage() is called when the filesystem must attempt to drop
 some or all of the buffers from the page when it is being truncated. It
 returns zero on success. If ->invalidatepage is zero, the kernel uses
-block_invalidatepage() instead.
+block_invalidatepage() instead. The filesystem must exclusively acquire
+invalidate_lock before invalidating page cache in truncate / hole punch path
+(and thus calling into ->invalidatepage) to block races between page cache
+invalidation and page cache filling functions (fault, read, ...).
 
 ->releasepage() is called when the kernel is about to try to drop the
 buffers from the page in preparation for freeing it.  It returns zero to
@@ -506,6 +509,7 @@ prototypes::
        ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
        ssize_t (*read_iter) (struct kiocb *, struct iov_iter *);
        ssize_t (*write_iter) (struct kiocb *, struct iov_iter *);
+       int (*iopoll) (struct kiocb *kiocb, bool spin);
        int (*iterate) (struct file *, struct dir_context *);
        int (*iterate_shared) (struct file *, struct dir_context *);
        __poll_t (*poll) (struct file *, struct poll_table_struct *);
@@ -518,12 +522,6 @@ prototypes::
        int (*fsync) (struct file *, loff_t start, loff_t end, int datasync);
        int (*fasync) (int, struct file *, int);
        int (*lock) (struct file *, int, struct file_lock *);
-       ssize_t (*readv) (struct file *, const struct iovec *, unsigned long,
-                       loff_t *);
-       ssize_t (*writev) (struct file *, const struct iovec *, unsigned long,
-                       loff_t *);
-       ssize_t (*sendfile) (struct file *, loff_t *, size_t, read_actor_t,
-                       void __user *);
        ssize_t (*sendpage) (struct file *, struct page *, int, size_t,
                        loff_t *, int);
        unsigned long (*get_unmapped_area)(struct file *, unsigned long,
@@ -536,6 +534,14 @@ prototypes::
                        size_t, unsigned int);
        int (*setlease)(struct file *, long, struct file_lock **, void **);
        long (*fallocate)(struct file *, int, loff_t, loff_t);
+       void (*show_fdinfo)(struct seq_file *m, struct file *f);
+       unsigned (*mmap_capabilities)(struct file *);
+       ssize_t (*copy_file_range)(struct file *, loff_t, struct file *,
+                       loff_t, size_t, unsigned int);
+       loff_t (*remap_file_range)(struct file *file_in, loff_t pos_in,
+                       struct file *file_out, loff_t pos_out,
+                       loff_t len, unsigned int remap_flags);
+       int (*fadvise)(struct file *, loff_t, loff_t, int);
 
 locking rules:
        All may block.
@@ -570,6 +576,25 @@ in sys_read() and friends.
 the lease within the individual filesystem to record the result of the
 operation
 
+->fallocate implementation must be really careful to maintain page cache
+consistency when punching holes or performing other operations that invalidate
+page cache contents. Usually the filesystem needs to call
+truncate_inode_pages_range() to invalidate relevant range of the page cache.
+However the filesystem usually also needs to update its internal (and on disk)
+view of file offset -> disk block mapping. Until this update is finished, the
+filesystem needs to block page faults and reads from reloading now-stale page
+cache contents from the disk. Since VFS acquires mapping->invalidate_lock in
+shared mode when loading pages from disk (filemap_fault(), filemap_read(),
+readahead paths), the fallocate implementation must take the invalidate_lock to
+prevent reloading.
+
+->copy_file_range and ->remap_file_range implementations need to serialize
+against modifications of file data while the operation is running. For
+blocking changes through write(2) and similar operations inode->i_rwsem can be
+used. To block changes to file contents via a memory mapping during the
+operation, the filesystem must take mapping->invalidate_lock to coordinate
+with ->page_mkwrite.
+
 dquot_operations
 ================
 
@@ -627,11 +652,11 @@ pfn_mkwrite:      yes
 access:                yes
 =============  =========       ===========================
 
-->fault() is called when a previously not present pte is about
-to be faulted in. The filesystem must find and return the page associated
-with the passed in "pgoff" in the vm_fault structure. If it is possible that
-the page may be truncated and/or invalidated, then the filesystem must lock
-the page, then ensure it is not already truncated (the page lock will block
+->fault() is called when a previously not present pte is about to be faulted
+in. The filesystem must find and return the page associated with the passed in
+"pgoff" in the vm_fault structure. If it is possible that the page may be
+truncated and/or invalidated, then the filesystem must lock invalidate_lock,
+then ensure the page is not already truncated (invalidate_lock will block
 subsequent truncate), and then return with VM_FAULT_LOCKED, and the page
 locked. The VM will unlock the page.
 
@@ -644,12 +669,14 @@ page table entry. Pointer to entry associated with the page is passed in
 "pte" field in vm_fault structure. Pointers to entries for other offsets
 should be calculated relative to "pte".
 
-->page_mkwrite() is called when a previously read-only pte is
-about to become writeable. The filesystem again must ensure that there are
-no truncate/invalidate races, and then return with the page locked. If
-the page has been truncated, the filesystem should not look up a new page
-like the ->fault() handler, but simply return with VM_FAULT_NOPAGE, which
-will cause the VM to retry the fault.
+->page_mkwrite() is called when a previously read-only pte is about to become
+writeable. The filesystem again must ensure that there are no
+truncate/invalidate races or races with operations such as ->remap_file_range
+or ->copy_file_range, and then return with the page locked. Usually
+mapping->invalidate_lock is suitable for proper serialization. If the page has
+been truncated, the filesystem should not look up a new page like the ->fault()
+handler, but simply return with VM_FAULT_NOPAGE, which will cause the VM to
+retry the fault.
 
 ->pfn_mkwrite() is the same as page_mkwrite but when the pte is
 VM_PFNMAP or VM_MIXEDMAP with a page-less entry. Expected return is
diff --git a/Documentation/filesystems/mandatory-locking.rst b/Documentation/filesystems/mandatory-locking.rst
deleted file mode 100644 (file)
index 9ce7354..0000000
+++ /dev/null
@@ -1,188 +0,0 @@
-.. SPDX-License-Identifier: GPL-2.0
-
-=====================================================
-Mandatory File Locking For The Linux Operating System
-=====================================================
-
-               Andy Walker <andy@lysaker.kvaerner.no>
-
-                          15 April 1996
-
-                    (Updated September 2007)
-
-0. Why you should avoid mandatory locking
------------------------------------------
-
-The Linux implementation is prey to a number of difficult-to-fix race
-conditions which in practice make it not dependable:
-
-       - The write system call checks for a mandatory lock only once
-         at its start.  It is therefore possible for a lock request to
-         be granted after this check but before the data is modified.
-         A process may then see file data change even while a mandatory
-         lock was held.
-       - Similarly, an exclusive lock may be granted on a file after
-         the kernel has decided to proceed with a read, but before the
-         read has actually completed, and the reading process may see
-         the file data in a state which should not have been visible
-         to it.
-       - Similar races make the claimed mutual exclusion between lock
-         and mmap similarly unreliable.
-
-1. What is  mandatory locking?
-------------------------------
-
-Mandatory locking is kernel enforced file locking, as opposed to the more usual
-cooperative file locking used to guarantee sequential access to files among
-processes. File locks are applied using the flock() and fcntl() system calls
-(and the lockf() library routine which is a wrapper around fcntl().) It is
-normally a process' responsibility to check for locks on a file it wishes to
-update, before applying its own lock, updating the file and unlocking it again.
-The most commonly used example of this (and in the case of sendmail, the most
-troublesome) is access to a user's mailbox. The mail user agent and the mail
-transfer agent must guard against updating the mailbox at the same time, and
-prevent reading the mailbox while it is being updated.
-
-In a perfect world all processes would use and honour a cooperative, or
-"advisory" locking scheme. However, the world isn't perfect, and there's
-a lot of poorly written code out there.
-
-In trying to address this problem, the designers of System V UNIX came up
-with a "mandatory" locking scheme, whereby the operating system kernel would
-block attempts by a process to write to a file that another process holds a
-"read" -or- "shared" lock on, and block attempts to both read and write to a 
-file that a process holds a "write " -or- "exclusive" lock on.
-
-The System V mandatory locking scheme was intended to have as little impact as
-possible on existing user code. The scheme is based on marking individual files
-as candidates for mandatory locking, and using the existing fcntl()/lockf()
-interface for applying locks just as if they were normal, advisory locks.
-
-.. Note::
-
-   1. In saying "file" in the paragraphs above I am actually not telling
-      the whole truth. System V locking is based on fcntl(). The granularity of
-      fcntl() is such that it allows the locking of byte ranges in files, in
-      addition to entire files, so the mandatory locking rules also have byte
-      level granularity.
-
-   2. POSIX.1 does not specify any scheme for mandatory locking, despite
-      borrowing the fcntl() locking scheme from System V. The mandatory locking
-      scheme is defined by the System V Interface Definition (SVID) Version 3.
-
-2. Marking a file for mandatory locking
----------------------------------------
-
-A file is marked as a candidate for mandatory locking by setting the group-id
-bit in its file mode but removing the group-execute bit. This is an otherwise
-meaningless combination, and was chosen by the System V implementors so as not
-to break existing user programs.
-
-Note that the group-id bit is usually automatically cleared by the kernel when
-a setgid file is written to. This is a security measure. The kernel has been
-modified to recognize the special case of a mandatory lock candidate and to
-refrain from clearing this bit. Similarly the kernel has been modified not
-to run mandatory lock candidates with setgid privileges.
-
-3. Available implementations
-----------------------------
-
-I have considered the implementations of mandatory locking available with
-SunOS 4.1.x, Solaris 2.x and HP-UX 9.x.
-
-Generally I have tried to make the most sense out of the behaviour exhibited
-by these three reference systems. There are many anomalies.
-
-All the reference systems reject all calls to open() for a file on which
-another process has outstanding mandatory locks. This is in direct
-contravention of SVID 3, which states that only calls to open() with the
-O_TRUNC flag set should be rejected. The Linux implementation follows the SVID
-definition, which is the "Right Thing", since only calls with O_TRUNC can
-modify the contents of the file.
-
-HP-UX even disallows open() with O_TRUNC for a file with advisory locks, not
-just mandatory locks. That would appear to contravene POSIX.1.
-
-mmap() is another interesting case. All the operating systems mentioned
-prevent mandatory locks from being applied to an mmap()'ed file, but  HP-UX
-also disallows advisory locks for such a file. SVID actually specifies the
-paranoid HP-UX behaviour.
-
-In my opinion only MAP_SHARED mappings should be immune from locking, and then
-only from mandatory locks - that is what is currently implemented.
-
-SunOS is so hopeless that it doesn't even honour the O_NONBLOCK flag for
-mandatory locks, so reads and writes to locked files always block when they
-should return EAGAIN.
-
-I'm afraid that this is such an esoteric area that the semantics described
-below are just as valid as any others, so long as the main points seem to
-agree. 
-
-4. Semantics
-------------
-
-1. Mandatory locks can only be applied via the fcntl()/lockf() locking
-   interface - in other words the System V/POSIX interface. BSD style
-   locks using flock() never result in a mandatory lock.
-
-2. If a process has locked a region of a file with a mandatory read lock, then
-   other processes are permitted to read from that region. If any of these
-   processes attempts to write to the region it will block until the lock is
-   released, unless the process has opened the file with the O_NONBLOCK
-   flag in which case the system call will return immediately with the error
-   status EAGAIN.
-
-3. If a process has locked a region of a file with a mandatory write lock, all
-   attempts to read or write to that region block until the lock is released,
-   unless a process has opened the file with the O_NONBLOCK flag in which case
-   the system call will return immediately with the error status EAGAIN.
-
-4. Calls to open() with O_TRUNC, or to creat(), on a existing file that has
-   any mandatory locks owned by other processes will be rejected with the
-   error status EAGAIN.
-
-5. Attempts to apply a mandatory lock to a file that is memory mapped and
-   shared (via mmap() with MAP_SHARED) will be rejected with the error status
-   EAGAIN.
-
-6. Attempts to create a shared memory map of a file (via mmap() with MAP_SHARED)
-   that has any mandatory locks in effect will be rejected with the error status
-   EAGAIN.
-
-5. Which system calls are affected?
------------------------------------
-
-Those which modify a file's contents, not just the inode. That gives read(),
-write(), readv(), writev(), open(), creat(), mmap(), truncate() and
-ftruncate(). truncate() and ftruncate() are considered to be "write" actions
-for the purposes of mandatory locking.
-
-The affected region is usually defined as stretching from the current position
-for the total number of bytes read or written. For the truncate calls it is
-defined as the bytes of a file removed or added (we must also consider bytes
-added, as a lock can specify just "the whole file", rather than a specific
-range of bytes.)
-
-Note 3: I may have overlooked some system calls that need mandatory lock
-checking in my eagerness to get this code out the door. Please let me know, or
-better still fix the system calls yourself and submit a patch to me or Linus.
-
-6. Warning!
------------
-
-Not even root can override a mandatory lock, so runaway processes can wreak
-havoc if they lock crucial files. The way around it is to change the file
-permissions (remove the setgid bit) before trying to read or write to it.
-Of course, that might be a bit tricky if the system is hung :-(
-
-7. The "mand" mount option
---------------------------
-Mandatory locking is disabled on all filesystems by default, and must be
-administratively enabled by mounting with "-o mand". That mount option
-is only allowed if the mounting task has the CAP_SYS_ADMIN capability.
-
-Since kernel v4.5, it is possible to disable mandatory locking
-altogether by setting CONFIG_MANDATORY_FILE_LOCKING to "n". A kernel
-with this disabled will reject attempts to mount filesystems with the
-"mand" mount option with the error status EPERM.
index 675ba86..b421a3c 100644 (file)
@@ -18,114 +18,5 @@ real, with all the uAPI bits is:
         * Route shmem backend over to TTM SYSTEM for discrete
         * TTM purgeable object support
         * Move i915 buddy allocator over to TTM
-        * MMAP ioctl mode(see `I915 MMAP`_)
-        * SET/GET ioctl caching(see `I915 SET/GET CACHING`_)
 * Send RFC(with mesa-dev on cc) for final sign off on the uAPI
 * Add pciid for DG1 and turn on uAPI for real
-
-New object placement and region query uAPI
-==========================================
-Starting from DG1 we need to give userspace the ability to allocate buffers from
-device local-memory. Currently the driver supports gem_create, which can place
-buffers in system memory via shmem, and the usual assortment of other
-interfaces, like dumb buffers and userptr.
-
-To support this new capability, while also providing a uAPI which will work
-beyond just DG1, we propose to offer three new bits of uAPI:
-
-DRM_I915_QUERY_MEMORY_REGIONS
------------------------------
-New query ID which allows userspace to discover the list of supported memory
-regions(like system-memory and local-memory) for a given device. We identify
-each region with a class and instance pair, which should be unique. The class
-here would be DEVICE or SYSTEM, and the instance would be zero, on platforms
-like DG1.
-
-Side note: The class/instance design is borrowed from our existing engine uAPI,
-where we describe every physical engine in terms of its class, and the
-particular instance, since we can have more than one per class.
-
-In the future we also want to expose more information which can further
-describe the capabilities of a region.
-
-.. kernel-doc:: include/uapi/drm/i915_drm.h
-        :functions: drm_i915_gem_memory_class drm_i915_gem_memory_class_instance drm_i915_memory_region_info drm_i915_query_memory_regions
-
-GEM_CREATE_EXT
---------------
-New ioctl which is basically just gem_create but now allows userspace to provide
-a chain of possible extensions. Note that if we don't provide any extensions and
-set flags=0 then we get the exact same behaviour as gem_create.
-
-Side note: We also need to support PXP[1] in the near future, which is also
-applicable to integrated platforms, and adds its own gem_create_ext extension,
-which basically lets userspace mark a buffer as "protected".
-
-.. kernel-doc:: include/uapi/drm/i915_drm.h
-        :functions: drm_i915_gem_create_ext
-
-I915_GEM_CREATE_EXT_MEMORY_REGIONS
-----------------------------------
-Implemented as an extension for gem_create_ext, we would now allow userspace to
-optionally provide an immutable list of preferred placements at creation time,
-in priority order, for a given buffer object.  For the placements we expect
-them each to use the class/instance encoding, as per the output of the regions
-query. Having the list in priority order will be useful in the future when
-placing an object, say during eviction.
-
-.. kernel-doc:: include/uapi/drm/i915_drm.h
-        :functions: drm_i915_gem_create_ext_memory_regions
-
-One fair criticism here is that this seems a little over-engineered[2]. If we
-just consider DG1 then yes, a simple gem_create.flags or something is totally
-all that's needed to tell the kernel to allocate the buffer in local-memory or
-whatever. However looking to the future we need uAPI which can also support
-upcoming Xe HP multi-tile architecture in a sane way, where there can be
-multiple local-memory instances for a given device, and so using both class and
-instance in our uAPI to describe regions is desirable, although specifically
-for DG1 it's uninteresting, since we only have a single local-memory instance.
-
-Existing uAPI issues
-====================
-Some potential issues we still need to resolve.
-
-I915 MMAP
----------
-In i915 there are multiple ways to MMAP GEM object, including mapping the same
-object using different mapping types(WC vs WB), i.e multiple active mmaps per
-object. TTM expects one MMAP at most for the lifetime of the object. If it
-turns out that we have to backpedal here, there might be some potential
-userspace fallout.
-
-I915 SET/GET CACHING
---------------------
-In i915 we have set/get_caching ioctl. TTM doesn't let us to change this, but
-DG1 doesn't support non-snooped pcie transactions, so we can just always
-allocate as WB for smem-only buffers.  If/when our hw gains support for
-non-snooped pcie transactions then we must fix this mode at allocation time as
-a new GEM extension.
-
-This is related to the mmap problem, because in general (meaning, when we're
-not running on intel cpus) the cpu mmap must not, ever, be inconsistent with
-allocation mode.
-
-Possible idea is to let the kernel picks the mmap mode for userspace from the
-following table:
-
-smem-only: WB. Userspace does not need to call clflush.
-
-smem+lmem: We only ever allow a single mode, so simply allocate this as uncached
-memory, and always give userspace a WC mapping. GPU still does snooped access
-here(assuming we can't turn it off like on DG1), which is a bit inefficient.
-
-lmem only: always WC
-
-This means on discrete you only get a single mmap mode, all others must be
-rejected. That's probably going to be a new default mode or something like
-that.
-
-Links
-=====
-[1] https://patchwork.freedesktop.org/series/86798/
-
-[2] https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/5599#note_553791
index 8b76217..6270f1f 100644 (file)
@@ -17,6 +17,7 @@ Introduction
    busses/index
    i2c-topology
    muxes/i2c-mux-gpio
+   i2c-sysfs
 
 Writing device drivers
 ======================
index 91b2cf7..e26532f 100644 (file)
@@ -228,6 +228,23 @@ before posting to the mailing list. The patchwork build bot instance
 gets overloaded very easily and netdev@vger really doesn't need more
 traffic if we can help it.
 
+netdevsim is great, can I extend it for my out-of-tree tests?
+-------------------------------------------------------------
+
+No, `netdevsim` is a test vehicle solely for upstream tests.
+(Please add your tests under tools/testing/selftests/.)
+
+We also give no guarantees that `netdevsim` won't change in the future
+in a way which would break what would normally be considered uAPI.
+
+Is netdevsim considered a "user" of an API?
+-------------------------------------------
+
+Linux kernel has a long standing rule that no API should be added unless
+it has a real, in-tree user. Mock-ups and tests based on `netdevsim` are
+strongly encouraged when adding new APIs, but `netdevsim` in itself
+is **not** considered a use case/user.
+
 Any other tips to help ensure my net/net-next patch gets OK'd?
 --------------------------------------------------------------
 Attention to detail.  Re-read your own work as if you were the
index d31ed6c..024d784 100644 (file)
@@ -191,19 +191,9 @@ nf_flowtable_tcp_timeout - INTEGER (seconds)
         TCP connections may be offloaded from nf conntrack to nf flow table.
         Once aged, the connection is returned to nf conntrack with tcp pickup timeout.
 
-nf_flowtable_tcp_pickup - INTEGER (seconds)
-        default 120
-
-        TCP connection timeout after being aged from nf flow table offload.
-
 nf_flowtable_udp_timeout - INTEGER (seconds)
         default 30
 
         Control offload timeout for udp connections.
         UDP connections may be offloaded from nf conntrack to nf flow table.
         Once aged, the connection is returned to nf conntrack with udp pickup timeout.
-
-nf_flowtable_udp_pickup - INTEGER (seconds)
-        default 30
-
-        UDP connection timeout after being aged from nf flow table offload.
index 9c918f7..1ee2141 100644 (file)
@@ -73,7 +73,9 @@ IF_OPER_LOWERLAYERDOWN (3):
  state (f.e. VLAN).
 
 IF_OPER_TESTING (4):
- Unused in current kernel.
+ Interface is in testing mode, for example executing driver self-tests
+ or media (cable) test. It can't be used for normal traffic until tests
+ complete.
 
 IF_OPER_DORMANT (5):
  Interface is L1 up, but waiting for an external event, f.e. for a
@@ -111,7 +113,7 @@ it as lower layer.
 
 Note that for certain kind of soft-devices, which are not managing any
 real hardware, it is possible to set this bit from userspace.  One
-should use TVL IFLA_CARRIER to do so.
+should use TLV IFLA_CARRIER to do so.
 
 netif_carrier_ok() can be used to query that bit.
 
index cfc81e9..4e5b26f 100644 (file)
@@ -2762,7 +2762,7 @@ listed in:
   put_prev_task_idle
   kmem_cache_create
   pick_next_task_rt
-  get_online_cpus
+  cpus_read_lock
   pick_next_task_fair
   mutex_lock
   [...]
index 1409e40..b7070d7 100644 (file)
@@ -160,7 +160,6 @@ Code  Seq#    Include File                                           Comments
 'K'   all    linux/kd.h
 'L'   00-1F  linux/loop.h                                            conflict!
 'L'   10-1F  drivers/scsi/mpt3sas/mpt3sas_ctl.h                      conflict!
-'L'   20-2F  linux/lightnvm.h
 'L'   E0-FF  linux/ppdd.h                                            encrypted disk device driver
                                                                      <http://linux01.gwdg.de/~alatham/ppdd.html>
 'M'   all    linux/soundcard.h                                       conflict!
index d612198..539e9d4 100644 (file)
@@ -263,7 +263,7 @@ Userspace can also add file descriptors to the notifying process via
 ``ioctl(SECCOMP_IOCTL_NOTIF_ADDFD)``. The ``id`` member of
 ``struct seccomp_notif_addfd`` should be the same ``id`` as in
 ``struct seccomp_notif``. The ``newfd_flags`` flag may be used to set flags
-like O_EXEC on the file descriptor in the notifying process. If the supervisor
+like O_CLOEXEC on the file descriptor in the notifying process. If the supervisor
 wants to inject the file descriptor with a specific number, the
 ``SECCOMP_ADDFD_FLAG_SETFD`` flag can be used, and set the ``newfd`` member to
 the specific number to use. If that file descriptor is already open in the
index 7ddd8f6..5e8ed9e 100644 (file)
@@ -106,3 +106,11 @@ Speculation misfeature controls
    * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH, PR_SPEC_ENABLE, 0, 0);
    * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH, PR_SPEC_DISABLE, 0, 0);
    * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH, PR_SPEC_FORCE_DISABLE, 0, 0);
+
+- PR_SPEC_L1D_FLUSH: Flush L1D Cache on context switch out of the task
+                        (works only when tasks run on non SMT cores)
+
+  Invocations:
+   * prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_L1D_FLUSH, 0, 0, 0);
+   * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_L1D_FLUSH, PR_SPEC_ENABLE, 0, 0);
+   * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_L1D_FLUSH, PR_SPEC_DISABLE, 0, 0);
index c7b165c..dae68e6 100644 (file)
@@ -855,7 +855,7 @@ in-kernel irqchip (GIC), and for in-kernel irqchip can tell the GIC to
 use PPIs designated for specific cpus.  The irq field is interpreted
 like this::
 
 bits:  |  31 ... 28  | 27 ... 24 | 23  ... 16 | 15 ... 0 |
 bits:  |  31 ... 28  | 27 ... 24 | 23  ... 16 | 15 ... 0 |
   field: | vcpu2_index | irq_type  | vcpu_index |  irq_id  |
 
 The irq_type field has the following values:
@@ -2149,10 +2149,10 @@ prior to calling the KVM_RUN ioctl.
 Errors:
 
   ======   ============================================================
 ENOENT   no such register
 EINVAL   invalid register ID, or no such register or used with VMs in
 ENOENT   no such register
 EINVAL   invalid register ID, or no such register or used with VMs in
            protected virtualization mode on s390
 EPERM    (arm64) register access not allowed before vcpu finalization
 EPERM    (arm64) register access not allowed before vcpu finalization
   ======   ============================================================
 
 (These error codes are indicative only: do not rely on a specific error
@@ -2590,10 +2590,10 @@ following id bit patterns::
 Errors include:
 
   ======== ============================================================
 ENOENT   no such register
 EINVAL   invalid register ID, or no such register or used with VMs in
 ENOENT   no such register
 EINVAL   invalid register ID, or no such register or used with VMs in
            protected virtualization mode on s390
 EPERM    (arm64) register access not allowed before vcpu finalization
 EPERM    (arm64) register access not allowed before vcpu finalization
   ======== ============================================================
 
 (These error codes are indicative only: do not rely on a specific error
@@ -3112,13 +3112,13 @@ current state.  "addr" is ignored.
 Errors:
 
   ======     =================================================================
 EINVAL     the target is unknown, or the combination of features is invalid.
 ENOENT     a features bit specified is unknown.
 EINVAL     the target is unknown, or the combination of features is invalid.
 ENOENT     a features bit specified is unknown.
   ======     =================================================================
 
 This tells KVM what type of CPU to present to the guest, and what
-optional features it should have.  This will cause a reset of the cpu
-registers to their initial values.  If this is not called, KVM_RUN will
+optional features it should have.  This will cause a reset of the cpu
+registers to their initial values.  If this is not called, KVM_RUN will
 return ENOEXEC for that vcpu.
 
 The initial values are defined as:
@@ -3239,8 +3239,8 @@ VCPU matching underlying host.
 Errors:
 
   =====      ==============================================================
 E2BIG      the reg index list is too big to fit in the array specified by
            the user (the number required will be written into n).
 E2BIG      the reg index list is too big to fit in the array specified by
            the user (the number required will be written into n).
   =====      ==============================================================
 
 ::
@@ -3288,7 +3288,7 @@ specific device.
 ARM/arm64 divides the id field into two parts, a device id and an
 address type id specific to the individual device::
 
 bits:  | 63        ...       32 | 31    ...    16 | 15    ...    0 |
 bits:  | 63        ...       32 | 31    ...    16 | 15    ...    0 |
   field: |        0x00000000      |     device id   |  addr type id  |
 
 ARM/arm64 currently only require this when using the in-kernel GIC
@@ -7049,7 +7049,7 @@ In combination with KVM_CAP_X86_USER_SPACE_MSR, this allows user space to
 trap and emulate MSRs that are outside of the scope of KVM as well as
 limit the attack surface on KVM's MSR emulation code.
 
-8.28 KVM_CAP_ENFORCE_PV_CPUID
+8.28 KVM_CAP_ENFORCE_PV_FEATURE_CPUID
 -----------------------------
 
 Architectures: x86
index 35eca37..88fa495 100644 (file)
@@ -25,10 +25,10 @@ On x86:
 
 - vcpu->mutex is taken outside kvm->arch.hyperv.hv_lock
 
-- kvm->arch.mmu_lock is an rwlock.  kvm->arch.tdp_mmu_pages_lock is
-  taken inside kvm->arch.mmu_lock, and cannot be taken without already
-  holding kvm->arch.mmu_lock (typically with ``read_lock``, otherwise
-  there's no need to take kvm->arch.tdp_mmu_pages_lock at all).
+- kvm->arch.mmu_lock is an rwlock.  kvm->arch.tdp_mmu_pages_lock and
+  kvm->arch.mmu_unsync_pages_lock are taken inside kvm->arch.mmu_lock, and
+  cannot be taken without already holding kvm->arch.mmu_lock (typically with
+  ``read_lock`` for the TDP MMU, thus the need for additional spinlocks).
 
 Everything else is a leaf: no other lock is taken inside the critical
 sections.
index 5f62b3b..ccb7e86 100644 (file)
@@ -126,7 +126,7 @@ Idle loop
 Rebooting
 =========
 
-   reboot=b[ios] | t[riple] | k[bd] | a[cpi] | e[fi] [, [w]arm | [c]old]
+   reboot=b[ios] | t[riple] | k[bd] | a[cpi] | e[fi] | p[ci] [, [w]arm | [c]old]
       bios
         Use the CPU reboot vector for warm reset
       warm
@@ -145,6 +145,8 @@ Rebooting
         Use efi reset_system runtime service. If EFI is not configured or
         the EFI reset does not work, the reboot path attempts the reset using
         the keyboard controller.
+      pci
+        Use a write to the PCI config space register 0xcf9 to trigger reboot.
 
    Using warm reset will be much faster especially on big memory
    systems because the BIOS will not go through the memory check.
@@ -155,6 +157,13 @@ Rebooting
      Don't stop other CPUs on reboot. This can make reboot more reliable
      in some cases.
 
+   reboot=default
+     There are some built-in platform specific "quirks" - you may see:
+     "reboot: <name> series board detected. Selecting <type> for reboots."
+     In the case where you think the quirk is in error (e.g. you have
+     newer BIOS, or newer board) using this option will ignore the built-in
+     quirk table, and use the generic default reboot actions.
+
 Non Executable Mappings
 =======================
 
index 19135a9..142f3a3 100644 (file)
@@ -3866,6 +3866,16 @@ L:       bcm-kernel-feedback-list@broadcom.com
 S:     Maintained
 F:     drivers/mtd/nand/raw/brcmnand/
 
+BROADCOM STB PCIE DRIVER
+M:     Jim Quinlan <jim2101024@gmail.com>
+M:     Nicolas Saenz Julienne <nsaenz@kernel.org>
+M:     Florian Fainelli <f.fainelli@gmail.com>
+M:     bcm-kernel-feedback-list@broadcom.com
+L:     linux-pci@vger.kernel.org
+S:     Maintained
+F:     Documentation/devicetree/bindings/pci/brcm,stb-pcie.yaml
+F:     drivers/pci/controller/pcie-brcmstb.c
+
 BROADCOM SYSTEMPORT ETHERNET DRIVER
 M:     Florian Fainelli <f.fainelli@gmail.com>
 L:     bcm-kernel-feedback-list@broadcom.com
@@ -4498,7 +4508,7 @@ L:        clang-built-linux@googlegroups.com
 S:     Supported
 W:     https://clangbuiltlinux.github.io/
 B:     https://github.com/ClangBuiltLinux/linux/issues
-C:     irc://chat.freenode.net/clangbuiltlinux
+C:     irc://irc.libera.chat/clangbuiltlinux
 F:     Documentation/kbuild/llvm.rst
 F:     include/linux/compiler-clang.h
 F:     scripts/clang-tools/
@@ -6945,7 +6955,7 @@ F:        include/uapi/linux/mdio.h
 F:     include/uapi/linux/mii.h
 
 EXFAT FILE SYSTEM
-M:     Namjae Jeon <namjae.jeon@samsung.com>
+M:     Namjae Jeon <linkinjeon@kernel.org>
 M:     Sungjong Seo <sj1557.seo@samsung.com>
 L:     linux-fsdevel@vger.kernel.org
 S:     Maintained
@@ -7858,9 +7868,9 @@ S:        Maintained
 F:     drivers/input/touchscreen/goodix.c
 
 GOOGLE ETHERNET DRIVERS
-M:     Catherine Sullivan <csully@google.com>
-R:     Sagi Shahar <sagis@google.com>
-R:     Jon Olson <jonolson@google.com>
+M:     Jeroen de Borst <jeroendb@google.com>
+R:     Catherine Sullivan <csully@google.com>
+R:     David Awogbemila <awogbemila@google.com>
 L:     netdev@vger.kernel.org
 S:     Supported
 F:     Documentation/networking/device_drivers/ethernet/google/gve.rst
@@ -10609,15 +10619,6 @@ F:     LICENSES/
 F:     scripts/spdxcheck-test.sh
 F:     scripts/spdxcheck.py
 
-LIGHTNVM PLATFORM SUPPORT
-M:     Matias Bjorling <mb@lightnvm.io>
-L:     linux-block@vger.kernel.org
-S:     Maintained
-W:     http://github/OpenChannelSSD
-F:     drivers/lightnvm/
-F:     include/linux/lightnvm.h
-F:     include/uapi/linux/lightnvm.h
-
 LINEAR RANGES HELPERS
 M:     Mark Brown <broonie@kernel.org>
 R:     Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>
@@ -11327,6 +11328,12 @@ W:     https://linuxtv.org
 T:     git git://linuxtv.org/media_tree.git
 F:     drivers/media/radio/radio-maxiradio*
 
+MCBA MICROCHIP CAN BUS ANALYZER TOOL DRIVER
+R:     Yasushi SHOJI <yashi@spacecubics.com>
+L:     linux-can@vger.kernel.org
+S:     Maintained
+F:     drivers/net/can/usb/mcba_usb.c
+
 MCAN MMIO DEVICE DRIVER
 M:     Chandrasekar Ramakrishnan <rcsekar@samsung.com>
 L:     linux-can@vger.kernel.org
@@ -14424,6 +14431,13 @@ S:     Maintained
 F:     Documentation/devicetree/bindings/pci/hisilicon-histb-pcie.txt
 F:     drivers/pci/controller/dwc/pcie-histb.c
 
+PCIE DRIVER FOR INTEL LGM GW SOC
+M:     Rahul Tanwar <rtanwar@maxlinear.com>
+L:     linux-pci@vger.kernel.org
+S:     Maintained
+F:     Documentation/devicetree/bindings/pci/intel-gw-pcie.yaml
+F:     drivers/pci/controller/dwc/pcie-intel-gw.c
+
 PCIE DRIVER FOR MEDIATEK
 M:     Ryder Lee <ryder.lee@mediatek.com>
 M:     Jianjun Wang <jianjun.wang@mediatek.com>
@@ -15468,6 +15482,8 @@ M:      Pan, Xinhui <Xinhui.Pan@amd.com>
 L:     amd-gfx@lists.freedesktop.org
 S:     Supported
 T:     git https://gitlab.freedesktop.org/agd5f/linux.git
+B:     https://gitlab.freedesktop.org/drm/amd/-/issues
+C:     irc://irc.oftc.net/radeon
 F:     drivers/gpu/drm/amd/
 F:     drivers/gpu/drm/radeon/
 F:     include/uapi/drm/amdgpu_drm.h
@@ -15795,7 +15811,7 @@ F:      Documentation/devicetree/bindings/i2c/renesas,iic-emev2.yaml
 F:     drivers/i2c/busses/i2c-emev2.c
 
 RENESAS ETHERNET DRIVERS
-R:     Sergei Shtylyov <sergei.shtylyov@gmail.com>
+R:     Sergey Shtylyov <s.shtylyov@omp.ru>
 L:     netdev@vger.kernel.org
 L:     linux-renesas-soc@vger.kernel.org
 F:     Documentation/devicetree/bindings/net/renesas,*.yaml
@@ -17807,7 +17823,7 @@ F:      include/linux/sync_file.h
 F:     include/uapi/linux/sync_file.h
 
 SYNOPSYS ARC ARCHITECTURE
-M:     Vineet Gupta <vgupta@synopsys.com>
+M:     Vineet Gupta <vgupta@kernel.org>
 L:     linux-snps-arc@lists.infradead.org
 S:     Supported
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc.git
@@ -20009,7 +20025,8 @@ F:      Documentation/devicetree/bindings/extcon/wlf,arizona.yaml
 F:     Documentation/devicetree/bindings/mfd/wlf,arizona.yaml
 F:     Documentation/devicetree/bindings/mfd/wm831x.txt
 F:     Documentation/devicetree/bindings/regulator/wlf,arizona.yaml
-F:     Documentation/devicetree/bindings/sound/wlf,arizona.yaml
+F:     Documentation/devicetree/bindings/sound/wlf,*.yaml
+F:     Documentation/devicetree/bindings/sound/wm*
 F:     Documentation/hwmon/wm83??.rst
 F:     arch/arm/mach-s3c/mach-crag6410*
 F:     drivers/clk/clk-wm83*.c
index 6b555f6..61741e9 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 14
 SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION =
 NAME = Opossums on Parade
 
 # *DOCUMENTATION*
@@ -546,7 +546,6 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn \
 PHONY += scripts_basic
 scripts_basic:
        $(Q)$(MAKE) $(build)=scripts/basic
-       $(Q)rm -f .tmp_quiet_recordmcount
 
 PHONY += outputmakefile
 ifdef building_out_of_srctree
@@ -1317,6 +1316,16 @@ PHONY += scripts_unifdef
 scripts_unifdef: scripts_basic
        $(Q)$(MAKE) $(build)=scripts scripts/unifdef
 
+# ---------------------------------------------------------------------------
+# Install
+
+# Many distributions have the custom install script, /sbin/installkernel.
+# If DKMS is installed, 'make install' will eventually recuses back
+# to the this Makefile to build and install external modules.
+# Cancel sub_make_done so that options such as M=, V=, etc. are parsed.
+
+install: sub_make_done :=
+
 # ---------------------------------------------------------------------------
 # Tools
 
index 129df49..98db634 100644 (file)
@@ -1282,6 +1282,9 @@ config ARCH_SPLIT_ARG64
 config ARCH_HAS_ELFCORE_COMPAT
        bool
 
+config ARCH_HAS_PARANOID_L1D_FLUSH
+       bool
+
 source "kernel/gcov/Kconfig"
 
 source "scripts/gcc-plugins/Kconfig"
index 77d3280..6c50877 100644 (file)
@@ -14,7 +14,6 @@ config ALPHA
        select PCI_SYSCALL if PCI
        select HAVE_AOUT
        select HAVE_ASM_MODVERSIONS
-       select HAVE_IDE
        select HAVE_PCSPKR_PLATFORM
        select HAVE_PERF_EVENTS
        select NEED_DMA_MAP_STATE
@@ -532,7 +531,7 @@ config SMP
          will run faster if you say N here.
 
          See also the SMP-HOWTO available at
-         <http://www.tldp.org/docs.html#howto>.
+         <https://www.tldp.org/docs.html#howto>.
 
          If you don't know what to do here, say N.
 
index 00266e6..b4faba2 100644 (file)
@@ -23,7 +23,7 @@
 #include "ksize.h"
 
 extern unsigned long switch_to_osf_pal(unsigned long nr,
-       struct pcb_struct * pcb_va, struct pcb_struct * pcb_pa,
+       struct pcb_struct *pcb_va, struct pcb_struct *pcb_pa,
        unsigned long *vptb);
 
 extern void move_stack(unsigned long new_stack);
index 43af718..90a2b34 100644 (file)
@@ -200,7 +200,7 @@ extern char _end;
        START_ADDR      KSEG address of the entry point of kernel code.
 
        ZERO_PGE        KSEG address of page full of zeroes, but 
-                       upon entry to kerne cvan be expected
+                       upon entry to kernel, it can be expected
                        to hold the parameter list and possible
                        INTRD information.
 
index d651922..325d4dd 100644 (file)
@@ -30,7 +30,7 @@ extern long srm_printk(const char *, ...)
      __attribute__ ((format (printf, 1, 2)));
 
 /*
- * gzip delarations
+ * gzip declarations
  */
 #define OF(args)  args
 #define STATIC static
index dd2dd9f..7f1ca30 100644 (file)
@@ -70,3 +70,4 @@ CONFIG_DEBUG_INFO=y
 CONFIG_ALPHA_LEGACY_START_ADDRESS=y
 CONFIG_MATHEMU=y
 CONFIG_CRYPTO_HMAC=y
+CONFIG_DEVTMPFS=y
index 5159ba2..ae64595 100644 (file)
@@ -4,15 +4,4 @@
 
 #include <uapi/asm/compiler.h>
 
-/* Some idiots over in <linux/compiler.h> thought inline should imply
-   always_inline.  This breaks stuff.  We'll include this file whenever
-   we run into such problems.  */
-
-#include <linux/compiler.h>
-#undef inline
-#undef __inline__
-#undef __inline
-#undef __always_inline
-#define __always_inline                inline __attribute__((always_inline))
-
 #endif /* __ALPHA_COMPILER_H */
index 11c688c..f21baba 100644 (file)
@@ -9,4 +9,10 @@ static inline int syscall_get_arch(struct task_struct *task)
        return AUDIT_ARCH_ALPHA;
 }
 
+static inline long syscall_get_return_value(struct task_struct *task,
+                                           struct pt_regs *regs)
+{
+       return regs->r0;
+}
+
 #endif /* _ASM_ALPHA_SYSCALL_H */
index d5367a1..d31167e 100644 (file)
@@ -834,7 +834,7 @@ SYSCALL_DEFINE5(osf_setsysinfo, unsigned long, op, void __user *, buffer,
                        return -EFAULT;
                state = &current_thread_info()->ieee_state;
 
-               /* Update softare trap enable bits.  */
+               /* Update software trap enable bits.  */
                *state = (*state & ~IEEE_SW_MASK) | (swcr & IEEE_SW_MASK);
 
                /* Update the real fpcr.  */
@@ -854,7 +854,7 @@ SYSCALL_DEFINE5(osf_setsysinfo, unsigned long, op, void __user *, buffer,
                state = &current_thread_info()->ieee_state;
                exc &= IEEE_STATUS_MASK;
 
-               /* Update softare trap enable bits.  */
+               /* Update software trap enable bits.  */
                swcr = (*state & IEEE_SW_MASK) | exc;
                *state |= exc;
 
index e7a59d9..efcf732 100644 (file)
@@ -574,7 +574,7 @@ static void alpha_pmu_start(struct perf_event *event, int flags)
  * Check that CPU performance counters are supported.
  * - currently support EV67 and later CPUs.
  * - actually some later revisions of the EV6 have the same PMC model as the
- *     EV67 but we don't do suffiently deep CPU detection to detect them.
+ *     EV67 but we don't do sufficiently deep CPU detection to detect them.
  *     Bad luck to the very few people who might have one, I guess.
  */
 static int supported_cpu(void)
index ef0c08e..a5123ea 100644 (file)
@@ -256,7 +256,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
                childstack->r26 = (unsigned long) ret_from_kernel_thread;
                childstack->r9 = usp;   /* function */
                childstack->r10 = kthread_arg;
-               childregs->hae = alpha_mv.hae_cache,
+               childregs->hae = alpha_mv.hae_cache;
                childti->pcb.usp = 0;
                return 0;
        }
index 7d56c21..b4fbbba 100644 (file)
@@ -319,18 +319,19 @@ setup_memory(void *kernel_end)
                       i, cluster->usage, cluster->start_pfn,
                       cluster->start_pfn + cluster->numpages);
 
-               /* Bit 0 is console/PALcode reserved.  Bit 1 is
-                  non-volatile memory -- we might want to mark
-                  this for later.  */
-               if (cluster->usage & 3)
-                       continue;
-
                end = cluster->start_pfn + cluster->numpages;
                if (end > max_low_pfn)
                        max_low_pfn = end;
 
                memblock_add(PFN_PHYS(cluster->start_pfn),
                             cluster->numpages << PAGE_SHIFT);
+
+               /* Bit 0 is console/PALcode reserved.  Bit 1 is
+                  non-volatile memory -- we might want to mark
+                  this for later.  */
+               if (cluster->usage & 3)
+                       memblock_reserve(PFN_PHYS(cluster->start_pfn),
+                                        cluster->numpages << PAGE_SHIFT);
        }
 
        /*
index 4b2575f..cb64e47 100644 (file)
@@ -582,7 +582,7 @@ void
 smp_send_stop(void)
 {
        cpumask_t to_whom;
-       cpumask_copy(&to_whom, cpu_possible_mask);
+       cpumask_copy(&to_whom, cpu_online_mask);
        cpumask_clear_cpu(smp_processor_id(), &to_whom);
 #ifdef DEBUG_IPI_MSG
        if (hard_smp_processor_id() != boot_cpu_id)
index 53adf43..96fd6ff 100644 (file)
@@ -212,7 +212,7 @@ nautilus_init_pci(void)
 
        /* Use default IO. */
        pci_add_resource(&bridge->windows, &ioport_resource);
-       /* Irongate PCI memory aperture, calculate requred size before
+       /* Irongate PCI memory aperture, calculate required size before
           setting it up. */
        pci_add_resource(&bridge->windows, &irongate_mem);
 
index 921d4b6..5398f98 100644 (file)
@@ -730,7 +730,7 @@ do_entUnaUser(void __user * va, unsigned long opcode,
        long error;
 
        /* Check the UAC bits to decide what the user wants us to do
-          with the unaliged access.  */
+          with the unaligned access.  */
 
        if (!(current_thread_info()->status & TS_UAC_NOPRINT)) {
                if (__ratelimit(&ratelimit)) {
index d568cd9..f7cef66 100644 (file)
@@ -65,7 +65,7 @@ static long (*save_emul) (unsigned long pc);
 long do_alpha_fp_emul_imprecise(struct pt_regs *, unsigned long);
 long do_alpha_fp_emul(unsigned long);
 
-int init_module(void)
+static int alpha_fp_emul_init_module(void)
 {
        save_emul_imprecise = alpha_fp_emul_imprecise;
        save_emul = alpha_fp_emul;
@@ -73,12 +73,14 @@ int init_module(void)
        alpha_fp_emul = do_alpha_fp_emul;
        return 0;
 }
+module_init(alpha_fp_emul_init_module);
 
-void cleanup_module(void)
+static void alpha_fp_emul_cleanup_module(void)
 {
        alpha_fp_emul_imprecise = save_emul_imprecise;
        alpha_fp_emul = save_emul;
 }
+module_exit(alpha_fp_emul_cleanup_module);
 
 #undef  alpha_fp_emul_imprecise
 #define alpha_fp_emul_imprecise                do_alpha_fp_emul_imprecise
@@ -401,3 +403,5 @@ alpha_fp_emul_imprecise (struct pt_regs *regs, unsigned long write_mask)
 egress:
        return si_code;
 }
+
+EXPORT_SYMBOL(__udiv_qrnnd);
index d8f51eb..b5bf68e 100644 (file)
@@ -409,7 +409,7 @@ choice
        help
          Depending on the configuration, CPU can contain DSP registers
          (ACC0_GLO, ACC0_GHI, DSP_BFLY0, DSP_CTRL, DSP_FFT_CTRL).
-         Bellow is options describing how to handle these registers in
+         Below are options describing how to handle these registers in
          interrupt entry / exit and in context switch.
 
 config ARC_DSP_NONE
index 69debd7..0b48580 100644 (file)
@@ -24,7 +24,7 @@
  */
 static inline __sum16 csum_fold(__wsum s)
 {
-       unsigned r = s << 16 | s >> 16; /* ror */
+       unsigned int r = s << 16 | s >> 16;     /* ror */
        s = ~s;
        s -= r;
        return s >> 16;
index 30b9ae5..e1971d3 100644 (file)
@@ -123,7 +123,7 @@ static const char * const arc_pmu_ev_hw_map[] = {
 #define C(_x)                  PERF_COUNT_HW_CACHE_##_x
 #define CACHE_OP_UNSUPPORTED   0xffff
 
-static const unsigned arc_pmu_cache_map[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
+static const unsigned int arc_pmu_cache_map[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
        [C(L1D)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)]      = PERF_COUNT_ARC_LDC,
index c67c0f0..ec64021 100644 (file)
@@ -57,23 +57,26 @@ void fpu_save_restore(struct task_struct *prev, struct task_struct *next)
 
 void fpu_init_task(struct pt_regs *regs)
 {
+       const unsigned int fwe = 0x80000000;
+
        /* default rounding mode */
        write_aux_reg(ARC_REG_FPU_CTRL, 0x100);
 
-       /* set "Write enable" to allow explicit write to exception flags */
-       write_aux_reg(ARC_REG_FPU_STATUS, 0x80000000);
+       /* Initialize to zero: setting requires FWE be set */
+       write_aux_reg(ARC_REG_FPU_STATUS, fwe);
 }
 
 void fpu_save_restore(struct task_struct *prev, struct task_struct *next)
 {
        struct arc_fpu *save = &prev->thread.fpu;
        struct arc_fpu *restore = &next->thread.fpu;
+       const unsigned int fwe = 0x80000000;
 
        save->ctrl = read_aux_reg(ARC_REG_FPU_CTRL);
        save->status = read_aux_reg(ARC_REG_FPU_STATUS);
 
        write_aux_reg(ARC_REG_FPU_CTRL, restore->ctrl);
-       write_aux_reg(ARC_REG_FPU_STATUS, restore->status);
+       write_aux_reg(ARC_REG_FPU_STATUS, (fwe | restore->status));
 }
 
 #endif
index abf9398..f9fdb55 100644 (file)
@@ -352,7 +352,7 @@ static void idu_cascade_isr(struct irq_desc *desc)
        irq_hw_number_t idu_hwirq = core_hwirq - FIRST_EXT_IRQ;
 
        chained_irq_enter(core_chip, desc);
-       generic_handle_irq(irq_find_mapping(idu_domain, idu_hwirq));
+       generic_handle_domain_irq(idu_domain, idu_hwirq);
        chained_irq_exit(core_chip, desc);
 }
 
index 47bab67..9e28058 100644 (file)
@@ -260,7 +260,7 @@ static void init_unwind_hdr(struct unwind_table *table,
 {
        const u8 *ptr;
        unsigned long tableSize = table->size, hdrSize;
-       unsigned n;
+       unsigned int n;
        const u32 *fde;
        struct {
                u8 version;
@@ -462,7 +462,7 @@ static uleb128_t get_uleb128(const u8 **pcur, const u8 *end)
 {
        const u8 *cur = *pcur;
        uleb128_t value;
-       unsigned shift;
+       unsigned int shift;
 
        for (shift = 0, value = 0; cur < end; shift += 7) {
                if (shift + 7 > 8 * sizeof(value)
@@ -483,7 +483,7 @@ static sleb128_t get_sleb128(const u8 **pcur, const u8 *end)
 {
        const u8 *cur = *pcur;
        sleb128_t value;
-       unsigned shift;
+       unsigned int shift;
 
        for (shift = 0, value = 0; cur < end; shift += 7) {
                if (shift + 7 > 8 * sizeof(value)
@@ -609,7 +609,7 @@ static unsigned long read_pointer(const u8 **pLoc, const void *end,
 static signed fde_pointer_type(const u32 *cie)
 {
        const u8 *ptr = (const u8 *)(cie + 2);
-       unsigned version = *ptr;
+       unsigned int version = *ptr;
 
        if (*++ptr) {
                const char *aug;
@@ -904,7 +904,7 @@ int arc_unwind(struct unwind_frame_info *frame)
        const u8 *ptr = NULL, *end = NULL;
        unsigned long pc = UNW_PC(frame) - frame->call_frame;
        unsigned long startLoc = 0, endLoc = 0, cfa;
-       unsigned i;
+       unsigned int i;
        signed ptrType = -1;
        uleb128_t retAddrReg = 0;
        const struct unwind_table *table;
index e2146a8..529ae50 100644 (file)
@@ -88,6 +88,8 @@ SECTIONS
                CPUIDLE_TEXT
                LOCK_TEXT
                KPROBES_TEXT
+               IRQENTRY_TEXT
+               SOFTIRQENTRY_TEXT
                *(.fixup)
                *(.gnu.warning)
        }
index 82f908f..2fb7012 100644 (file)
@@ -95,7 +95,6 @@ config ARM
        select HAVE_FUNCTION_TRACER if !XIP_KERNEL
        select HAVE_GCC_PLUGINS
        select HAVE_HW_BREAKPOINT if PERF_EVENTS && (CPU_V6 || CPU_V6K || CPU_V7)
-       select HAVE_IDE if PCI || ISA || PCMCIA
        select HAVE_IRQ_TIME_ACCOUNTING
        select HAVE_KERNEL_GZIP
        select HAVE_KERNEL_LZ4
@@ -361,7 +360,6 @@ config ARCH_FOOTBRIDGE
        bool "FootBridge"
        select CPU_SA110
        select FOOTBRIDGE
-       select HAVE_IDE
        select NEED_MACH_IO_H if !MMU
        select NEED_MACH_MEMORY_H
        help
@@ -430,7 +428,6 @@ config ARCH_PXA
        select GENERIC_IRQ_MULTI_HANDLER
        select GPIO_PXA
        select GPIOLIB
-       select HAVE_IDE
        select IRQ_DOMAIN
        select PLAT_PXA
        select SPARSE_IRQ
@@ -446,7 +443,6 @@ config ARCH_RPC
        select ARM_HAS_SG_CHAIN
        select CPU_SA110
        select FIQ
-       select HAVE_IDE
        select HAVE_PATA_PLATFORM
        select ISA_DMA_API
        select LEGACY_TIMER_TICK
@@ -469,7 +465,6 @@ config ARCH_SA1100
        select CPU_SA1100
        select GENERIC_IRQ_MULTI_HANDLER
        select GPIOLIB
-       select HAVE_IDE
        select IRQ_DOMAIN
        select ISA
        select NEED_MACH_MEMORY_H
@@ -505,7 +500,6 @@ config ARCH_OMAP1
        select GENERIC_IRQ_CHIP
        select GENERIC_IRQ_MULTI_HANDLER
        select GPIOLIB
-       select HAVE_IDE
        select HAVE_LEGACY_CLK
        select IRQ_DOMAIN
        select NEED_MACH_IO_H if PCCARD
index 40ef397..ba58e6b 100644 (file)
                                compatible = "ti,am4372-d_can", "ti,am3352-d_can";
                                reg = <0x0 0x2000>;
                                clocks = <&dcan1_fck>;
-                               clock-name = "fck";
+                               clock-names = "fck";
                                syscon-raminit = <&scm_conf 0x644 1>;
                                interrupts = <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>;
                                status = "disabled";
index aae0af1..2aa75ab 100644 (file)
        status = "okay";
        pinctrl-names = "default";
        pinctrl-0 = <&i2c0_pins>;
-       clock-frequency = <400000>;
+       clock-frequency = <100000>;
 
        tps65218: tps65218@24 {
                reg = <0x24>;
index f98691a..d3082b9 100644 (file)
 
                pinctrl_power_button: powerbutgrp {
                        fsl,pins = <
-                               MX53_PAD_SD2_DATA2__GPIO1_13            0x1e4
+                               MX53_PAD_SD2_DATA0__GPIO1_15            0x1e4
                        >;
                };
 
                pinctrl_power_out: poweroutgrp {
                        fsl,pins = <
-                               MX53_PAD_SD2_DATA0__GPIO1_15            0x1e4
+                               MX53_PAD_SD2_DATA2__GPIO1_13            0x1e4
                        >;
                };
 
index 0ad8ccd..f86efd0 100644 (file)
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_microsom_enet_ar8035>;
        phy-mode = "rgmii-id";
-       phy-reset-duration = <2>;
+
+       /*
+        * The PHY seems to require a long-enough reset duration to avoid
+        * some rare issues where the PHY gets stuck in an inconsistent and
+        * non-functional state at boot-up. 10ms proved to be fine .
+        */
+       phy-reset-duration = <10>;
        phy-reset-gpios = <&gpio4 15 GPIO_ACTIVE_LOW>;
        status = "okay";
 
index a054543..9f1e382 100644 (file)
@@ -43,6 +43,7 @@
        assigned-clock-rates = <0>, <198000000>;
        cap-power-off-card;
        keep-power-in-suspend;
+       max-frequency = <25000000>;
        mmc-pwrseq = <&wifi_pwrseq>;
        no-1-8-v;
        non-removable;
index 45435bb..373984c 100644 (file)
                regulator-max-microvolt = <5000000>;
        };
 
-       vdds_1v8_main: fixedregulator-vdds_1v8_main {
-               compatible = "regulator-fixed";
-               regulator-name = "vdds_1v8_main";
-               vin-supply = <&smps7_reg>;
-               regulator-min-microvolt = <1800000>;
-               regulator-max-microvolt = <1800000>;
-       };
-
        vmmcsd_fixed: fixedregulator-mmcsd {
                compatible = "regulator-fixed";
                regulator-name = "vmmcsd_fixed";
                                        regulator-boot-on;
                                };
 
+                               vdds_1v8_main:
                                smps7_reg: smps7 {
                                        /* VDDS_1v8_OMAP over VDDS_1v8_MAIN */
                                        regulator-name = "smps7";
index c9b9064..1815361 100644 (file)
                        status = "disabled";
                };
 
-               vica: intc@10140000 {
+               vica: interrupt-controller@10140000 {
                        compatible = "arm,versatile-vic";
                        interrupt-controller;
                        #interrupt-cells = <1>;
                        reg = <0x10140000 0x20>;
                };
 
-               vicb: intc@10140020 {
+               vicb: interrupt-controller@10140020 {
                        compatible = "arm,versatile-vic";
                        interrupt-controller;
                        #interrupt-cells = <1>;
index c5ea08f..6cf1c8b 100644 (file)
@@ -37,7 +37,7 @@
                poll-interval = <20>;
 
                /*
-                * The EXTi IRQ line 3 is shared with touchscreen and ethernet,
+                * The EXTi IRQ line 3 is shared with ethernet,
                 * so mark this as polled GPIO key.
                 */
                button-0 {
                        gpios = <&gpiof 3 GPIO_ACTIVE_LOW>;
                };
 
+               /*
+                * The EXTi IRQ line 6 is shared with touchscreen,
+                * so mark this as polled GPIO key.
+                */
+               button-1 {
+                       label = "TA2-GPIO-B";
+                       linux,code = <KEY_B>;
+                       gpios = <&gpiod 6 GPIO_ACTIVE_LOW>;
+               };
+
                /*
                 * The EXTi IRQ line 0 is shared with PMIC,
                 * so mark this as polled GPIO key.
        gpio-keys {
                compatible = "gpio-keys";
 
-               button-1 {
-                       label = "TA2-GPIO-B";
-                       linux,code = <KEY_B>;
-                       gpios = <&gpiod 6 GPIO_ACTIVE_LOW>;
-                       wakeup-source;
-               };
-
                button-3 {
                        label = "TA4-GPIO-D";
                        linux,code = <KEY_D>;
@@ -82,6 +85,7 @@
                        label = "green:led5";
                        gpios = <&gpioc 6 GPIO_ACTIVE_HIGH>;
                        default-state = "off";
+                       status = "disabled";
                };
 
                led-1 {
        touchscreen@38 {
                compatible = "edt,edt-ft5406";
                reg = <0x38>;
-               interrupt-parent = <&gpiog>;
-               interrupts = <2 IRQ_TYPE_EDGE_FALLING>; /* GPIO E */
+               interrupt-parent = <&gpioc>;
+               interrupts = <6 IRQ_TYPE_EDGE_FALLING>; /* GPIO E */
        };
 };
 
index 2af0a67..8c41f81 100644 (file)
@@ -12,6 +12,8 @@
        aliases {
                ethernet0 = &ethernet0;
                ethernet1 = &ksz8851;
+               rtc0 = &hwrtc;
+               rtc1 = &rtc;
        };
 
        memory@c0000000 {
                        reset-gpios = <&gpioh 3 GPIO_ACTIVE_LOW>;
                        reset-assert-us = <500>;
                        reset-deassert-us = <500>;
+                       smsc,disable-energy-detect;
                        interrupt-parent = <&gpioi>;
                        interrupts = <11 IRQ_TYPE_LEVEL_LOW>;
                };
        /delete-property/dmas;
        /delete-property/dma-names;
 
-       rtc@32 {
+       hwrtc: rtc@32 {
                compatible = "microcrystal,rv8803";
                reg = <0x32>;
        };
index ff5e0d0..d17083c 100644 (file)
@@ -196,14 +196,6 @@ static int sa1111_map_irq(struct sa1111 *sachip, irq_hw_number_t hwirq)
        return irq_create_mapping(sachip->irqdomain, hwirq);
 }
 
-static void sa1111_handle_irqdomain(struct irq_domain *irqdomain, int irq)
-{
-       struct irq_desc *d = irq_to_desc(irq_linear_revmap(irqdomain, irq));
-
-       if (d)
-               generic_handle_irq_desc(d);
-}
-
 /*
  * SA1111 interrupt support.  Since clearing an IRQ while there are
  * active IRQs causes the interrupt output to pulse, the upper levels
@@ -234,11 +226,11 @@ static void sa1111_irq_handler(struct irq_desc *desc)
 
        for (i = 0; stat0; i++, stat0 >>= 1)
                if (stat0 & 1)
-                       sa1111_handle_irqdomain(irqdomain, i);
+                       generic_handle_domain_irq(irqdomain, i);
 
        for (i = 32; stat1; i++, stat1 >>= 1)
                if (stat1 & 1)
-                       sa1111_handle_irqdomain(irqdomain, i);
+                       generic_handle_domain_irq(irqdomain, i);
 
        /* For level-based interrupts */
        desc->irq_data.chip->irq_unmask(&desc->irq_data);
index 3f35761..23595fc 100644 (file)
@@ -15,8 +15,6 @@ CONFIG_SLAB=y
 CONFIG_ARCH_NOMADIK=y
 CONFIG_MACH_NOMADIK_8815NHK=y
 CONFIG_AEABI=y
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
@@ -52,9 +50,9 @@ CONFIG_MTD_BLOCK=y
 CONFIG_MTD_ONENAND=y
 CONFIG_MTD_ONENAND_VERIFY_WRITE=y
 CONFIG_MTD_ONENAND_GENERIC=y
-CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC=y
 CONFIG_MTD_RAW_NAND=y
 CONFIG_MTD_NAND_FSMC=y
+CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC=y
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_CRYPTOLOOP=y
 CONFIG_BLK_DEV_RAM=y
@@ -97,6 +95,7 @@ CONFIG_REGULATOR=y
 CONFIG_DRM=y
 CONFIG_DRM_PANEL_TPO_TPG110=y
 CONFIG_DRM_PL111=y
+CONFIG_FB=y
 CONFIG_BACKLIGHT_CLASS_DEVICE=y
 CONFIG_BACKLIGHT_PWM=y
 CONFIG_FRAMEBUFFER_CONSOLE=y
@@ -136,9 +135,8 @@ CONFIG_NLS_ISO8859_15=y
 CONFIG_CRYPTO_MD5=y
 CONFIG_CRYPTO_SHA1=y
 CONFIG_CRYPTO_DES=y
+# CONFIG_DEBUG_BUGVERBOSE is not set
 CONFIG_DEBUG_INFO=y
-# CONFIG_ENABLE_MUST_CHECK is not set
 CONFIG_DEBUG_FS=y
 # CONFIG_SCHED_DEBUG is not set
 # CONFIG_DEBUG_PREEMPT is not set
-# CONFIG_DEBUG_BUGVERBOSE is not set
index 31eb75b..9bdafd5 100644 (file)
@@ -112,7 +112,7 @@ static struct kpp_alg curve25519_alg = {
        .max_size               = curve25519_max_size,
 };
 
-static int __init mod_init(void)
+static int __init arm_curve25519_init(void)
 {
        if (elf_hwcap & HWCAP_NEON) {
                static_branch_enable(&have_neon);
@@ -122,14 +122,14 @@ static int __init mod_init(void)
        return 0;
 }
 
-static void __exit mod_exit(void)
+static void __exit arm_curve25519_exit(void)
 {
        if (IS_REACHABLE(CONFIG_CRYPTO_KPP) && elf_hwcap & HWCAP_NEON)
                crypto_unregister_kpp(&curve25519_alg);
 }
 
-module_init(mod_init);
-module_exit(mod_exit);
+module_init(arm_curve25519_init);
+module_exit(arm_curve25519_exit);
 
 MODULE_ALIAS_CRYPTO("curve25519");
 MODULE_ALIAS_CRYPTO("curve25519-neon");
index cfc9dfd..f673e13 100644 (file)
@@ -160,10 +160,11 @@ extern unsigned long vectors_base;
 
 /*
  * Physical start and end address of the kernel sections. These addresses are
- * 2MB-aligned to match the section mappings placed over the kernel.
+ * 2MB-aligned to match the section mappings placed over the kernel. We use
+ * u64 so that LPAE mappings beyond the 32bit limit will work out as well.
  */
-extern u32 kernel_sec_start;
-extern u32 kernel_sec_end;
+extern u64 kernel_sec_start;
+extern u64 kernel_sec_end;
 
 /*
  * Physical vs virtual RAM address space conversion.  These are
index 9eb0b4d..29070eb 100644 (file)
@@ -49,7 +49,8 @@
 
        /*
         * This needs to be assigned at runtime when the linker symbols are
-        * resolved.
+        * resolved. These are unsigned 64bit really, but in this assembly code
+        * We store them as 32bit.
         */
        .pushsection .data
        .align  2
@@ -57,7 +58,9 @@
        .globl  kernel_sec_end
 kernel_sec_start:
        .long   0
+       .long   0
 kernel_sec_end:
+       .long   0
        .long   0
        .popsection
 
@@ -250,7 +253,11 @@ __create_page_tables:
        add     r0, r4, #KERNEL_OFFSET >> (SECTION_SHIFT - PMD_ORDER)
        ldr     r6, =(_end - 1)
        adr_l   r5, kernel_sec_start            @ _pa(kernel_sec_start)
-       str     r8, [r5]                        @ Save physical start of kernel
+#ifdef CONFIG_CPU_ENDIAN_BE8
+       str     r8, [r5, #4]                    @ Save physical start of kernel (BE)
+#else
+       str     r8, [r5]                        @ Save physical start of kernel (LE)
+#endif
        orr     r3, r8, r7                      @ Add the MMU flags
        add     r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ORDER)
 1:     str     r3, [r0], #1 << PMD_ORDER
@@ -259,7 +266,11 @@ __create_page_tables:
        bls     1b
        eor     r3, r3, r7                      @ Remove the MMU flags
        adr_l   r5, kernel_sec_end              @ _pa(kernel_sec_end)
-       str     r3, [r5]                        @ Save physical end of kernel
+#ifdef CONFIG_CPU_ENDIAN_BE8
+       str     r3, [r5, #4]                    @ Save physical end of kernel (BE)
+#else
+       str     r3, [r5]                        @ Save physical end of kernel (LE)
+#endif
 
 #ifdef CONFIG_XIP_KERNEL
        /*
index de11030..1d3aef8 100644 (file)
@@ -9,7 +9,6 @@ menuconfig ARCH_DAVINCI
        select PM_GENERIC_DOMAINS_OF if PM && OF
        select REGMAP_MMIO
        select RESET_CONTROLLER
-       select HAVE_IDE
        select PINCTRL_SINGLE
 
 if ARCH_DAVINCI
index f0a073a..13f3068 100644 (file)
@@ -68,7 +68,6 @@ void imx_set_cpu_arg(int cpu, u32 arg);
 void v7_secondary_startup(void);
 void imx_scu_map_io(void);
 void imx_smp_prepare(void);
-void imx_gpcv2_set_core1_pdn_pup_by_software(bool pdn);
 #else
 static inline void imx_scu_map_io(void) {}
 static inline void imx_smp_prepare(void) {}
@@ -81,6 +80,7 @@ void imx_gpc_mask_all(void);
 void imx_gpc_restore_all(void);
 void imx_gpc_hwirq_mask(unsigned int hwirq);
 void imx_gpc_hwirq_unmask(unsigned int hwirq);
+void imx_gpcv2_set_core1_pdn_pup_by_software(bool pdn);
 void imx_anatop_init(void);
 void imx_anatop_pre_suspend(void);
 void imx_anatop_post_resume(void);
index 0dfd0ae..af12668 100644 (file)
@@ -103,6 +103,7 @@ struct mmdc_pmu {
        struct perf_event *mmdc_events[MMDC_NUM_COUNTERS];
        struct hlist_node node;
        struct fsl_mmdc_devtype_data *devtype_data;
+       struct clk *mmdc_ipg_clk;
 };
 
 /*
@@ -462,11 +463,14 @@ static int imx_mmdc_remove(struct platform_device *pdev)
 
        cpuhp_state_remove_instance_nocalls(cpuhp_mmdc_state, &pmu_mmdc->node);
        perf_pmu_unregister(&pmu_mmdc->pmu);
+       iounmap(pmu_mmdc->mmdc_base);
+       clk_disable_unprepare(pmu_mmdc->mmdc_ipg_clk);
        kfree(pmu_mmdc);
        return 0;
 }
 
-static int imx_mmdc_perf_init(struct platform_device *pdev, void __iomem *mmdc_base)
+static int imx_mmdc_perf_init(struct platform_device *pdev, void __iomem *mmdc_base,
+                             struct clk *mmdc_ipg_clk)
 {
        struct mmdc_pmu *pmu_mmdc;
        char *name;
@@ -494,6 +498,7 @@ static int imx_mmdc_perf_init(struct platform_device *pdev, void __iomem *mmdc_b
        }
 
        mmdc_num = mmdc_pmu_init(pmu_mmdc, mmdc_base, &pdev->dev);
+       pmu_mmdc->mmdc_ipg_clk = mmdc_ipg_clk;
        if (mmdc_num == 0)
                name = "mmdc";
        else
@@ -529,7 +534,7 @@ pmu_free:
 
 #else
 #define imx_mmdc_remove NULL
-#define imx_mmdc_perf_init(pdev, mmdc_base) 0
+#define imx_mmdc_perf_init(pdev, mmdc_base, mmdc_ipg_clk) 0
 #endif
 
 static int imx_mmdc_probe(struct platform_device *pdev)
@@ -567,7 +572,13 @@ static int imx_mmdc_probe(struct platform_device *pdev)
        val &= ~(1 << BP_MMDC_MAPSR_PSD);
        writel_relaxed(val, reg);
 
-       return imx_mmdc_perf_init(pdev, mmdc_base);
+       err = imx_mmdc_perf_init(pdev, mmdc_base, mmdc_ipg_clk);
+       if (err) {
+               iounmap(mmdc_base);
+               clk_disable_unprepare(mmdc_ipg_clk);
+       }
+
+       return err;
 }
 
 int imx_mmdc_get_ddr_type(void)
index bf14d65..34a1c77 100644 (file)
@@ -91,6 +91,7 @@ config MACH_IXDP465
 
 config MACH_GORAMO_MLR
        bool "GORAMO Multi Link Router"
+       depends on IXP4XX_PCI_LEGACY
        help
          Say 'Y' here if you want your kernel to support GORAMO
          MultiLink router.
index abb07f1..74e63d4 100644 (file)
 /*
  * PCI Control/Status Registers
  */
-#define IXP4XX_PCI_CSR(x) ((volatile u32 *)(IXP4XX_PCI_CFG_BASE_VIRT+(x)))
-
-#define PCI_NP_AD               IXP4XX_PCI_CSR(PCI_NP_AD_OFFSET)
-#define PCI_NP_CBE              IXP4XX_PCI_CSR(PCI_NP_CBE_OFFSET)
-#define PCI_NP_WDATA            IXP4XX_PCI_CSR(PCI_NP_WDATA_OFFSET)
-#define PCI_NP_RDATA            IXP4XX_PCI_CSR(PCI_NP_RDATA_OFFSET)
-#define PCI_CRP_AD_CBE          IXP4XX_PCI_CSR(PCI_CRP_AD_CBE_OFFSET)
-#define PCI_CRP_WDATA           IXP4XX_PCI_CSR(PCI_CRP_WDATA_OFFSET)
-#define PCI_CRP_RDATA           IXP4XX_PCI_CSR(PCI_CRP_RDATA_OFFSET)
-#define PCI_CSR                 IXP4XX_PCI_CSR(PCI_CSR_OFFSET) 
-#define PCI_ISR                 IXP4XX_PCI_CSR(PCI_ISR_OFFSET)
-#define PCI_INTEN               IXP4XX_PCI_CSR(PCI_INTEN_OFFSET)
-#define PCI_DMACTRL             IXP4XX_PCI_CSR(PCI_DMACTRL_OFFSET)
-#define PCI_AHBMEMBASE          IXP4XX_PCI_CSR(PCI_AHBMEMBASE_OFFSET)
-#define PCI_AHBIOBASE           IXP4XX_PCI_CSR(PCI_AHBIOBASE_OFFSET)
-#define PCI_PCIMEMBASE          IXP4XX_PCI_CSR(PCI_PCIMEMBASE_OFFSET)
-#define PCI_AHBDOORBELL         IXP4XX_PCI_CSR(PCI_AHBDOORBELL_OFFSET)
-#define PCI_PCIDOORBELL         IXP4XX_PCI_CSR(PCI_PCIDOORBELL_OFFSET)
-#define PCI_ATPDMA0_AHBADDR     IXP4XX_PCI_CSR(PCI_ATPDMA0_AHBADDR_OFFSET)
-#define PCI_ATPDMA0_PCIADDR     IXP4XX_PCI_CSR(PCI_ATPDMA0_PCIADDR_OFFSET)
-#define PCI_ATPDMA0_LENADDR     IXP4XX_PCI_CSR(PCI_ATPDMA0_LENADDR_OFFSET)
-#define PCI_ATPDMA1_AHBADDR     IXP4XX_PCI_CSR(PCI_ATPDMA1_AHBADDR_OFFSET)
-#define PCI_ATPDMA1_PCIADDR     IXP4XX_PCI_CSR(PCI_ATPDMA1_PCIADDR_OFFSET)
-#define PCI_ATPDMA1_LENADDR     IXP4XX_PCI_CSR(PCI_ATPDMA1_LENADDR_OFFSET)
+#define _IXP4XX_PCI_CSR(x) ((volatile u32 *)(IXP4XX_PCI_CFG_BASE_VIRT+(x)))
+
+#define PCI_NP_AD               _IXP4XX_PCI_CSR(PCI_NP_AD_OFFSET)
+#define PCI_NP_CBE              _IXP4XX_PCI_CSR(PCI_NP_CBE_OFFSET)
+#define PCI_NP_WDATA            _IXP4XX_PCI_CSR(PCI_NP_WDATA_OFFSET)
+#define PCI_NP_RDATA            _IXP4XX_PCI_CSR(PCI_NP_RDATA_OFFSET)
+#define PCI_CRP_AD_CBE          _IXP4XX_PCI_CSR(PCI_CRP_AD_CBE_OFFSET)
+#define PCI_CRP_WDATA           _IXP4XX_PCI_CSR(PCI_CRP_WDATA_OFFSET)
+#define PCI_CRP_RDATA           _IXP4XX_PCI_CSR(PCI_CRP_RDATA_OFFSET)
+#define PCI_CSR                 _IXP4XX_PCI_CSR(PCI_CSR_OFFSET) 
+#define PCI_ISR                 _IXP4XX_PCI_CSR(PCI_ISR_OFFSET)
+#define PCI_INTEN               _IXP4XX_PCI_CSR(PCI_INTEN_OFFSET)
+#define PCI_DMACTRL             _IXP4XX_PCI_CSR(PCI_DMACTRL_OFFSET)
+#define PCI_AHBMEMBASE          _IXP4XX_PCI_CSR(PCI_AHBMEMBASE_OFFSET)
+#define PCI_AHBIOBASE           _IXP4XX_PCI_CSR(PCI_AHBIOBASE_OFFSET)
+#define PCI_PCIMEMBASE          _IXP4XX_PCI_CSR(PCI_PCIMEMBASE_OFFSET)
+#define PCI_AHBDOORBELL         _IXP4XX_PCI_CSR(PCI_AHBDOORBELL_OFFSET)
+#define PCI_PCIDOORBELL         _IXP4XX_PCI_CSR(PCI_PCIDOORBELL_OFFSET)
+#define PCI_ATPDMA0_AHBADDR     _IXP4XX_PCI_CSR(PCI_ATPDMA0_AHBADDR_OFFSET)
+#define PCI_ATPDMA0_PCIADDR     _IXP4XX_PCI_CSR(PCI_ATPDMA0_PCIADDR_OFFSET)
+#define PCI_ATPDMA0_LENADDR     _IXP4XX_PCI_CSR(PCI_ATPDMA0_LENADDR_OFFSET)
+#define PCI_ATPDMA1_AHBADDR     _IXP4XX_PCI_CSR(PCI_ATPDMA1_AHBADDR_OFFSET)
+#define PCI_ATPDMA1_PCIADDR     _IXP4XX_PCI_CSR(PCI_ATPDMA1_PCIADDR_OFFSET)
+#define PCI_ATPDMA1_LENADDR     _IXP4XX_PCI_CSR(PCI_ATPDMA1_LENADDR_OFFSET)
 
 /*
  * PCI register values and bit definitions 
index 65934b2..12b26e0 100644 (file)
@@ -3776,6 +3776,7 @@ struct powerdomain *omap_hwmod_get_pwrdm(struct omap_hwmod *oh)
        struct omap_hwmod_ocp_if *oi;
        struct clockdomain *clkdm;
        struct clk_hw_omap *clk;
+       struct clk_hw *hw;
 
        if (!oh)
                return NULL;
@@ -3792,7 +3793,14 @@ struct powerdomain *omap_hwmod_get_pwrdm(struct omap_hwmod *oh)
                c = oi->_clk;
        }
 
-       clk = to_clk_hw_omap(__clk_get_hw(c));
+       hw = __clk_get_hw(c);
+       if (!hw)
+               return NULL;
+
+       clk = to_clk_hw_omap(hw);
+       if (!clk)
+               return NULL;
+
        clkdm = clk->clkdm;
        if (!clkdm)
                return NULL;
index bddfc7c..eda5a47 100644 (file)
@@ -39,10 +39,8 @@ static irqreturn_t cplds_irq_handler(int in_irq, void *d)
 
        do {
                pending = readl(fpga->base + FPGA_IRQ_SET_CLR) & fpga->irq_mask;
-               for_each_set_bit(bit, &pending, CPLDS_NB_IRQ) {
-                       generic_handle_irq(irq_find_mapping(fpga->irqdomain,
-                                                           bit));
-               }
+               for_each_set_bit(bit, &pending, CPLDS_NB_IRQ)
+                       generic_handle_domain_irq(fpga->irqdomain, bit);
        } while (pending);
 
        return IRQ_HANDLED;
index d23970b..f70fb9c 100644 (file)
@@ -49,6 +49,7 @@ static int __init parse_tag_acorn(const struct tag *tag)
                fallthrough;    /* ??? */
        case 256:
                vram_size += PAGE_SIZE * 256;
+               break;
        default:
                break;
        }
index 0c631c1..3edc5f6 100644 (file)
@@ -298,7 +298,7 @@ static void s3c_irq_demux(struct irq_desc *desc)
        struct s3c_irq_data *irq_data = irq_desc_get_chip_data(desc);
        struct s3c_irq_intc *intc = irq_data->intc;
        struct s3c_irq_intc *sub_intc = irq_data->sub_intc;
-       unsigned int n, offset, irq;
+       unsigned int n, offset;
        unsigned long src, msk;
 
        /* we're using individual domains for the non-dt case
@@ -318,8 +318,7 @@ static void s3c_irq_demux(struct irq_desc *desc)
        while (src) {
                n = __ffs(src);
                src &= ~(1 << n);
-               irq = irq_find_mapping(sub_intc->domain, offset + n);
-               generic_handle_irq(irq);
+               generic_handle_domain_irq(sub_intc->domain, offset + n);
        }
 
        chained_irq_exit(chip, desc);
index 7583bda..a4e0060 100644 (file)
@@ -1608,6 +1608,13 @@ static void __init early_paging_init(const struct machine_desc *mdesc)
        if (offset == 0)
                return;
 
+       /*
+        * Offset the kernel section physical offsets so that the kernel
+        * mapping will work out later on.
+        */
+       kernel_sec_start += offset;
+       kernel_sec_end += offset;
+
        /*
         * Get the address of the remap function in the 1:1 identity
         * mapping setup by the early page table assembly code.  We
@@ -1716,7 +1723,7 @@ void __init paging_init(const struct machine_desc *mdesc)
 {
        void *zero_page;
 
-       pr_debug("physical kernel sections: 0x%08x-0x%08x\n",
+       pr_debug("physical kernel sections: 0x%08llx-0x%08llx\n",
                 kernel_sec_start, kernel_sec_end);
 
        prepare_page_table();
index 5c5e195..f8e11f7 100644 (file)
@@ -29,7 +29,7 @@ ENTRY(lpae_pgtables_remap_asm)
        ldr     r6, =(_end - 1)
        add     r7, r2, #0x1000
        add     r6, r7, r6, lsr #SECTION_SHIFT - L2_ORDER
-       add     r7, r7, #PAGE_OFFSET >> (SECTION_SHIFT - L2_ORDER)
+       add     r7, r7, #KERNEL_OFFSET >> (SECTION_SHIFT - L2_ORDER)
 1:     ldrd    r4, r5, [r7]
        adds    r4, r4, r0
        adc     r5, r5, r1
index 897634d..a951276 100644 (file)
@@ -1602,6 +1602,9 @@ exit:
                rn = arm_bpf_get_reg32(src_lo, tmp2[1], ctx);
                emit_ldx_r(dst, rn, off, ctx, BPF_SIZE(code));
                break;
+       /* speculation barrier */
+       case BPF_ST | BPF_NOSPEC:
+               break;
        /* ST: *(size *)(dst + off) = imm */
        case BPF_ST | BPF_MEM | BPF_W:
        case BPF_ST | BPF_MEM | BPF_H:
index b5b13a9..62c3c1d 100644 (file)
@@ -156,6 +156,7 @@ config ARM64
        select HAVE_ARCH_KGDB
        select HAVE_ARCH_MMAP_RND_BITS
        select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
+       select HAVE_ARCH_PFN_VALID
        select HAVE_ARCH_PREL32_RELOCATIONS
        select HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET
        select HAVE_ARCH_SECCOMP_FILTER
@@ -1800,11 +1801,11 @@ config RANDOMIZE_BASE
          If unsure, say N.
 
 config RANDOMIZE_MODULE_REGION_FULL
-       bool "Randomize the module region over a 4 GB range"
+       bool "Randomize the module region over a 2 GB range"
        depends on RANDOMIZE_BASE
        default y
        help
-         Randomizes the location of the module region inside a 4 GB window
+         Randomizes the location of the module region inside a 2 GB window
          covering the core kernel. This way, it is less likely for modules
          to leak information about the location of core kernel data structures
          but it does imply that function calls between modules and the core
@@ -1812,7 +1813,10 @@ config RANDOMIZE_MODULE_REGION_FULL
 
          When this option is not set, the module region will be randomized over
          a limited range that contains the [_stext, _etext] interval of the
-         core kernel, so branch relocations are always in range.
+         core kernel, so branch relocations are almost always in range unless
+         ARM64_MODULE_PLTS is enabled and the region is exhausted. In this
+         particular case of region exhaustion, modules might be able to fall
+         back to a larger 2GB area.
 
 config CC_HAVE_STACKPROTECTOR_SYSREG
        def_bool $(cc-option,-mstack-protector-guard=sysreg -mstack-protector-guard-reg=sp_el0 -mstack-protector-guard-offset=0)
index 7bc37d0..1110d38 100644 (file)
@@ -21,19 +21,11 @@ LDFLAGS_vmlinux             += -shared -Bsymbolic -z notext \
 endif
 
 ifeq ($(CONFIG_ARM64_ERRATUM_843419),y)
-  ifneq ($(CONFIG_ARM64_LD_HAS_FIX_ERRATUM_843419),y)
-$(warning ld does not support --fix-cortex-a53-843419; kernel may be susceptible to erratum)
-  else
+  ifeq ($(CONFIG_ARM64_LD_HAS_FIX_ERRATUM_843419),y)
 LDFLAGS_vmlinux        += --fix-cortex-a53-843419
   endif
 endif
 
-ifeq ($(CONFIG_ARM64_USE_LSE_ATOMICS), y)
-  ifneq ($(CONFIG_ARM64_LSE_ATOMICS), y)
-$(warning LSE atomics not supported by binutils)
-  endif
-endif
-
 cc_has_k_constraint := $(call try-run,echo                             \
        'int main(void) {                                               \
                asm volatile("and w0, w0, %w0" :: "K" (4294967295));    \
@@ -176,10 +168,23 @@ vdso_install:
 
 archprepare:
        $(Q)$(MAKE) $(build)=arch/arm64/tools kapi
+ifeq ($(CONFIG_ARM64_ERRATUM_843419),y)
+  ifneq ($(CONFIG_ARM64_LD_HAS_FIX_ERRATUM_843419),y)
+       @echo "warning: ld does not support --fix-cortex-a53-843419; kernel may be susceptible to erratum" >&2
+  endif
+endif
+ifeq ($(CONFIG_ARM64_USE_LSE_ATOMICS),y)
+  ifneq ($(CONFIG_ARM64_LSE_ATOMICS),y)
+       @echo "warning: LSE atomics not supported by binutils" >&2
+  endif
+endif
+
 
 # We use MRPROPER_FILES and CLEAN_FILES now
 archclean:
        $(Q)$(MAKE) $(clean)=$(boot)
+       $(Q)$(MAKE) $(clean)=arch/arm64/kernel/vdso
+       $(Q)$(MAKE) $(clean)=arch/arm64/kernel/vdso32
 
 ifeq ($(KBUILD_EXTMOD),)
 # We need to generate vdso-offsets.h before compiling certain files in kernel/.
index dd764b7..f6a79c8 100644 (file)
@@ -54,6 +54,7 @@
 
 &mscc_felix_port0 {
        label = "swp0";
+       managed = "in-band-status";
        phy-handle = <&phy0>;
        phy-mode = "sgmii";
        status = "okay";
@@ -61,6 +62,7 @@
 
 &mscc_felix_port1 {
        label = "swp1";
+       managed = "in-band-status";
        phy-handle = <&phy1>;
        phy-mode = "sgmii";
        status = "okay";
index b2e3e5d..343ecf0 100644 (file)
@@ -66,7 +66,7 @@
                };
        };
 
-       sysclk: clock-sysclk {
+       sysclk: sysclk {
                compatible = "fixed-clock";
                #clock-cells = <0>;
                clock-frequency = <100000000>;
index ca38d0d..f4eaab3 100644 (file)
                        };
 
                        flexcan1: can@308c0000 {
-                               compatible = "fsl,imx8mp-flexcan", "fsl,imx6q-flexcan";
+                               compatible = "fsl,imx8mp-flexcan";
                                reg = <0x308c0000 0x10000>;
                                interrupts = <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>;
                                clocks = <&clk IMX8MP_CLK_IPG_ROOT>,
                        };
 
                        flexcan2: can@308d0000 {
-                               compatible = "fsl,imx8mp-flexcan", "fsl,imx6q-flexcan";
+                               compatible = "fsl,imx8mp-flexcan";
                                reg = <0x308d0000 0x10000>;
                                interrupts = <GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>;
                                clocks = <&clk IMX8MP_CLK_IPG_ROOT>,
index ce2bcdd..a05b1ab 100644 (file)
@@ -19,6 +19,8 @@
        aliases {
                spi0 = &spi0;
                ethernet1 = &eth1;
+               mmc0 = &sdhci0;
+               mmc1 = &sdhci1;
        };
 
        chosen {
        pinctrl-names = "default";
        pinctrl-0 = <&i2c1_pins>;
        clock-frequency = <100000>;
+       /delete-property/ mrvl,i2c-fast-mode;
        status = "okay";
 
        rtc@6f {
index 076d5ef..5ba7a45 100644 (file)
 
                interconnects = <&mc TEGRA194_MEMORY_CLIENT_PCIE1R &emc>,
                                <&mc TEGRA194_MEMORY_CLIENT_PCIE1W &emc>;
-               interconnect-names = "read", "write";
+               interconnect-names = "dma-mem", "write";
+               iommus = <&smmu TEGRA194_SID_PCIE1>;
+               iommu-map = <0x0 &smmu TEGRA194_SID_PCIE1 0x1000>;
+               iommu-map-mask = <0x0>;
+               dma-coherent;
        };
 
        pcie@14120000 {
 
                interconnects = <&mc TEGRA194_MEMORY_CLIENT_PCIE2AR &emc>,
                                <&mc TEGRA194_MEMORY_CLIENT_PCIE2AW &emc>;
-               interconnect-names = "read", "write";
+               interconnect-names = "dma-mem", "write";
+               iommus = <&smmu TEGRA194_SID_PCIE2>;
+               iommu-map = <0x0 &smmu TEGRA194_SID_PCIE2 0x1000>;
+               iommu-map-mask = <0x0>;
+               dma-coherent;
        };
 
        pcie@14140000 {
 
                interconnects = <&mc TEGRA194_MEMORY_CLIENT_PCIE3R &emc>,
                                <&mc TEGRA194_MEMORY_CLIENT_PCIE3W &emc>;
-               interconnect-names = "read", "write";
+               interconnect-names = "dma-mem", "write";
+               iommus = <&smmu TEGRA194_SID_PCIE3>;
+               iommu-map = <0x0 &smmu TEGRA194_SID_PCIE3 0x1000>;
+               iommu-map-mask = <0x0>;
+               dma-coherent;
        };
 
        pcie@14160000 {
 
                interconnects = <&mc TEGRA194_MEMORY_CLIENT_PCIE4R &emc>,
                                <&mc TEGRA194_MEMORY_CLIENT_PCIE4W &emc>;
-               interconnect-names = "read", "write";
+               interconnect-names = "dma-mem", "write";
+               iommus = <&smmu TEGRA194_SID_PCIE4>;
+               iommu-map = <0x0 &smmu TEGRA194_SID_PCIE4 0x1000>;
+               iommu-map-mask = <0x0>;
+               dma-coherent;
        };
 
        pcie@14180000 {
 
                interconnects = <&mc TEGRA194_MEMORY_CLIENT_PCIE0R &emc>,
                                <&mc TEGRA194_MEMORY_CLIENT_PCIE0W &emc>;
-               interconnect-names = "read", "write";
+               interconnect-names = "dma-mem", "write";
+               iommus = <&smmu TEGRA194_SID_PCIE0>;
+               iommu-map = <0x0 &smmu TEGRA194_SID_PCIE0 0x1000>;
+               iommu-map-mask = <0x0>;
+               dma-coherent;
        };
 
        pcie@141a0000 {
 
                interconnects = <&mc TEGRA194_MEMORY_CLIENT_PCIE5R &emc>,
                                <&mc TEGRA194_MEMORY_CLIENT_PCIE5W &emc>;
-               interconnect-names = "read", "write";
+               interconnect-names = "dma-mem", "write";
+               iommus = <&smmu TEGRA194_SID_PCIE5>;
+               iommu-map = <0x0 &smmu TEGRA194_SID_PCIE5 0x1000>;
+               iommu-map-mask = <0x0>;
+               dma-coherent;
        };
 
        pcie_ep@14160000 {
                nvidia,aspm-cmrt-us = <60>;
                nvidia,aspm-pwr-on-t-us = <20>;
                nvidia,aspm-l0s-entrance-latency-us = <3>;
+
+               interconnects = <&mc TEGRA194_MEMORY_CLIENT_PCIE4R &emc>,
+                               <&mc TEGRA194_MEMORY_CLIENT_PCIE4W &emc>;
+               interconnect-names = "dma-mem", "write";
+               iommus = <&smmu TEGRA194_SID_PCIE4>;
+               iommu-map = <0x0 &smmu TEGRA194_SID_PCIE4 0x1000>;
+               iommu-map-mask = <0x0>;
+               dma-coherent;
        };
 
        pcie_ep@14180000 {
                nvidia,aspm-cmrt-us = <60>;
                nvidia,aspm-pwr-on-t-us = <20>;
                nvidia,aspm-l0s-entrance-latency-us = <3>;
+
+               interconnects = <&mc TEGRA194_MEMORY_CLIENT_PCIE0R &emc>,
+                               <&mc TEGRA194_MEMORY_CLIENT_PCIE0W &emc>;
+               interconnect-names = "dma-mem", "write";
+               iommus = <&smmu TEGRA194_SID_PCIE0>;
+               iommu-map = <0x0 &smmu TEGRA194_SID_PCIE0 0x1000>;
+               iommu-map-mask = <0x0>;
+               dma-coherent;
        };
 
        pcie_ep@141a0000 {
                nvidia,aspm-cmrt-us = <60>;
                nvidia,aspm-pwr-on-t-us = <20>;
                nvidia,aspm-l0s-entrance-latency-us = <3>;
+
+               interconnects = <&mc TEGRA194_MEMORY_CLIENT_PCIE5R &emc>,
+                               <&mc TEGRA194_MEMORY_CLIENT_PCIE5W &emc>;
+               interconnect-names = "dma-mem", "write";
+               iommus = <&smmu TEGRA194_SID_PCIE5>;
+               iommu-map = <0x0 &smmu TEGRA194_SID_PCIE5 0x1000>;
+               iommu-map-mask = <0x0>;
+               dma-coherent;
        };
 
        sram@40000000 {
index 23cdcc9..1ccca83 100644 (file)
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /* Copyright (c) 2015, LGE Inc. All rights reserved.
  * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021, Petr Vorel <petr.vorel@gmail.com>
  */
 
 /dts-v1/;
@@ -9,6 +10,9 @@
 #include "pm8994.dtsi"
 #include "pmi8994.dtsi"
 
+/* cont_splash_mem has different memory mapping */
+/delete-node/ &cont_splash_mem;
+
 / {
        model = "LG Nexus 5X";
        compatible = "lg,bullhead", "qcom,msm8992";
@@ -17,6 +21,9 @@
        qcom,board-id = <0xb64 0>;
        qcom,pmic-id = <0x10009 0x1000A 0x0 0x0>;
 
+       /* Bullhead firmware doesn't support PSCI */
+       /delete-node/ psci;
+
        aliases {
                serial0 = &blsp1_uart2;
        };
                        ftrace-size = <0x10000>;
                        pmsg-size = <0x20000>;
                };
+
+               cont_splash_mem: memory@3400000 {
+                       reg = <0 0x03400000 0 0x1200000>;
+                       no-map;
+               };
        };
 };
 
index ffe1a9b..c096b77 100644 (file)
@@ -1,12 +1,16 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /* Copyright (c) 2015, Huawei Inc. All rights reserved.
  * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021, Petr Vorel <petr.vorel@gmail.com>
  */
 
 /dts-v1/;
 
 #include "msm8994.dtsi"
 
+/* Angler's firmware does not report where the memory is allocated */
+/delete-node/ &cont_splash_mem;
+
 / {
        model = "Huawei Nexus 6P";
        compatible = "huawei,angler", "qcom,msm8994";
index a8c274a..188c576 100644 (file)
                                           &BIG_CPU_SLEEP_1
                                           &CLUSTER_SLEEP_0>;
                        next-level-cache = <&L2_700>;
-                       qcom,freq-domain = <&cpufreq_hw 1>;
+                       qcom,freq-domain = <&cpufreq_hw 2>;
                        #cooling-cells = <2>;
                        L2_700: l2-cache {
                                compatible = "cache";
index 4d052e3..eb6b1d1 100644 (file)
@@ -69,7 +69,7 @@
                };
                rmtfs_upper_guard: memory@f5d01000 {
                        no-map;
-                       reg = <0 0xf5d01000 0 0x2000>;
+                       reg = <0 0xf5d01000 0 0x1000>;
                };
 
                /*
@@ -78,7 +78,7 @@
                 */
                removed_region: memory@88f00000 {
                        no-map;
-                       reg = <0 0x88f00000 0 0x200000>;
+                       reg = <0 0x88f00000 0 0x1c00000>;
                };
 
                ramoops: ramoops@ac300000 {
index c2a709a..d7591a4 100644 (file)
                left_spkr: wsa8810-left{
                        compatible = "sdw10217211000";
                        reg = <0 3>;
-                       powerdown-gpios = <&wcdgpio 2 GPIO_ACTIVE_HIGH>;
+                       powerdown-gpios = <&wcdgpio 1 GPIO_ACTIVE_HIGH>;
                        #thermal-sensor-cells = <0>;
                        sound-name-prefix = "SpkrLeft";
                        #sound-dai-cells = <0>;
 
                right_spkr: wsa8810-right{
                        compatible = "sdw10217211000";
-                       powerdown-gpios = <&wcdgpio 3 GPIO_ACTIVE_HIGH>;
+                       powerdown-gpios = <&wcdgpio 2 GPIO_ACTIVE_HIGH>;
                        reg = <0 4>;
                        #thermal-sensor-cells = <0>;
                        sound-name-prefix = "SpkrRight";
index b8eb045..55f1945 100644 (file)
@@ -51,7 +51,7 @@ config CRYPTO_SM4_ARM64_CE
        tristate "SM4 symmetric cipher (ARMv8.2 Crypto Extensions)"
        depends on KERNEL_MODE_NEON
        select CRYPTO_ALGAPI
-       select CRYPTO_SM4
+       select CRYPTO_LIB_SM4
 
 config CRYPTO_GHASH_ARM64_CE
        tristate "GHASH/AES-GCM using ARMv8 Crypto Extensions"
index 2754c87..9c93cfc 100644 (file)
@@ -17,12 +17,20 @@ MODULE_LICENSE("GPL v2");
 
 asmlinkage void sm4_ce_do_crypt(const u32 *rk, void *out, const void *in);
 
+static int sm4_ce_setkey(struct crypto_tfm *tfm, const u8 *key,
+                      unsigned int key_len)
+{
+       struct sm4_ctx *ctx = crypto_tfm_ctx(tfm);
+
+       return sm4_expandkey(ctx, key, key_len);
+}
+
 static void sm4_ce_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 {
-       const struct crypto_sm4_ctx *ctx = crypto_tfm_ctx(tfm);
+       const struct sm4_ctx *ctx = crypto_tfm_ctx(tfm);
 
        if (!crypto_simd_usable()) {
-               crypto_sm4_encrypt(tfm, out, in);
+               sm4_crypt_block(ctx->rkey_enc, out, in);
        } else {
                kernel_neon_begin();
                sm4_ce_do_crypt(ctx->rkey_enc, out, in);
@@ -32,10 +40,10 @@ static void sm4_ce_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 
 static void sm4_ce_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 {
-       const struct crypto_sm4_ctx *ctx = crypto_tfm_ctx(tfm);
+       const struct sm4_ctx *ctx = crypto_tfm_ctx(tfm);
 
        if (!crypto_simd_usable()) {
-               crypto_sm4_decrypt(tfm, out, in);
+               sm4_crypt_block(ctx->rkey_dec, out, in);
        } else {
                kernel_neon_begin();
                sm4_ce_do_crypt(ctx->rkey_dec, out, in);
@@ -49,12 +57,12 @@ static struct crypto_alg sm4_ce_alg = {
        .cra_priority                   = 200,
        .cra_flags                      = CRYPTO_ALG_TYPE_CIPHER,
        .cra_blocksize                  = SM4_BLOCK_SIZE,
-       .cra_ctxsize                    = sizeof(struct crypto_sm4_ctx),
+       .cra_ctxsize                    = sizeof(struct sm4_ctx),
        .cra_module                     = THIS_MODULE,
        .cra_u.cipher = {
                .cia_min_keysize        = SM4_KEY_SIZE,
                .cia_max_keysize        = SM4_KEY_SIZE,
-               .cia_setkey             = crypto_sm4_set_key,
+               .cia_setkey             = sm4_ce_setkey,
                .cia_encrypt            = sm4_ce_encrypt,
                .cia_decrypt            = sm4_ce_decrypt
        }
index 21fa330..b83fb24 100644 (file)
@@ -33,8 +33,7 @@
  * EL2.
  */
 .macro __init_el2_timers
-       mrs     x0, cnthctl_el2
-       orr     x0, x0, #3                      // Enable EL1 physical timers
+       mov     x0, #3                          // Enable EL1 physical timers
        msr     cnthctl_el2, x0
        msr     cntvoff_el2, xzr                // Clear virtual offset
 .endm
index 993a27e..f98c91b 100644 (file)
@@ -41,6 +41,7 @@ void tag_clear_highpage(struct page *to);
 
 typedef struct page *pgtable_t;
 
+int pfn_valid(unsigned long pfn);
 int pfn_is_map_memory(unsigned long pfn);
 
 #include <asm/memory.h>
index e58bca8..41b332c 100644 (file)
@@ -320,7 +320,17 @@ static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
 
 static inline unsigned long regs_return_value(struct pt_regs *regs)
 {
-       return regs->regs[0];
+       unsigned long val = regs->regs[0];
+
+       /*
+        * Audit currently uses regs_return_value() instead of
+        * syscall_get_return_value(). Apply the same sign-extension here until
+        * audit is updated to use syscall_get_return_value().
+        */
+       if (compat_user_mode(regs))
+               val = sign_extend64(val, 31);
+
+       return val;
 }
 
 static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc)
index 1801399..8aebc00 100644 (file)
@@ -35,7 +35,7 @@ struct stack_info {
  * accounting information necessary for robust unwinding.
  *
  * @fp:          The fp value in the frame record (or the real fp)
- * @pc:          The fp value in the frame record (or the real lr)
+ * @pc:          The lr value in the frame record (or the real lr)
  *
  * @stacks_done: Stacks which have been entirely unwound, for which it is no
  *               longer valid to unwind to.
index cfc0672..03e2089 100644 (file)
@@ -29,22 +29,23 @@ static inline void syscall_rollback(struct task_struct *task,
        regs->regs[0] = regs->orig_x0;
 }
 
-
-static inline long syscall_get_error(struct task_struct *task,
-                                    struct pt_regs *regs)
+static inline long syscall_get_return_value(struct task_struct *task,
+                                           struct pt_regs *regs)
 {
-       unsigned long error = regs->regs[0];
+       unsigned long val = regs->regs[0];
 
        if (is_compat_thread(task_thread_info(task)))
-               error = sign_extend64(error, 31);
+               val = sign_extend64(val, 31);
 
-       return IS_ERR_VALUE(error) ? error : 0;
+       return val;
 }
 
-static inline long syscall_get_return_value(struct task_struct *task,
-                                           struct pt_regs *regs)
+static inline long syscall_get_error(struct task_struct *task,
+                                    struct pt_regs *regs)
 {
-       return regs->regs[0];
+       unsigned long error = syscall_get_return_value(task, regs);
+
+       return IS_ERR_VALUE(error) ? error : 0;
 }
 
 static inline void syscall_set_return_value(struct task_struct *task,
index cfa2cfd..418b2bb 100644 (file)
@@ -162,7 +162,9 @@ u64 __init kaslr_early_init(void)
                 * a PAGE_SIZE multiple in the range [_etext - MODULES_VSIZE,
                 * _stext) . This guarantees that the resulting region still
                 * covers [_stext, _etext], and that all relative branches can
-                * be resolved without veneers.
+                * be resolved without veneers unless this region is exhausted
+                * and we fall back to a larger 2GB window in module_alloc()
+                * when ARM64_MODULE_PLTS is enabled.
                 */
                module_range = MODULES_VSIZE - (u64)(_etext - _stext);
                module_alloc_base = (u64)_etext + offset - MODULES_VSIZE;
index 499b6b2..b381a1e 100644 (file)
@@ -1862,7 +1862,7 @@ void syscall_trace_exit(struct pt_regs *regs)
        audit_syscall_exit(regs);
 
        if (flags & _TIF_SYSCALL_TRACEPOINT)
-               trace_sys_exit(regs, regs_return_value(regs));
+               trace_sys_exit(regs, syscall_get_return_value(current, regs));
 
        if (flags & (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP))
                tracehook_report_syscall(regs, PTRACE_SYSCALL_EXIT);
index f8192f4..2303633 100644 (file)
@@ -29,6 +29,7 @@
 #include <asm/unistd.h>
 #include <asm/fpsimd.h>
 #include <asm/ptrace.h>
+#include <asm/syscall.h>
 #include <asm/signal32.h>
 #include <asm/traps.h>
 #include <asm/vdso.h>
@@ -890,7 +891,7 @@ static void do_signal(struct pt_regs *regs)
                     retval == -ERESTART_RESTARTBLOCK ||
                     (retval == -ERESTARTSYS &&
                      !(ksig.ka.sa.sa_flags & SA_RESTART)))) {
-                       regs->regs[0] = -EINTR;
+                       syscall_set_return_value(current, regs, -EINTR, 0);
                        regs->pc = continue_addr;
                }
 
index b83c8d9..8982a2b 100644 (file)
@@ -218,7 +218,7 @@ void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
 
 #ifdef CONFIG_STACKTRACE
 
-noinline void arch_stack_walk(stack_trace_consume_fn consume_entry,
+noinline notrace void arch_stack_walk(stack_trace_consume_fn consume_entry,
                              void *cookie, struct task_struct *task,
                              struct pt_regs *regs)
 {
index 263d6c1..50a0f1a 100644 (file)
@@ -54,10 +54,7 @@ static void invoke_syscall(struct pt_regs *regs, unsigned int scno,
                ret = do_ni_syscall(regs, scno);
        }
 
-       if (is_compat_task())
-               ret = lower_32_bits(ret);
-
-       regs->regs[0] = ret;
+       syscall_set_return_value(current, regs, 0, ret);
 
        /*
         * Ultimately, this value will get limited by KSTACK_OFFSET_MAX(),
@@ -115,7 +112,7 @@ static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
                 * syscall. do_notify_resume() will send a signal to userspace
                 * before the syscall is restarted.
                 */
-               regs->regs[0] = -ERESTARTNOINTR;
+               syscall_set_return_value(current, regs, -ERESTARTNOINTR, 0);
                return;
        }
 
@@ -136,7 +133,7 @@ static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
                 * anyway.
                 */
                if (scno == NO_SYSCALL)
-                       regs->regs[0] = -ENOSYS;
+                       syscall_set_return_value(current, regs, -ENOSYS, 0);
                scno = syscall_trace_enter(regs);
                if (scno == NO_SYSCALL)
                        goto trace_exit;
index e9a2b8f..0ca72f5 100644 (file)
@@ -94,10 +94,14 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
                kvm->arch.return_nisv_io_abort_to_user = true;
                break;
        case KVM_CAP_ARM_MTE:
-               if (!system_supports_mte() || kvm->created_vcpus)
-                       return -EINVAL;
-               r = 0;
-               kvm->arch.mte_enabled = true;
+               mutex_lock(&kvm->lock);
+               if (!system_supports_mte() || kvm->created_vcpus) {
+                       r = -EINVAL;
+               } else {
+                       r = 0;
+                       kvm->arch.mte_enabled = true;
+               }
+               mutex_unlock(&kvm->lock);
                break;
        default:
                r = -EINVAL;
index d938ce9..a6ce991 100644 (file)
@@ -193,7 +193,7 @@ static bool range_is_memory(u64 start, u64 end)
 {
        struct kvm_mem_range r1, r2;
 
-       if (!find_mem_range(start, &r1) || !find_mem_range(end, &r2))
+       if (!find_mem_range(start, &r1) || !find_mem_range(end - 1, &r2))
                return false;
        if (r1.start != r2.start)
                return false;
index 3155c9e..0625bf2 100644 (file)
@@ -947,7 +947,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                vma_shift = get_vma_page_shift(vma, hva);
        }
 
-       shared = (vma->vm_flags & VM_PFNMAP);
+       shared = (vma->vm_flags & VM_SHARED);
 
        switch (vma_shift) {
 #ifndef __PAGETABLE_PMD_FOLDED
index 8490ed2..1fdb7bb 100644 (file)
@@ -219,6 +219,43 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
        free_area_init(max_zone_pfns);
 }
 
+int pfn_valid(unsigned long pfn)
+{
+       phys_addr_t addr = PFN_PHYS(pfn);
+       struct mem_section *ms;
+
+       /*
+        * Ensure the upper PAGE_SHIFT bits are clear in the
+        * pfn. Else it might lead to false positives when
+        * some of the upper bits are set, but the lower bits
+        * match a valid pfn.
+        */
+       if (PHYS_PFN(addr) != pfn)
+               return 0;
+
+       if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
+               return 0;
+
+       ms = __pfn_to_section(pfn);
+       if (!valid_section(ms))
+               return 0;
+
+       /*
+        * ZONE_DEVICE memory does not have the memblock entries.
+        * memblock_is_map_memory() check for ZONE_DEVICE based
+        * addresses will always fail. Even the normal hotplugged
+        * memory will never have MEMBLOCK_NOMAP flag set in their
+        * memblock entries. Skip memblock search for all non early
+        * memory sections covering all of hotplug memory including
+        * both normal and ZONE_DEVICE based.
+        */
+       if (!early_section(ms))
+               return pfn_section_valid(ms, pfn);
+
+       return memblock_is_memory(addr);
+}
+EXPORT_SYMBOL(pfn_valid);
+
 int pfn_is_map_memory(unsigned long pfn)
 {
        phys_addr_t addr = PFN_PHYS(pfn);
index dccf98a..41c23f4 100644 (file)
@@ -823,6 +823,19 @@ emit_cond_jmp:
                        return ret;
                break;
 
+       /* speculation barrier */
+       case BPF_ST | BPF_NOSPEC:
+               /*
+                * Nothing required here.
+                *
+                * In case of arm64, we rely on the firmware mitigation of
+                * Speculative Store Bypass as controlled via the ssbd kernel
+                * parameter. Whenever the mitigation is enabled, it works
+                * for all of the kernel code with no need to provide any
+                * additional instructions.
+                */
+               break;
+
        /* ST: *(size *)(dst + off) = imm */
        case BPF_ST | BPF_MEM | BPF_W:
        case BPF_ST | BPF_MEM | BPF_H:
index b5e14d5..c30baa0 100644 (file)
@@ -44,7 +44,6 @@ config H8300_H8MAX
        bool "H8MAX"
        select H83069
        select RAMKERNEL
-       select HAVE_IDE
        help
          H8MAX Evaluation Board Support
          More Information. (Japanese Only)
index cf425c2..4993c7a 100644 (file)
@@ -25,7 +25,6 @@ config IA64
        select HAVE_ASM_MODVERSIONS
        select HAVE_UNSTABLE_SCHED_CLOCK
        select HAVE_EXIT_THREAD
-       select HAVE_IDE
        select HAVE_KPROBES
        select HAVE_KRETPROBES
        select HAVE_FTRACE_MCOUNT_RECORD
index 96989ad..d632a1d 100644 (file)
@@ -23,7 +23,6 @@ config M68K
        select HAVE_DEBUG_BUGVERBOSE
        select HAVE_EFFICIENT_UNALIGNED_ACCESS if !CPU_HAS_NO_UNALIGNED
        select HAVE_FUTEX_CMPXCHG if MMU && FUTEX
-       select HAVE_IDE
        select HAVE_MOD_ARCH_SPECIFIC
        select HAVE_UID16
        select MMU_GATHER_NO_RANGE if MMU
index 29e9463..277d61a 100644 (file)
@@ -26,6 +26,7 @@ config COLDFIRE
        bool "Coldfire CPU family support"
        select ARCH_HAVE_CUSTOM_GPIO_H
        select CPU_HAS_NO_BITFIELDS
+       select CPU_HAS_NO_CAS
        select CPU_HAS_NO_MULDIV64
        select GENERIC_CSUM
        select GPIOLIB
@@ -39,6 +40,7 @@ config M68000
        bool
        depends on !MMU
        select CPU_HAS_NO_BITFIELDS
+       select CPU_HAS_NO_CAS
        select CPU_HAS_NO_MULDIV64
        select CPU_HAS_NO_UNALIGNED
        select GENERIC_CSUM
@@ -54,6 +56,7 @@ config M68000
 config MCPU32
        bool
        select CPU_HAS_NO_BITFIELDS
+       select CPU_HAS_NO_CAS
        select CPU_HAS_NO_UNALIGNED
        select CPU_NO_EFFICIENT_FFS
        help
@@ -383,7 +386,7 @@ config ADVANCED
 
 config RMW_INSNS
        bool "Use read-modify-write instructions"
-       depends on ADVANCED
+       depends on ADVANCED && !CPU_HAS_NO_CAS
        help
          This allows to use certain instructions that work with indivisible
          read-modify-write bus cycles. While this is faster than the
@@ -450,6 +453,9 @@ config M68K_L2_CACHE
 config CPU_HAS_NO_BITFIELDS
        bool
 
+config CPU_HAS_NO_CAS
+       bool
+
 config CPU_HAS_NO_MULDIV64
        bool
 
index 2c4d2ca..4853751 100644 (file)
@@ -26,7 +26,7 @@ DEFINE_CLK(pll, "pll.0", MCF_CLK);
 DEFINE_CLK(sys, "sys.0", MCF_BUSCLK);
 
 static struct clk_lookup m525x_clk_lookup[] = {
-       CLKDEV_INIT(NULL, "pll.0", &pll),
+       CLKDEV_INIT(NULL, "pll.0", &clk_pll),
        CLKDEV_INIT(NULL, "sys.0", &clk_sys),
        CLKDEV_INIT("mcftmr.0", NULL, &clk_sys),
        CLKDEV_INIT("mcftmr.1", NULL, &clk_sys),
index 0a2cacf..5f53628 100644 (file)
@@ -84,6 +84,7 @@ CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
+CONFIG_NETFILTER_NETLINK_HOOK=m
 CONFIG_NF_CONNTRACK=m
 CONFIG_NF_CONNTRACK_ZONES=y
 # CONFIG_NF_CONNTRACK_PROCFS is not set
@@ -323,7 +324,6 @@ CONFIG_CDROM_PKTCDVD=m
 CONFIG_ATA_OVER_ETH=m
 CONFIG_DUMMY_IRQ=m
 CONFIG_RAID_ATTRS=m
-CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_ST=m
 CONFIG_BLK_DEV_SR=y
@@ -502,6 +502,7 @@ CONFIG_ROOT_NFS=y
 CONFIG_NFSD=m
 CONFIG_NFSD_V3=y
 CONFIG_CIFS=m
+# CONFIG_CIFS_STATS2 is not set
 # CONFIG_CIFS_DEBUG is not set
 CONFIG_CODA_FS=m
 CONFIG_NLS_CODEPAGE_437=y
@@ -616,6 +617,7 @@ CONFIG_PRIME_NUMBERS=m
 CONFIG_CRC32_SELFTEST=m
 CONFIG_CRC64=m
 CONFIG_XZ_DEC_TEST=m
+CONFIG_GLOB_SELFTEST=m
 CONFIG_STRING_SELFTEST=m
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_MAGIC_SYSRQ=y
@@ -624,7 +626,6 @@ CONFIG_WW_MUTEX_SELFTEST=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_KUNIT=m
 CONFIG_KUNIT_ALL_TESTS=m
-CONFIG_TEST_LIST_SORT=m
 CONFIG_TEST_MIN_HEAP=m
 CONFIG_TEST_SORT=m
 CONFIG_TEST_DIV64=m
@@ -636,6 +637,7 @@ CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_STRSCPY=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
+CONFIG_TEST_SCANF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
 CONFIG_TEST_XARRAY=m
index 4dc6dcf..d956864 100644 (file)
@@ -80,6 +80,7 @@ CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
+CONFIG_NETFILTER_NETLINK_HOOK=m
 CONFIG_NF_CONNTRACK=m
 CONFIG_NF_CONNTRACK_ZONES=y
 # CONFIG_NF_CONNTRACK_PROCFS is not set
@@ -458,6 +459,7 @@ CONFIG_ROOT_NFS=y
 CONFIG_NFSD=m
 CONFIG_NFSD_V3=y
 CONFIG_CIFS=m
+# CONFIG_CIFS_STATS2 is not set
 # CONFIG_CIFS_DEBUG is not set
 CONFIG_CODA_FS=m
 CONFIG_NLS_CODEPAGE_437=y
@@ -580,7 +582,6 @@ CONFIG_WW_MUTEX_SELFTEST=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_KUNIT=m
 CONFIG_KUNIT_ALL_TESTS=m
-CONFIG_TEST_LIST_SORT=m
 CONFIG_TEST_MIN_HEAP=m
 CONFIG_TEST_SORT=m
 CONFIG_TEST_DIV64=m
@@ -592,6 +593,7 @@ CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_STRSCPY=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
+CONFIG_TEST_SCANF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
 CONFIG_TEST_XARRAY=m
index 23d910a..dbf1960 100644 (file)
@@ -87,6 +87,7 @@ CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
+CONFIG_NETFILTER_NETLINK_HOOK=m
 CONFIG_NF_CONNTRACK=m
 CONFIG_NF_CONNTRACK_ZONES=y
 # CONFIG_NF_CONNTRACK_PROCFS is not set
@@ -324,7 +325,6 @@ CONFIG_CDROM_PKTCDVD=m
 CONFIG_ATA_OVER_ETH=m
 CONFIG_DUMMY_IRQ=m
 CONFIG_RAID_ATTRS=m
-CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_ST=m
 CONFIG_BLK_DEV_SR=y
@@ -480,6 +480,7 @@ CONFIG_ROOT_NFS=y
 CONFIG_NFSD=m
 CONFIG_NFSD_V3=y
 CONFIG_CIFS=m
+# CONFIG_CIFS_STATS2 is not set
 # CONFIG_CIFS_DEBUG is not set
 CONFIG_CODA_FS=m
 CONFIG_NLS_CODEPAGE_437=y
@@ -594,6 +595,7 @@ CONFIG_PRIME_NUMBERS=m
 CONFIG_CRC32_SELFTEST=m
 CONFIG_CRC64=m
 CONFIG_XZ_DEC_TEST=m
+CONFIG_GLOB_SELFTEST=m
 CONFIG_STRING_SELFTEST=m
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_MAGIC_SYSRQ=y
@@ -602,7 +604,6 @@ CONFIG_WW_MUTEX_SELFTEST=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_KUNIT=m
 CONFIG_KUNIT_ALL_TESTS=m
-CONFIG_TEST_LIST_SORT=m
 CONFIG_TEST_MIN_HEAP=m
 CONFIG_TEST_SORT=m
 CONFIG_TEST_DIV64=m
@@ -614,6 +615,7 @@ CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_STRSCPY=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
+CONFIG_TEST_SCANF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
 CONFIG_TEST_XARRAY=m
index 2c3f428..7620db3 100644 (file)
@@ -77,6 +77,7 @@ CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
+CONFIG_NETFILTER_NETLINK_HOOK=m
 CONFIG_NF_CONNTRACK=m
 CONFIG_NF_CONNTRACK_ZONES=y
 # CONFIG_NF_CONNTRACK_PROCFS is not set
@@ -451,6 +452,7 @@ CONFIG_ROOT_NFS=y
 CONFIG_NFSD=m
 CONFIG_NFSD_V3=y
 CONFIG_CIFS=m
+# CONFIG_CIFS_STATS2 is not set
 # CONFIG_CIFS_DEBUG is not set
 CONFIG_CODA_FS=m
 CONFIG_NLS_CODEPAGE_437=y
@@ -573,7 +575,6 @@ CONFIG_WW_MUTEX_SELFTEST=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_KUNIT=m
 CONFIG_KUNIT_ALL_TESTS=m
-CONFIG_TEST_LIST_SORT=m
 CONFIG_TEST_MIN_HEAP=m
 CONFIG_TEST_SORT=m
 CONFIG_TEST_DIV64=m
@@ -585,6 +586,7 @@ CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_STRSCPY=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
+CONFIG_TEST_SCANF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
 CONFIG_TEST_XARRAY=m
index 5b1898d..113a02d 100644 (file)
@@ -79,6 +79,7 @@ CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
+CONFIG_NETFILTER_NETLINK_HOOK=m
 CONFIG_NF_CONNTRACK=m
 CONFIG_NF_CONNTRACK_ZONES=y
 # CONFIG_NF_CONNTRACK_PROCFS is not set
@@ -460,6 +461,7 @@ CONFIG_ROOT_NFS=y
 CONFIG_NFSD=m
 CONFIG_NFSD_V3=y
 CONFIG_CIFS=m
+# CONFIG_CIFS_STATS2 is not set
 # CONFIG_CIFS_DEBUG is not set
 CONFIG_CODA_FS=m
 CONFIG_NLS_CODEPAGE_437=y
@@ -582,7 +584,6 @@ CONFIG_WW_MUTEX_SELFTEST=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_KUNIT=m
 CONFIG_KUNIT_ALL_TESTS=m
-CONFIG_TEST_LIST_SORT=m
 CONFIG_TEST_MIN_HEAP=m
 CONFIG_TEST_SORT=m
 CONFIG_TEST_DIV64=m
@@ -594,6 +595,7 @@ CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_STRSCPY=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
+CONFIG_TEST_SCANF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
 CONFIG_TEST_XARRAY=m
index 9606ccd..a8e006e 100644 (file)
@@ -78,6 +78,7 @@ CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
+CONFIG_NETFILTER_NETLINK_HOOK=m
 CONFIG_NF_CONNTRACK=m
 CONFIG_NF_CONNTRACK_ZONES=y
 # CONFIG_NF_CONNTRACK_PROCFS is not set
@@ -315,7 +316,6 @@ CONFIG_CDROM_PKTCDVD=m
 CONFIG_ATA_OVER_ETH=m
 CONFIG_DUMMY_IRQ=m
 CONFIG_RAID_ATTRS=m
-CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_ST=m
 CONFIG_BLK_DEV_SR=y
@@ -482,6 +482,7 @@ CONFIG_ROOT_NFS=y
 CONFIG_NFSD=m
 CONFIG_NFSD_V3=y
 CONFIG_CIFS=m
+# CONFIG_CIFS_STATS2 is not set
 # CONFIG_CIFS_DEBUG is not set
 CONFIG_CODA_FS=m
 CONFIG_NLS_CODEPAGE_437=y
@@ -596,6 +597,7 @@ CONFIG_PRIME_NUMBERS=m
 CONFIG_CRC32_SELFTEST=m
 CONFIG_CRC64=m
 CONFIG_XZ_DEC_TEST=m
+CONFIG_GLOB_SELFTEST=m
 CONFIG_STRING_SELFTEST=m
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_MAGIC_SYSRQ=y
@@ -604,7 +606,6 @@ CONFIG_WW_MUTEX_SELFTEST=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_KUNIT=m
 CONFIG_KUNIT_ALL_TESTS=m
-CONFIG_TEST_LIST_SORT=m
 CONFIG_TEST_MIN_HEAP=m
 CONFIG_TEST_SORT=m
 CONFIG_TEST_DIV64=m
@@ -616,6 +617,7 @@ CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_STRSCPY=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
+CONFIG_TEST_SCANF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
 CONFIG_TEST_XARRAY=m
index 3175ba5..b665590 100644 (file)
@@ -98,6 +98,7 @@ CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
+CONFIG_NETFILTER_NETLINK_HOOK=m
 CONFIG_NF_CONNTRACK=m
 CONFIG_NF_CONNTRACK_ZONES=y
 # CONFIG_NF_CONNTRACK_PROCFS is not set
@@ -344,7 +345,6 @@ CONFIG_CDROM_PKTCDVD=m
 CONFIG_ATA_OVER_ETH=m
 CONFIG_DUMMY_IRQ=m
 CONFIG_RAID_ATTRS=m
-CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_ST=m
 CONFIG_BLK_DEV_SR=y
@@ -567,6 +567,7 @@ CONFIG_ROOT_NFS=y
 CONFIG_NFSD=m
 CONFIG_NFSD_V3=y
 CONFIG_CIFS=m
+# CONFIG_CIFS_STATS2 is not set
 # CONFIG_CIFS_DEBUG is not set
 CONFIG_CODA_FS=m
 CONFIG_NLS_CODEPAGE_437=y
@@ -681,6 +682,7 @@ CONFIG_PRIME_NUMBERS=m
 CONFIG_CRC32_SELFTEST=m
 CONFIG_CRC64=m
 CONFIG_XZ_DEC_TEST=m
+CONFIG_GLOB_SELFTEST=m
 CONFIG_STRING_SELFTEST=m
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_MAGIC_SYSRQ=y
@@ -689,7 +691,6 @@ CONFIG_WW_MUTEX_SELFTEST=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_KUNIT=m
 CONFIG_KUNIT_ALL_TESTS=m
-CONFIG_TEST_LIST_SORT=m
 CONFIG_TEST_MIN_HEAP=m
 CONFIG_TEST_SORT=m
 CONFIG_TEST_DIV64=m
@@ -701,6 +702,7 @@ CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_STRSCPY=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
+CONFIG_TEST_SCANF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
 CONFIG_TEST_XARRAY=m
index 793085f..563ba47 100644 (file)
@@ -76,6 +76,7 @@ CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
+CONFIG_NETFILTER_NETLINK_HOOK=m
 CONFIG_NF_CONNTRACK=m
 CONFIG_NF_CONNTRACK_ZONES=y
 # CONFIG_NF_CONNTRACK_PROCFS is not set
@@ -450,6 +451,7 @@ CONFIG_ROOT_NFS=y
 CONFIG_NFSD=m
 CONFIG_NFSD_V3=y
 CONFIG_CIFS=m
+# CONFIG_CIFS_STATS2 is not set
 # CONFIG_CIFS_DEBUG is not set
 CONFIG_CODA_FS=m
 CONFIG_NLS_CODEPAGE_437=y
@@ -572,7 +574,6 @@ CONFIG_WW_MUTEX_SELFTEST=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_KUNIT=m
 CONFIG_KUNIT_ALL_TESTS=m
-CONFIG_TEST_LIST_SORT=m
 CONFIG_TEST_MIN_HEAP=m
 CONFIG_TEST_SORT=m
 CONFIG_TEST_DIV64=m
@@ -584,6 +585,7 @@ CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_STRSCPY=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
+CONFIG_TEST_SCANF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
 CONFIG_TEST_XARRAY=m
index 56fbac7..9f1b44d 100644 (file)
@@ -77,6 +77,7 @@ CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
+CONFIG_NETFILTER_NETLINK_HOOK=m
 CONFIG_NF_CONNTRACK=m
 CONFIG_NF_CONNTRACK_ZONES=y
 # CONFIG_NF_CONNTRACK_PROCFS is not set
@@ -451,6 +452,7 @@ CONFIG_ROOT_NFS=y
 CONFIG_NFSD=m
 CONFIG_NFSD_V3=y
 CONFIG_CIFS=m
+# CONFIG_CIFS_STATS2 is not set
 # CONFIG_CIFS_DEBUG is not set
 CONFIG_CODA_FS=m
 CONFIG_NLS_CODEPAGE_437=y
@@ -573,7 +575,6 @@ CONFIG_WW_MUTEX_SELFTEST=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_KUNIT=m
 CONFIG_KUNIT_ALL_TESTS=m
-CONFIG_TEST_LIST_SORT=m
 CONFIG_TEST_MIN_HEAP=m
 CONFIG_TEST_SORT=m
 CONFIG_TEST_DIV64=m
@@ -585,6 +586,7 @@ CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_STRSCPY=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
+CONFIG_TEST_SCANF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
 CONFIG_TEST_XARRAY=m
index 0e15431..1993433 100644 (file)
@@ -78,6 +78,7 @@ CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
+CONFIG_NETFILTER_NETLINK_HOOK=m
 CONFIG_NF_CONNTRACK=m
 CONFIG_NF_CONNTRACK_ZONES=y
 # CONFIG_NF_CONNTRACK_PROCFS is not set
@@ -314,7 +315,6 @@ CONFIG_CDROM_PKTCDVD=m
 CONFIG_ATA_OVER_ETH=m
 CONFIG_DUMMY_IRQ=m
 CONFIG_RAID_ATTRS=m
-CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_ST=m
 CONFIG_BLK_DEV_SR=y
@@ -469,6 +469,7 @@ CONFIG_ROOT_NFS=y
 CONFIG_NFSD=m
 CONFIG_NFSD_V3=y
 CONFIG_CIFS=m
+# CONFIG_CIFS_STATS2 is not set
 # CONFIG_CIFS_DEBUG is not set
 CONFIG_CODA_FS=m
 CONFIG_NLS_CODEPAGE_437=y
@@ -583,6 +584,7 @@ CONFIG_PRIME_NUMBERS=m
 CONFIG_CRC32_SELFTEST=m
 CONFIG_CRC64=m
 CONFIG_XZ_DEC_TEST=m
+CONFIG_GLOB_SELFTEST=m
 CONFIG_STRING_SELFTEST=m
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_MAGIC_SYSRQ=y
@@ -591,7 +593,6 @@ CONFIG_WW_MUTEX_SELFTEST=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_KUNIT=m
 CONFIG_KUNIT_ALL_TESTS=m
-CONFIG_TEST_LIST_SORT=m
 CONFIG_TEST_MIN_HEAP=m
 CONFIG_TEST_SORT=m
 CONFIG_TEST_DIV64=m
@@ -603,6 +604,7 @@ CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_STRSCPY=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
+CONFIG_TEST_SCANF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
 CONFIG_TEST_XARRAY=m
index d923064..8898ae3 100644 (file)
@@ -22,7 +22,6 @@ CONFIG_RAMSIZE=0x8000000
 CONFIG_VECTORBASE=0x40000000
 CONFIG_KERNELBASE=0x40001000
 # CONFIG_BLK_DEV_BSG is not set
-CONFIG_BLK_CMDLINE_PARSER=y
 CONFIG_BINFMT_FLAT=y
 CONFIG_BINFMT_ZFLAT=y
 CONFIG_BINFMT_MISC=y
index 3490a05..56dbc63 100644 (file)
@@ -74,6 +74,7 @@ CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
+CONFIG_NETFILTER_NETLINK_HOOK=m
 CONFIG_NF_CONNTRACK=m
 CONFIG_NF_CONNTRACK_ZONES=y
 # CONFIG_NF_CONNTRACK_PROCFS is not set
@@ -453,6 +454,7 @@ CONFIG_ROOT_NFS=y
 CONFIG_NFSD=m
 CONFIG_NFSD_V3=y
 CONFIG_CIFS=m
+# CONFIG_CIFS_STATS2 is not set
 # CONFIG_CIFS_DEBUG is not set
 CONFIG_CODA_FS=m
 CONFIG_NLS_CODEPAGE_437=y
@@ -574,7 +576,6 @@ CONFIG_TEST_LOCKUP=m
 CONFIG_WW_MUTEX_SELFTEST=m
 CONFIG_KUNIT=m
 CONFIG_KUNIT_ALL_TESTS=m
-CONFIG_TEST_LIST_SORT=m
 CONFIG_TEST_MIN_HEAP=m
 CONFIG_TEST_SORT=m
 CONFIG_TEST_DIV64=m
@@ -586,6 +587,7 @@ CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_STRSCPY=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
+CONFIG_TEST_SCANF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
 CONFIG_TEST_XARRAY=m
index 4e92c8c..6bd1bba 100644 (file)
@@ -74,6 +74,7 @@ CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
+CONFIG_NETFILTER_NETLINK_HOOK=m
 CONFIG_NF_CONNTRACK=m
 CONFIG_NF_CONNTRACK_ZONES=y
 # CONFIG_NF_CONNTRACK_PROCFS is not set
@@ -452,6 +453,7 @@ CONFIG_ROOT_NFS=y
 CONFIG_NFSD=m
 CONFIG_NFSD_V3=y
 CONFIG_CIFS=m
+# CONFIG_CIFS_STATS2 is not set
 # CONFIG_CIFS_DEBUG is not set
 CONFIG_CODA_FS=m
 CONFIG_NLS_CODEPAGE_437=y
@@ -574,7 +576,6 @@ CONFIG_WW_MUTEX_SELFTEST=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_KUNIT=m
 CONFIG_KUNIT_ALL_TESTS=m
-CONFIG_TEST_LIST_SORT=m
 CONFIG_TEST_MIN_HEAP=m
 CONFIG_TEST_SORT=m
 CONFIG_TEST_DIV64=m
@@ -586,6 +587,7 @@ CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_STRSCPY=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
+CONFIG_TEST_SCANF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
 CONFIG_TEST_XARRAY=m
index d2875e3..79e5542 100644 (file)
@@ -254,8 +254,8 @@ static void __exit nfeth_cleanup(void)
 
        for (i = 0; i < MAX_UNIT; i++) {
                if (nfeth_dev[i]) {
-                       unregister_netdev(nfeth_dev[0]);
-                       free_netdev(nfeth_dev[0]);
+                       unregister_netdev(nfeth_dev[i]);
+                       free_netdev(nfeth_dev[i]);
                }
        }
        free_irq(nfEtherIRQ, nfeth_interrupt);
index 8637bf8..cfba83d 100644 (file)
@@ -48,7 +48,7 @@ static inline int arch_atomic_##op##_return(int i, atomic_t *v)               \
                        "       casl %2,%1,%0\n"                        \
                        "       jne 1b"                                 \
                        : "+m" (*v), "=&d" (t), "=&d" (tmp)             \
-                       : "g" (i), "2" (arch_atomic_read(v)));          \
+                       : "di" (i), "2" (arch_atomic_read(v)));         \
        return t;                                                       \
 }
 
@@ -63,7 +63,7 @@ static inline int arch_atomic_fetch_##op(int i, atomic_t *v)          \
                        "       casl %2,%1,%0\n"                        \
                        "       jne 1b"                                 \
                        : "+m" (*v), "=&d" (t), "=&d" (tmp)             \
-                       : "g" (i), "2" (arch_atomic_read(v)));          \
+                       : "di" (i), "2" (arch_atomic_read(v)));         \
        return tmp;                                                     \
 }
 
index cee6087..6dfb27d 100644 (file)
@@ -71,7 +71,6 @@ config MIPS
        select HAVE_FUNCTION_TRACER
        select HAVE_GCC_PLUGINS
        select HAVE_GENERIC_VDSO
-       select HAVE_IDE
        select HAVE_IOREMAP_PROT
        select HAVE_IRQ_EXIT_ON_IRQ_STACK
        select HAVE_IRQ_TIME_ACCOUNTING
index 4e942b7..653befc 100644 (file)
@@ -321,7 +321,7 @@ KBUILD_LDFLAGS              += -m $(ld-emul)
 
 ifdef CONFIG_MIPS
 CHECKFLAGS += $(shell $(CC) $(KBUILD_CFLAGS) -dM -E -x c /dev/null | \
-       egrep -vw '__GNUC_(|MINOR_|PATCHLEVEL_)_' | \
+       egrep -vw '__GNUC_(MINOR_|PATCHLEVEL_)?_' | \
        sed -e "s/^\#define /-D'/" -e "s/ /'='/" -e "s/$$/'/" -e 's/\$$/&&/g')
 endif
 
index 9dbed7b..76e43a7 100644 (file)
@@ -69,24 +69,24 @@ static void ar2315_misc_irq_handler(struct irq_desc *desc)
 {
        u32 pending = ar2315_rst_reg_read(AR2315_ISR) &
                      ar2315_rst_reg_read(AR2315_IMR);
-       unsigned nr, misc_irq = 0;
+       unsigned nr;
+       int ret = 0;
 
        if (pending) {
                struct irq_domain *domain = irq_desc_get_handler_data(desc);
 
                nr = __ffs(pending);
-               misc_irq = irq_find_mapping(domain, nr);
-       }
 
-       if (misc_irq) {
                if (nr == AR2315_MISC_IRQ_GPIO)
                        ar2315_rst_reg_write(AR2315_ISR, AR2315_ISR_GPIO);
                else if (nr == AR2315_MISC_IRQ_WATCHDOG)
                        ar2315_rst_reg_write(AR2315_ISR, AR2315_ISR_WD);
-               generic_handle_irq(misc_irq);
-       } else {
-               spurious_interrupt();
+
+               ret = generic_handle_domain_irq(domain, nr);
        }
+
+       if (!pending || ret)
+               spurious_interrupt();
 }
 
 static void ar2315_misc_irq_unmask(struct irq_data *d)
index 23c879f..822b639 100644 (file)
@@ -73,22 +73,21 @@ static void ar5312_misc_irq_handler(struct irq_desc *desc)
 {
        u32 pending = ar5312_rst_reg_read(AR5312_ISR) &
                      ar5312_rst_reg_read(AR5312_IMR);
-       unsigned nr, misc_irq = 0;
+       unsigned nr;
+       int ret = 0;
 
        if (pending) {
                struct irq_domain *domain = irq_desc_get_handler_data(desc);
 
                nr = __ffs(pending);
-               misc_irq = irq_find_mapping(domain, nr);
-       }
 
-       if (misc_irq) {
-               generic_handle_irq(misc_irq);
+               ret = generic_handle_domain_irq(domain, nr);
                if (nr == AR5312_MISC_IRQ_TIMER)
                        ar5312_rst_reg_read(AR5312_TIMER);
-       } else {
-               spurious_interrupt();
        }
+
+       if (!pending || ret)
+               spurious_interrupt();
 }
 
 /* Enable the specified AR5312_MISC_IRQ interrupt */
index d502673..34d179c 100644 (file)
@@ -7,8 +7,6 @@
 #ifndef __ASM_RC32434_RB_H
 #define __ASM_RC32434_RB_H
 
-#include <linux/genhd.h>
-
 #define REGBASE                0x18000000
 #define IDT434_REG_BASE ((volatile void *) KSEG1ADDR(REGBASE))
 #define UART0BASE      0x58000
index 4b2567d..c7925d0 100644 (file)
@@ -58,15 +58,20 @@ do {                                                        \
 
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
 {
-       pmd_t *pmd = NULL;
+       pmd_t *pmd;
        struct page *pg;
 
-       pg = alloc_pages(GFP_KERNEL | __GFP_ACCOUNT, PMD_ORDER);
-       if (pg) {
-               pgtable_pmd_page_ctor(pg);
-               pmd = (pmd_t *)page_address(pg);
-               pmd_init((unsigned long)pmd, (unsigned long)invalid_pte_table);
+       pg = alloc_pages(GFP_KERNEL_ACCOUNT, PMD_ORDER);
+       if (!pg)
+               return NULL;
+
+       if (!pgtable_pmd_page_ctor(pg)) {
+               __free_pages(pg, PMD_ORDER);
+               return NULL;
        }
+
+       pmd = (pmd_t *)page_address(pg);
+       pmd_init((unsigned long)pmd, (unsigned long)invalid_pte_table);
        return pmd;
 }
 
index acfbdc0..b732495 100644 (file)
@@ -300,7 +300,7 @@ static void ltq_hw_irq_handler(struct irq_desc *desc)
         */
        irq = __fls(irq);
        hwirq = irq + MIPS_CPU_IRQ_CASCADE + (INT_NUM_IM_OFFSET * module);
-       generic_handle_irq(irq_linear_revmap(ltq_domain, hwirq));
+       generic_handle_domain_irq(ltq_domain, hwirq);
 
        /* if this is a EBU irq, we need to ack it or get a deadlock */
        if (irq == LTQ_ICU_EBU_IRQ && !module && LTQ_EBU_PCC_ISTAT != 0)
index ee74719..4ffbcc5 100644 (file)
@@ -48,7 +48,8 @@ static struct plat_serial8250_port uart8250_data[] = {
                .mapbase        = 0x1f000900,   /* The CBUS UART */
                .irq            = MIPS_CPU_IRQ_BASE + MIPSCPU_INT_MB2,
                .uartclk        = 3686400,      /* Twice the usual clk! */
-               .iotype         = UPIO_MEM32,
+               .iotype         = IS_ENABLED(CONFIG_CPU_BIG_ENDIAN) ?
+                                 UPIO_MEM32BE : UPIO_MEM32,
                .flags          = CBUS_UART_FLAGS,
                .regshift       = 3,
        },
index 939dd06..3a73e93 100644 (file)
@@ -1355,6 +1355,9 @@ jeq_common:
                }
                break;
 
+       case BPF_ST | BPF_NOSPEC: /* speculation barrier */
+               break;
+
        case BPF_ST | BPF_B | BPF_MEM:
        case BPF_ST | BPF_H | BPF_MEM:
        case BPF_ST | BPF_W | BPF_MEM:
index c1a655a..9a4bfb4 100644 (file)
@@ -337,14 +337,12 @@ static void ar2315_pci_irq_handler(struct irq_desc *desc)
        struct ar2315_pci_ctrl *apc = irq_desc_get_handler_data(desc);
        u32 pending = ar2315_pci_reg_read(apc, AR2315_PCI_ISR) &
                      ar2315_pci_reg_read(apc, AR2315_PCI_IMR);
-       unsigned pci_irq = 0;
+       int ret = 0;
 
        if (pending)
-               pci_irq = irq_find_mapping(apc->domain, __ffs(pending));
+               ret = generic_handle_domain_irq(apc->domain, __ffs(pending));
 
-       if (pci_irq)
-               generic_handle_irq(pci_irq);
-       else
+       if (!pending || ret)
                spurious_interrupt();
 }
 
index c48e23c..d3c947f 100644 (file)
@@ -140,10 +140,9 @@ static void rt3883_pci_irq_handler(struct irq_desc *desc)
        }
 
        while (pending) {
-               unsigned irq, bit = __ffs(pending);
+               unsigned bit = __ffs(pending);
 
-               irq = irq_find_mapping(rpc->irq_domain, bit);
-               generic_handle_irq(irq);
+               generic_handle_domain_irq(rpc->irq_domain, bit);
 
                pending &= ~BIT(bit);
        }
index 220ca0c..fa353bc 100644 (file)
@@ -100,7 +100,7 @@ static void ralink_intc_irq_handler(struct irq_desc *desc)
 
        if (pending) {
                struct irq_domain *domain = irq_desc_get_handler_data(desc);
-               generic_handle_irq(irq_find_mapping(domain, __ffs(pending)));
+               generic_handle_domain_irq(domain, __ffs(pending));
        } else {
                spurious_interrupt();
        }
index 95c1bff..a0dd3bd 100644 (file)
@@ -190,7 +190,7 @@ static void ip27_do_irq_mask0(struct irq_desc *desc)
        unsigned long *mask = per_cpu(irq_enable_mask, cpu);
        struct irq_domain *domain;
        u64 pend0;
-       int irq;
+       int ret;
 
        /* copied from Irix intpend0() */
        pend0 = LOCAL_HUB_L(PI_INT_PEND0);
@@ -216,10 +216,8 @@ static void ip27_do_irq_mask0(struct irq_desc *desc)
 #endif
        {
                domain = irq_desc_get_handler_data(desc);
-               irq = irq_linear_revmap(domain, __ffs(pend0));
-               if (irq)
-                       generic_handle_irq(irq);
-               else
+               ret = generic_handle_domain_irq(domain, __ffs(pend0));
+               if (ret)
                        spurious_interrupt();
        }
 
@@ -232,7 +230,7 @@ static void ip27_do_irq_mask1(struct irq_desc *desc)
        unsigned long *mask = per_cpu(irq_enable_mask, cpu);
        struct irq_domain *domain;
        u64 pend1;
-       int irq;
+       int ret;
 
        /* copied from Irix intpend0() */
        pend1 = LOCAL_HUB_L(PI_INT_PEND1);
@@ -242,10 +240,8 @@ static void ip27_do_irq_mask1(struct irq_desc *desc)
                return;
 
        domain = irq_desc_get_handler_data(desc);
-       irq = irq_linear_revmap(domain, __ffs(pend1) + 64);
-       if (irq)
-               generic_handle_irq(irq);
-       else
+       ret = generic_handle_domain_irq(domain, __ffs(pend1) + 64);
+       if (ret)
                spurious_interrupt();
 
        LOCAL_HUB_L(PI_INT_PEND1);
index ba87704..423c32c 100644 (file)
@@ -99,7 +99,7 @@ static void ip30_normal_irq(struct irq_desc *desc)
        int cpu = smp_processor_id();
        struct irq_domain *domain;
        u64 pend, mask;
-       int irq;
+       int ret;
 
        pend = heart_read(&heart_regs->isr);
        mask = (heart_read(&heart_regs->imr[cpu]) &
@@ -130,10 +130,8 @@ static void ip30_normal_irq(struct irq_desc *desc)
 #endif
        {
                domain = irq_desc_get_handler_data(desc);
-               irq = irq_linear_revmap(domain, __ffs(pend));
-               if (irq)
-                       generic_handle_irq(irq);
-               else
+               ret = generic_handle_domain_irq(domain, __ffs(pend));
+               if (ret)
                        spurious_interrupt();
        }
 }
index c6a1a9f..6b7890e 100644 (file)
@@ -19,11 +19,9 @@ static u32 ienable;
 asmlinkage void do_IRQ(int hwirq, struct pt_regs *regs)
 {
        struct pt_regs *oldregs = set_irq_regs(regs);
-       int irq;
 
        irq_enter();
-       irq = irq_find_mapping(NULL, hwirq);
-       generic_handle_irq(irq);
+       generic_handle_domain_irq(NULL, hwirq);
        irq_exit();
 
        set_irq_regs(oldregs);
index bde9907..4f8c1fb 100644 (file)
@@ -3,7 +3,6 @@ config PARISC
        def_bool y
        select ARCH_32BIT_OFF_T if !64BIT
        select ARCH_MIGHT_HAVE_PC_PARPORT
-       select HAVE_IDE
        select HAVE_FUNCTION_TRACER
        select HAVE_FUNCTION_GRAPH_TRACER
        select HAVE_SYSCALL_TRACEPOINTS
index 4a0c9db..f6e1132 100644 (file)
@@ -8,19 +8,4 @@ extern void * memset(void *, int, size_t);
 #define __HAVE_ARCH_MEMCPY
 void * memcpy(void * dest,const void *src,size_t count);
 
-#define __HAVE_ARCH_STRLEN
-extern size_t strlen(const char *s);
-
-#define __HAVE_ARCH_STRCPY
-extern char *strcpy(char *dest, const char *src);
-
-#define __HAVE_ARCH_STRNCPY
-extern char *strncpy(char *dest, const char *src, size_t count);
-
-#define __HAVE_ARCH_STRCAT
-extern char *strcat(char *dest, const char *src);
-
-#define __HAVE_ARCH_MEMSET
-extern void *memset(void *, int, size_t);
-
 #endif
index 8ed409e..e8a6a75 100644 (file)
 
 #include <linux/string.h>
 EXPORT_SYMBOL(memset);
-EXPORT_SYMBOL(strlen);
-EXPORT_SYMBOL(strcpy);
-EXPORT_SYMBOL(strncpy);
-EXPORT_SYMBOL(strcat);
 
 #include <linux/atomic.h>
 EXPORT_SYMBOL(__xchg8);
index 2d7a997..7b19766 100644 (file)
@@ -3,7 +3,7 @@
 # Makefile for parisc-specific library files
 #
 
-lib-y  := lusercopy.o bitops.o checksum.o io.o memcpy.o \
-          ucmpdi2.o delay.o string.o
+lib-y  := lusercopy.o bitops.o checksum.o io.o memset.o memcpy.o \
+          ucmpdi2.o delay.o
 
 obj-y  := iomap.o
diff --git a/arch/parisc/lib/memset.c b/arch/parisc/lib/memset.c
new file mode 100644 (file)
index 0000000..133e480
--- /dev/null
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#include <linux/types.h>
+#include <asm/string.h>
+
+#define OPSIZ (BITS_PER_LONG/8)
+typedef unsigned long op_t;
+
+void *
+memset (void *dstpp, int sc, size_t len)
+{
+  unsigned int c = sc;
+  long int dstp = (long int) dstpp;
+
+  if (len >= 8)
+    {
+      size_t xlen;
+      op_t cccc;
+
+      cccc = (unsigned char) c;
+      cccc |= cccc << 8;
+      cccc |= cccc << 16;
+      if (OPSIZ > 4)
+       /* Do the shift in two steps to avoid warning if long has 32 bits.  */
+       cccc |= (cccc << 16) << 16;
+
+      /* There are at least some bytes to set.
+        No need to test for LEN == 0 in this alignment loop.  */
+      while (dstp % OPSIZ != 0)
+       {
+         ((unsigned char *) dstp)[0] = c;
+         dstp += 1;
+         len -= 1;
+       }
+
+      /* Write 8 `op_t' per iteration until less than 8 `op_t' remain.  */
+      xlen = len / (OPSIZ * 8);
+      while (xlen > 0)
+       {
+         ((op_t *) dstp)[0] = cccc;
+         ((op_t *) dstp)[1] = cccc;
+         ((op_t *) dstp)[2] = cccc;
+         ((op_t *) dstp)[3] = cccc;
+         ((op_t *) dstp)[4] = cccc;
+         ((op_t *) dstp)[5] = cccc;
+         ((op_t *) dstp)[6] = cccc;
+         ((op_t *) dstp)[7] = cccc;
+         dstp += 8 * OPSIZ;
+         xlen -= 1;
+       }
+      len %= OPSIZ * 8;
+
+      /* Write 1 `op_t' per iteration until less than OPSIZ bytes remain.  */
+      xlen = len / OPSIZ;
+      while (xlen > 0)
+       {
+         ((op_t *) dstp)[0] = cccc;
+         dstp += OPSIZ;
+         xlen -= 1;
+       }
+      len %= OPSIZ;
+    }
+
+  /* Write the last few bytes.  */
+  while (len > 0)
+    {
+      ((unsigned char *) dstp)[0] = c;
+      dstp += 1;
+      len -= 1;
+    }
+
+  return dstpp;
+}
diff --git a/arch/parisc/lib/string.S b/arch/parisc/lib/string.S
deleted file mode 100644 (file)
index 4a64264..0000000
+++ /dev/null
@@ -1,136 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- *    PA-RISC assembly string functions
- *
- *    Copyright (C) 2019 Helge Deller <deller@gmx.de>
- */
-
-#include <asm/assembly.h>
-#include <linux/linkage.h>
-
-       .section .text.hot
-       .level PA_ASM_LEVEL
-
-       t0 = r20
-       t1 = r21
-       t2 = r22
-
-ENTRY_CFI(strlen, frame=0,no_calls)
-       or,COND(<>) arg0,r0,ret0
-       b,l,n   .Lstrlen_null_ptr,r0
-       depwi   0,31,2,ret0
-       cmpb,COND(<>) arg0,ret0,.Lstrlen_not_aligned
-       ldw,ma  4(ret0),t0
-       cmpib,tr 0,r0,.Lstrlen_loop
-       uxor,nbz r0,t0,r0
-.Lstrlen_not_aligned:
-       uaddcm  arg0,ret0,t1
-       shladd  t1,3,r0,t1
-       mtsar   t1
-       depwi   -1,%sar,32,t0
-       uxor,nbz r0,t0,r0
-.Lstrlen_loop:
-       b,l,n   .Lstrlen_end_loop,r0
-       ldw,ma  4(ret0),t0
-       cmpib,tr 0,r0,.Lstrlen_loop
-       uxor,nbz r0,t0,r0
-.Lstrlen_end_loop:
-       extrw,u,<> t0,7,8,r0
-       addib,tr,n -3,ret0,.Lstrlen_out
-       extrw,u,<> t0,15,8,r0
-       addib,tr,n -2,ret0,.Lstrlen_out
-       extrw,u,<> t0,23,8,r0
-       addi    -1,ret0,ret0
-.Lstrlen_out:
-       bv r0(rp)
-       uaddcm ret0,arg0,ret0
-.Lstrlen_null_ptr:
-       bv,n r0(rp)
-ENDPROC_CFI(strlen)
-
-
-ENTRY_CFI(strcpy, frame=0,no_calls)
-       ldb     0(arg1),t0
-       stb     t0,0(arg0)
-       ldo     0(arg0),ret0
-       ldo     1(arg1),t1
-       cmpb,=  r0,t0,2f
-       ldo     1(arg0),t2
-1:     ldb     0(t1),arg1
-       stb     arg1,0(t2)
-       ldo     1(t1),t1
-       cmpb,<> r0,arg1,1b
-       ldo     1(t2),t2
-2:     bv,n    r0(rp)
-ENDPROC_CFI(strcpy)
-
-
-ENTRY_CFI(strncpy, frame=0,no_calls)
-       ldb     0(arg1),t0
-       stb     t0,0(arg0)
-       ldo     1(arg1),t1
-       ldo     0(arg0),ret0
-       cmpb,=  r0,t0,2f
-       ldo     1(arg0),arg1
-1:     ldo     -1(arg2),arg2
-       cmpb,COND(=),n r0,arg2,2f
-       ldb     0(t1),arg0
-       stb     arg0,0(arg1)
-       ldo     1(t1),t1
-       cmpb,<> r0,arg0,1b
-       ldo     1(arg1),arg1
-2:     bv,n    r0(rp)
-ENDPROC_CFI(strncpy)
-
-
-ENTRY_CFI(strcat, frame=0,no_calls)
-       ldb     0(arg0),t0
-       cmpb,=  t0,r0,2f
-       ldo     0(arg0),ret0
-       ldo     1(arg0),arg0
-1:     ldb     0(arg0),t1
-       cmpb,<>,n r0,t1,1b
-       ldo     1(arg0),arg0
-2:     ldb     0(arg1),t2
-       stb     t2,0(arg0)
-       ldo     1(arg0),arg0
-       ldb     0(arg1),t0
-       cmpb,<> r0,t0,2b
-       ldo     1(arg1),arg1
-       bv,n    r0(rp)
-ENDPROC_CFI(strcat)
-
-
-ENTRY_CFI(memset, frame=0,no_calls)
-       copy    arg0,ret0
-       cmpb,COND(=) r0,arg0,4f
-       copy    arg0,t2
-       cmpb,COND(=) r0,arg2,4f
-       ldo     -1(arg2),arg3
-       subi    -1,arg3,t0
-       subi    0,t0,t1
-       cmpiclr,COND(>=) 0,t1,arg2
-       ldo     -1(t1),arg2
-       extru arg2,31,2,arg0
-2:     stb     arg1,0(t2)
-       ldo     1(t2),t2
-       addib,>= -1,arg0,2b
-       ldo     -1(arg3),arg3
-       cmpiclr,COND(<=) 4,arg2,r0
-       b,l,n   4f,r0
-#ifdef CONFIG_64BIT
-       depd,*  r0,63,2,arg2
-#else
-       depw    r0,31,2,arg2
-#endif
-       ldo     1(t2),t2
-3:     stb     arg1,-1(t2)
-       stb     arg1,0(t2)
-       stb     arg1,1(t2)
-       stb     arg1,2(t2)
-       addib,COND(>) -4,arg2,3b
-       ldo     4(t2),t2
-4:     bv,n    r0(rp)
-ENDPROC_CFI(memset)
-
-       .end
index d01e340..663766f 100644 (file)
@@ -220,7 +220,6 @@ config PPC
        select HAVE_HARDLOCKUP_DETECTOR_ARCH    if PPC_BOOK3S_64 && SMP
        select HAVE_HARDLOCKUP_DETECTOR_PERF    if PERF_EVENTS && HAVE_PERF_EVENTS_NMI && !HAVE_HARDLOCKUP_DETECTOR_ARCH
        select HAVE_HW_BREAKPOINT               if PERF_EVENTS && (PPC_BOOK3S || PPC_8xx)
-       select HAVE_IDE
        select HAVE_IOREMAP_PROT
        select HAVE_IRQ_EXIT_ON_IRQ_STACK
        select HAVE_IRQ_TIME_ACCOUNTING
index 6420112..d4b145b 100644 (file)
@@ -4,6 +4,8 @@
 
 #include <asm/bug.h>
 #include <asm/book3s/32/mmu-hash.h>
+#include <asm/mmu.h>
+#include <asm/synch.h>
 
 #ifndef __ASSEMBLY__
 
@@ -28,6 +30,15 @@ static inline void kuep_lock(void)
                return;
 
        update_user_segments(mfsr(0) | SR_NX);
+       /*
+        * This isync() shouldn't be necessary as the kernel is not expected to
+        * run any instruction in userspace soon after the update of segments,
+        * but hash based cores (at least G3) seem to exhibit a random
+        * behaviour when the 'isync' is not there. 603 cores don't have this
+        * behaviour so don't do the 'isync' as it saves several CPU cycles.
+        */
+       if (mmu_has_feature(MMU_FTR_HPTE_TABLE))
+               isync();        /* Context sync required after mtsr() */
 }
 
 static inline void kuep_unlock(void)
@@ -36,6 +47,15 @@ static inline void kuep_unlock(void)
                return;
 
        update_user_segments(mfsr(0) & ~SR_NX);
+       /*
+        * This isync() shouldn't be necessary as an 'rfi' will soon be executed
+        * to return to userspace, but hash based cores (at least G3) seem to
+        * exhibit a random behaviour when the 'isync' is not there. 603 cores
+        * don't have this behaviour so don't do the 'isync' as it saves several
+        * CPU cycles.
+        */
+       if (mmu_has_feature(MMU_FTR_HPTE_TABLE))
+               isync();        /* Context sync required after mtsr() */
 }
 
 #ifdef CONFIG_PPC_KUAP
index d4bdf7d..6b800d3 100644 (file)
@@ -583,6 +583,9 @@ DECLARE_INTERRUPT_HANDLER_NMI(hmi_exception_realmode);
 
 DECLARE_INTERRUPT_HANDLER_ASYNC(TAUException);
 
+/* irq.c */
+DECLARE_INTERRUPT_HANDLER_ASYNC(do_IRQ);
+
 void __noreturn unrecoverable_exception(struct pt_regs *regs);
 
 void replay_system_reset(void);
index 4982f37..2b32785 100644 (file)
@@ -52,7 +52,7 @@ extern void *mcheckirq_ctx[NR_CPUS];
 extern void *hardirq_ctx[NR_CPUS];
 extern void *softirq_ctx[NR_CPUS];
 
-extern void do_IRQ(struct pt_regs *regs);
+void __do_IRQ(struct pt_regs *regs);
 extern void __init init_IRQ(void);
 extern void __do_irq(struct pt_regs *regs);
 
index 3e5d470..14422e8 100644 (file)
@@ -70,6 +70,22 @@ struct pt_regs
                unsigned long __pad[4]; /* Maintain 16 byte interrupt stack alignment */
        };
 #endif
+#if defined(CONFIG_PPC32) && defined(CONFIG_BOOKE)
+       struct { /* Must be a multiple of 16 bytes */
+               unsigned long mas0;
+               unsigned long mas1;
+               unsigned long mas2;
+               unsigned long mas3;
+               unsigned long mas6;
+               unsigned long mas7;
+               unsigned long srr0;
+               unsigned long srr1;
+               unsigned long csrr0;
+               unsigned long csrr1;
+               unsigned long dsrr0;
+               unsigned long dsrr1;
+       };
+#endif
 };
 #endif
 
index a47eefa..5bee245 100644 (file)
@@ -309,24 +309,21 @@ int main(void)
        STACK_PT_REGS_OFFSET(STACK_REGS_IAMR, iamr);
 #endif
 
-#if defined(CONFIG_PPC32)
-#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
-       DEFINE(EXC_LVL_SIZE, STACK_EXC_LVL_FRAME_SIZE);
-       DEFINE(MAS0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas0));
+#if defined(CONFIG_PPC32) && defined(CONFIG_BOOKE)
+       STACK_PT_REGS_OFFSET(MAS0, mas0);
        /* we overload MMUCR for 44x on MAS0 since they are mutually exclusive */
-       DEFINE(MMUCR, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas0));
-       DEFINE(MAS1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas1));
-       DEFINE(MAS2, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas2));
-       DEFINE(MAS3, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas3));
-       DEFINE(MAS6, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas6));
-       DEFINE(MAS7, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas7));
-       DEFINE(_SRR0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, srr0));
-       DEFINE(_SRR1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, srr1));
-       DEFINE(_CSRR0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, csrr0));
-       DEFINE(_CSRR1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, csrr1));
-       DEFINE(_DSRR0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, dsrr0));
-       DEFINE(_DSRR1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, dsrr1));
-#endif
+       STACK_PT_REGS_OFFSET(MMUCR, mas0);
+       STACK_PT_REGS_OFFSET(MAS1, mas1);
+       STACK_PT_REGS_OFFSET(MAS2, mas2);
+       STACK_PT_REGS_OFFSET(MAS3, mas3);
+       STACK_PT_REGS_OFFSET(MAS6, mas6);
+       STACK_PT_REGS_OFFSET(MAS7, mas7);
+       STACK_PT_REGS_OFFSET(_SRR0, srr0);
+       STACK_PT_REGS_OFFSET(_SRR1, srr1);
+       STACK_PT_REGS_OFFSET(_CSRR0, csrr0);
+       STACK_PT_REGS_OFFSET(_CSRR1, csrr1);
+       STACK_PT_REGS_OFFSET(_DSRR0, dsrr0);
+       STACK_PT_REGS_OFFSET(_DSRR1, dsrr1);
 #endif
 
        /* About the CPU features table */
index 4aec59a..37859e6 100644 (file)
@@ -812,7 +812,6 @@ __start_interrupts:
  * syscall register convention is in Documentation/powerpc/syscall64-abi.rst
  */
 EXC_VIRT_BEGIN(system_call_vectored, 0x3000, 0x1000)
-1:
        /* SCV 0 */
        mr      r9,r13
        GET_PACA(r13)
@@ -842,10 +841,12 @@ EXC_VIRT_BEGIN(system_call_vectored, 0x3000, 0x1000)
        b       system_call_vectored_sigill
 #endif
        .endr
-2:
 EXC_VIRT_END(system_call_vectored, 0x3000, 0x1000)
 
-SOFT_MASK_TABLE(1b, 2b) // Treat scv vectors as soft-masked, see comment above.
+// Treat scv vectors as soft-masked, see comment above.
+// Use absolute values rather than labels here, so they don't get relocated,
+// because this code runs unrelocated.
+SOFT_MASK_TABLE(0xc000000000003000, 0xc000000000004000)
 
 #ifdef CONFIG_RELOCATABLE
 TRAMP_VIRT_BEGIN(system_call_vectored_tramp)
index 764edd8..68e5c0a 100644 (file)
@@ -300,7 +300,7 @@ ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_HPTE_TABLE)
        EXCEPTION_PROLOG_1
        EXCEPTION_PROLOG_2 INTERRUPT_DATA_STORAGE DataAccess handle_dar_dsisr=1
        prepare_transfer_to_handler
-       lwz     r5, _DSISR(r11)
+       lwz     r5, _DSISR(r1)
        andis.  r0, r5, DSISR_DABRMATCH@h
        bne-    1f
        bl      do_page_fault
index 87b806e..e550342 100644 (file)
@@ -168,20 +168,18 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_EMB_HV)
 /* only on e500mc */
 #define DBG_STACK_BASE         dbgirq_ctx
 
-#define EXC_LVL_FRAME_OVERHEAD (THREAD_SIZE - INT_FRAME_SIZE - EXC_LVL_SIZE)
-
 #ifdef CONFIG_SMP
 #define BOOKE_LOAD_EXC_LEVEL_STACK(level)              \
        mfspr   r8,SPRN_PIR;                            \
        slwi    r8,r8,2;                                \
        addis   r8,r8,level##_STACK_BASE@ha;            \
        lwz     r8,level##_STACK_BASE@l(r8);            \
-       addi    r8,r8,EXC_LVL_FRAME_OVERHEAD;
+       addi    r8,r8,THREAD_SIZE - INT_FRAME_SIZE;
 #else
 #define BOOKE_LOAD_EXC_LEVEL_STACK(level)              \
        lis     r8,level##_STACK_BASE@ha;               \
        lwz     r8,level##_STACK_BASE@l(r8);            \
-       addi    r8,r8,EXC_LVL_FRAME_OVERHEAD;
+       addi    r8,r8,THREAD_SIZE - INT_FRAME_SIZE;
 #endif
 
 /*
@@ -208,7 +206,7 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_EMB_HV)
        mtmsr   r11;                                                    \
        mfspr   r11,SPRN_SPRG_THREAD;   /* if from user, start at top of   */\
        lwz     r11, TASK_STACK - THREAD(r11); /* this thread's kernel stack */\
-       addi    r11,r11,EXC_LVL_FRAME_OVERHEAD; /* allocate stack frame    */\
+       addi    r11,r11,THREAD_SIZE - INT_FRAME_SIZE;   /* allocate stack frame    */\
        beq     1f;                                                          \
        /* COMING FROM USER MODE */                                          \
        stw     r9,_CCR(r11);           /* save CR                         */\
@@ -516,24 +514,5 @@ label:
        bl      kernel_fp_unavailable_exception;                              \
        b       interrupt_return
 
-#else /* __ASSEMBLY__ */
-struct exception_regs {
-       unsigned long mas0;
-       unsigned long mas1;
-       unsigned long mas2;
-       unsigned long mas3;
-       unsigned long mas6;
-       unsigned long mas7;
-       unsigned long srr0;
-       unsigned long srr1;
-       unsigned long csrr0;
-       unsigned long csrr1;
-       unsigned long dsrr0;
-       unsigned long dsrr1;
-};
-
-/* ensure this structure is always sized to a multiple of the stack alignment */
-#define STACK_EXC_LVL_FRAME_SIZE       ALIGN(sizeof (struct exception_regs), 16)
-
 #endif /* __ASSEMBLY__ */
 #endif /* __HEAD_BOOKE_H__ */
index 91e63ea..551b653 100644 (file)
@@ -750,7 +750,7 @@ void __do_irq(struct pt_regs *regs)
        trace_irq_exit(regs);
 }
 
-DEFINE_INTERRUPT_HANDLER_ASYNC(do_IRQ)
+void __do_IRQ(struct pt_regs *regs)
 {
        struct pt_regs *old_regs = set_irq_regs(regs);
        void *cursp, *irqsp, *sirqsp;
@@ -774,6 +774,11 @@ DEFINE_INTERRUPT_HANDLER_ASYNC(do_IRQ)
        set_irq_regs(old_regs);
 }
 
+DEFINE_INTERRUPT_HANDLER_ASYNC(do_IRQ)
+{
+       __do_IRQ(regs);
+}
+
 static void *__init alloc_vm_stack(void)
 {
        return __vmalloc_node(THREAD_SIZE, THREAD_ALIGN, THREADINFO_GFP,
index cbc28d1..7a7cd6b 100644 (file)
@@ -292,7 +292,8 @@ int kprobe_handler(struct pt_regs *regs)
        if (user_mode(regs))
                return 0;
 
-       if (!(regs->msr & MSR_IR) || !(regs->msr & MSR_DR))
+       if (!IS_ENABLED(CONFIG_BOOKE) &&
+           (!(regs->msr & MSR_IR) || !(regs->msr & MSR_DR)))
                return 0;
 
        /*
index 5ff0e55..defecb3 100644 (file)
@@ -1167,7 +1167,7 @@ static int __init topology_init(void)
                 * CPU.  For instance, the boot cpu might never be valid
                 * for hotplugging.
                 */
-               if (smp_ops->cpu_offline_self)
+               if (smp_ops && smp_ops->cpu_offline_self)
                        c->hotpluggable = 1;
 #endif
 
index e45ce42..c487ba5 100644 (file)
@@ -586,7 +586,7 @@ DEFINE_INTERRUPT_HANDLER_ASYNC(timer_interrupt)
 
 #if defined(CONFIG_PPC32) && defined(CONFIG_PPC_PMAC)
        if (atomic_read(&ppc_n_lost_interrupts) != 0)
-               do_IRQ(regs);
+               __do_IRQ(regs);
 #endif
 
        old_regs = set_irq_regs(regs);
index dfbce52..d56254f 100644 (file)
@@ -1104,7 +1104,7 @@ DEFINE_INTERRUPT_HANDLER(RunModeException)
        _exception(SIGTRAP, regs, TRAP_UNK, 0);
 }
 
-DEFINE_INTERRUPT_HANDLER(single_step_exception)
+static void __single_step_exception(struct pt_regs *regs)
 {
        clear_single_step(regs);
        clear_br_trace(regs);
@@ -1121,6 +1121,11 @@ DEFINE_INTERRUPT_HANDLER(single_step_exception)
        _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
 }
 
+DEFINE_INTERRUPT_HANDLER(single_step_exception)
+{
+       __single_step_exception(regs);
+}
+
 /*
  * After we have successfully emulated an instruction, we have to
  * check if the instruction was being single-stepped, and if so,
@@ -1130,7 +1135,7 @@ DEFINE_INTERRUPT_HANDLER(single_step_exception)
 static void emulate_single_step(struct pt_regs *regs)
 {
        if (single_stepping(regs))
-               single_step_exception(regs);
+               __single_step_exception(regs);
 }
 
 static inline int __parse_fpscr(unsigned long fpscr)
index 2813e3f..3c5baaa 100644 (file)
@@ -27,6 +27,13 @@ KASAN_SANITIZE := n
 
 ccflags-y := -shared -fno-common -fno-builtin -nostdlib \
        -Wl,-soname=linux-vdso64.so.1 -Wl,--hash-style=both
+
+# Go prior to 1.16.x assumes r30 is not clobbered by any VDSO code. That used to be true
+# by accident when the VDSO was hand-written asm code, but may not be now that the VDSO is
+# compiler generated. To avoid breaking Go tell GCC not to use r30. Impact on code
+# generation is minimal, it will just use r29 instead.
+ccflags-y += $(call cc-option, -ffixed-r30)
+
 asflags-y := -D__VDSO64__ -s
 
 targets += vdso64.lds
index 0876216..edea388 100644 (file)
 /*
  * Updates the attributes of a page in three steps:
  *
- * 1. invalidate the page table entry
- * 2. flush the TLB
- * 3. install the new entry with the updated attributes
- *
- * Invalidating the pte means there are situations where this will not work
- * when in theory it should.
- * For example:
- * - removing write from page whilst it is being executed
- * - setting a page read-only whilst it is being read by another CPU
+ * 1. take the page_table_lock
+ * 2. install the new entry with the updated attributes
+ * 3. flush the TLB
  *
+ * This sequence is safe against concurrent updates, and also allows updating the
+ * attributes of a page currently being executed or accessed.
  */
 static int change_page_attr(pte_t *ptep, unsigned long addr, void *data)
 {
@@ -36,9 +32,7 @@ static int change_page_attr(pte_t *ptep, unsigned long addr, void *data)
 
        spin_lock(&init_mm.page_table_lock);
 
-       /* invalidate the PTE so it's safe to modify */
-       pte = ptep_get_and_clear(&init_mm, addr, ptep);
-       flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
+       pte = ptep_get(ptep);
 
        /* modify the PTE bits as desired, then apply */
        switch (action) {
@@ -59,11 +53,14 @@ static int change_page_attr(pte_t *ptep, unsigned long addr, void *data)
                break;
        }
 
-       set_pte_at(&init_mm, addr, ptep, pte);
+       pte_update(&init_mm, addr, ptep, ~0UL, pte_val(pte), 0);
 
        /* See ptesync comment in radix__set_pte_at() */
        if (radix_enabled())
                asm volatile("ptesync": : :"memory");
+
+       flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
+
        spin_unlock(&init_mm.page_table_lock);
 
        return 0;
index 34bb158..beb12cb 100644 (file)
@@ -737,6 +737,12 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
                        }
                        break;
 
+               /*
+                * BPF_ST NOSPEC (speculation barrier)
+                */
+               case BPF_ST | BPF_NOSPEC:
+                       break;
+
                /*
                 * BPF_ST(X)
                 */
index de85958..b87a63d 100644 (file)
@@ -627,6 +627,12 @@ emit_clear:
                        }
                        break;
 
+               /*
+                * BPF_ST NOSPEC (speculation barrier)
+                */
+               case BPF_ST | BPF_NOSPEC:
+                       break;
+
                /*
                 * BPF_ST(X)
                 */
index 36fb66c..89e2587 100644 (file)
@@ -198,7 +198,6 @@ static void uic_irq_cascade(struct irq_desc *desc)
        struct uic *uic = irq_desc_get_handler_data(desc);
        u32 msr;
        int src;
-       int subvirq;
 
        raw_spin_lock(&desc->lock);
        if (irqd_is_level_type(idata))
@@ -213,8 +212,7 @@ static void uic_irq_cascade(struct irq_desc *desc)
 
        src = 32 - ffs(msr);
 
-       subvirq = irq_linear_revmap(uic->irqhost, src);
-       generic_handle_irq(subvirq);
+       generic_handle_domain_irq(uic->irqhost, src);
 
 uic_irq_ret:
        raw_spin_lock(&desc->lock);
index b298163..ea46870 100644 (file)
@@ -81,11 +81,10 @@ static struct irq_chip cpld_pic = {
        .irq_unmask = cpld_unmask_irq,
 };
 
-static int
+static unsigned int
 cpld_pic_get_irq(int offset, u8 ignore, u8 __iomem *statusp,
                            u8 __iomem *maskp)
 {
-       int cpld_irq;
        u8 status = in_8(statusp);
        u8 mask = in_8(maskp);
 
@@ -93,28 +92,26 @@ cpld_pic_get_irq(int offset, u8 ignore, u8 __iomem *statusp,
        status |= (ignore | mask);
 
        if (status == 0xff)
-               return 0;
-
-       cpld_irq = ffz(status) + offset;
+               return ~0;
 
-       return irq_linear_revmap(cpld_pic_host, cpld_irq);
+       return ffz(status) + offset;
 }
 
 static void cpld_pic_cascade(struct irq_desc *desc)
 {
-       unsigned int irq;
+       unsigned int hwirq;
 
-       irq = cpld_pic_get_irq(0, PCI_IGNORE, &cpld_regs->pci_status,
+       hwirq = cpld_pic_get_irq(0, PCI_IGNORE, &cpld_regs->pci_status,
                &cpld_regs->pci_mask);
-       if (irq) {
-               generic_handle_irq(irq);
+       if (hwirq != ~0) {
+               generic_handle_domain_irq(cpld_pic_host, hwirq);
                return;
        }
 
-       irq = cpld_pic_get_irq(8, MISC_IGNORE, &cpld_regs->misc_status,
+       hwirq = cpld_pic_get_irq(8, MISC_IGNORE, &cpld_regs->misc_status,
                &cpld_regs->misc_mask);
-       if (irq) {
-               generic_handle_irq(irq);
+       if (hwirq != ~0) {
+               generic_handle_domain_irq(cpld_pic_host, hwirq);
                return;
        }
 }
index efb8bde..110c444 100644 (file)
@@ -78,7 +78,7 @@ static struct irq_chip media5200_irq_chip = {
 static void media5200_irq_cascade(struct irq_desc *desc)
 {
        struct irq_chip *chip = irq_desc_get_chip(desc);
-       int sub_virq, val;
+       int val;
        u32 status, enable;
 
        /* Mask off the cascaded IRQ */
@@ -92,11 +92,10 @@ static void media5200_irq_cascade(struct irq_desc *desc)
        enable = in_be32(media5200_irq.regs + MEDIA5200_IRQ_STATUS);
        val = ffs((status & enable) >> MEDIA5200_IRQ_SHIFT);
        if (val) {
-               sub_virq = irq_linear_revmap(media5200_irq.irqhost, val - 1);
-               /* pr_debug("%s: virq=%i s=%.8x e=%.8x hwirq=%i subvirq=%i\n",
-                *          __func__, virq, status, enable, val - 1, sub_virq);
+               generic_handle_domain_irq(media5200_irq.irqhost, val - 1);
+               /* pr_debug("%s: virq=%i s=%.8x e=%.8x hwirq=%i\n",
+                *          __func__, virq, status, enable, val - 1);
                 */
-               generic_handle_irq(sub_virq);
        }
 
        /* Processing done; can reenable the cascade now */
index 3823df2..f862b48 100644 (file)
@@ -190,14 +190,11 @@ static struct irq_chip mpc52xx_gpt_irq_chip = {
 static void mpc52xx_gpt_irq_cascade(struct irq_desc *desc)
 {
        struct mpc52xx_gpt_priv *gpt = irq_desc_get_handler_data(desc);
-       int sub_virq;
        u32 status;
 
        status = in_be32(&gpt->regs->status) & MPC52xx_GPT_STATUS_IRQMASK;
-       if (status) {
-               sub_virq = irq_linear_revmap(gpt->irqhost, 0);
-               generic_handle_irq(sub_virq);
-       }
+       if (status)
+               generic_handle_domain_irq(gpt->irqhost, 0);
 }
 
 static int mpc52xx_gpt_irq_map(struct irq_domain *h, unsigned int virq,
index f82f75a..285bfe1 100644 (file)
@@ -91,10 +91,8 @@ static void pq2ads_pci_irq_demux(struct irq_desc *desc)
                        break;
 
                for (bit = 0; pend != 0; ++bit, pend <<= 1) {
-                       if (pend & 0x80000000) {
-                               int virq = irq_linear_revmap(priv->host, bit);
-                               generic_handle_irq(virq);
-                       }
+                       if (pend & 0x80000000)
+                               generic_handle_domain_irq(priv->host, bit);
                }
        }
 }
index 6794145..a208997 100644 (file)
@@ -98,7 +98,7 @@ config PPC_BOOK3S_64
        select PPC_HAVE_PMU_SUPPORT
        select HAVE_ARCH_TRANSPARENT_HUGEPAGE
        select ARCH_ENABLE_HUGEPAGE_MIGRATION if HUGETLB_PAGE && MIGRATION
-       select ARCH_ENABLE_PMD_SPLIT_PTLOCK
+       select ARCH_ENABLE_SPLIT_PMD_PTLOCK
        select ARCH_ENABLE_THP_MIGRATION if TRANSPARENT_HUGEPAGE
        select ARCH_SUPPORTS_HUGETLBFS
        select ARCH_SUPPORTS_NUMA_BALANCING
index c0ab62b..0873a7a 100644 (file)
@@ -106,13 +106,9 @@ static void iic_ioexc_cascade(struct irq_desc *desc)
                        out_be64(&node_iic->iic_is, ack);
                /* handle them */
                for (cascade = 63; cascade >= 0; cascade--)
-                       if (bits & (0x8000000000000000UL >> cascade)) {
-                               unsigned int cirq =
-                                       irq_linear_revmap(iic_host,
+                       if (bits & (0x8000000000000000UL >> cascade))
+                               generic_handle_domain_irq(iic_host,
                                                          base | cascade);
-                               if (cirq)
-                                       generic_handle_irq(cirq);
-                       }
                /* post-ack level interrupts */
                ack = bits & ~IIC_ISR_EDGE_MASK;
                if (ack)
index 210785f..8af7586 100644 (file)
@@ -190,16 +190,11 @@ static void spider_irq_cascade(struct irq_desc *desc)
 {
        struct irq_chip *chip = irq_desc_get_chip(desc);
        struct spider_pic *pic = irq_desc_get_handler_data(desc);
-       unsigned int cs, virq;
+       unsigned int cs;
 
        cs = in_be32(pic->regs + TIR_CS) >> 24;
-       if (cs == SPIDER_IRQ_INVALID)
-               virq = 0;
-       else
-               virq = irq_linear_revmap(pic->host, cs);
-
-       if (virq)
-               generic_handle_irq(virq);
+       if (cs != SPIDER_IRQ_INVALID)
+               generic_handle_domain_irq(pic->host, cs);
 
        chip->irq_eoi(&desc->irq_data);
 }
index a1b7f79..1539633 100644 (file)
@@ -108,7 +108,6 @@ static const struct irq_domain_ops hlwd_irq_domain_ops = {
 static unsigned int __hlwd_pic_get_irq(struct irq_domain *h)
 {
        void __iomem *io_base = h->host_data;
-       int irq;
        u32 irq_status;
 
        irq_status = in_be32(io_base + HW_BROADWAY_ICR) &
@@ -116,23 +115,22 @@ static unsigned int __hlwd_pic_get_irq(struct irq_domain *h)
        if (irq_status == 0)
                return 0;       /* no more IRQs pending */
 
-       irq = __ffs(irq_status);
-       return irq_linear_revmap(h, irq);
+       return __ffs(irq_status);
 }
 
 static void hlwd_pic_irq_cascade(struct irq_desc *desc)
 {
        struct irq_chip *chip = irq_desc_get_chip(desc);
        struct irq_domain *irq_domain = irq_desc_get_handler_data(desc);
-       unsigned int virq;
+       unsigned int hwirq;
 
        raw_spin_lock(&desc->lock);
        chip->irq_mask(&desc->irq_data); /* IRQ_LEVEL */
        raw_spin_unlock(&desc->lock);
 
-       virq = __hlwd_pic_get_irq(irq_domain);
-       if (virq)
-               generic_handle_irq(virq);
+       hwirq = __hlwd_pic_get_irq(irq_domain);
+       if (hwirq)
+               generic_handle_domain_irq(irq_domain, hwirq);
        else
                pr_err("spurious interrupt!\n");
 
@@ -190,7 +188,8 @@ static struct irq_domain *hlwd_pic_init(struct device_node *np)
 
 unsigned int hlwd_pic_get_irq(void)
 {
-       return __hlwd_pic_get_irq(hlwd_irq_host);
+       unsigned int hwirq = __hlwd_pic_get_irq(hlwd_irq_host);
+       return hwirq ? irq_linear_revmap(hlwd_irq_host, hwirq) : 0;
 }
 
 /*
index c164419..d55652b 100644 (file)
@@ -46,18 +46,15 @@ void opal_handle_events(void)
        e = READ_ONCE(last_outstanding_events) & opal_event_irqchip.mask;
 again:
        while (e) {
-               int virq, hwirq;
+               int hwirq;
 
                hwirq = fls64(e) - 1;
                e &= ~BIT_ULL(hwirq);
 
                local_irq_disable();
-               virq = irq_find_mapping(opal_event_irqchip.domain, hwirq);
-               if (virq) {
-                       irq_enter();
-                       generic_handle_irq(virq);
-                       irq_exit();
-               }
+               irq_enter();
+               generic_handle_domain_irq(opal_event_irqchip.domain, hwirq);
+               irq_exit();
                local_irq_enable();
 
                cond_resched();
index 631a0d5..0dfaa6a 100644 (file)
@@ -77,7 +77,7 @@
 #include "../../../../drivers/pci/pci.h"
 
 DEFINE_STATIC_KEY_FALSE(shared_processor);
-EXPORT_SYMBOL_GPL(shared_processor);
+EXPORT_SYMBOL(shared_processor);
 
 int CMO_PrPSP = -1;
 int CMO_SecPSP = -1;
@@ -539,9 +539,10 @@ static void init_cpu_char_feature_flags(struct h_cpu_char_result *result)
         * H_CPU_BEHAV_FAVOUR_SECURITY_H could be set only if
         * H_CPU_BEHAV_FAVOUR_SECURITY is.
         */
-       if (!(result->behaviour & H_CPU_BEHAV_FAVOUR_SECURITY))
+       if (!(result->behaviour & H_CPU_BEHAV_FAVOUR_SECURITY)) {
                security_ftr_clear(SEC_FTR_FAVOUR_SECURITY);
-       else if (result->behaviour & H_CPU_BEHAV_FAVOUR_SECURITY_H)
+               pseries_security_flavor = 0;
+       } else if (result->behaviour & H_CPU_BEHAV_FAVOUR_SECURITY_H)
                pseries_security_flavor = 1;
        else
                pseries_security_flavor = 2;
index 5fa5fa2..9a98bb2 100644 (file)
@@ -99,7 +99,6 @@ static irqreturn_t fsl_error_int_handler(int irq, void *data)
        struct mpic *mpic = (struct mpic *) data;
        u32 eisr, eimr;
        int errint;
-       unsigned int cascade_irq;
 
        eisr = mpic_fsl_err_read(mpic->err_regs, MPIC_ERR_INT_EISR);
        eimr = mpic_fsl_err_read(mpic->err_regs, MPIC_ERR_INT_EIMR);
@@ -108,13 +107,11 @@ static irqreturn_t fsl_error_int_handler(int irq, void *data)
                return IRQ_NONE;
 
        while (eisr) {
+               int ret;
                errint = __builtin_clz(eisr);
-               cascade_irq = irq_linear_revmap(mpic->irqhost,
-                                mpic->err_int_vecs[errint]);
-               WARN_ON(!cascade_irq);
-               if (cascade_irq) {
-                       generic_handle_irq(cascade_irq);
-               } else {
+               ret = generic_handle_domain_irq(mpic->irqhost,
+                                               mpic->err_int_vecs[errint]);
+               if (WARN_ON(ret)) {
                        eimr |=  1 << (31 - errint);
                        mpic_fsl_err_write(mpic->err_regs, eimr);
                }
index 808e711..e6b06c3 100644 (file)
@@ -266,7 +266,6 @@ out_free:
 
 static irqreturn_t fsl_msi_cascade(int irq, void *data)
 {
-       unsigned int cascade_irq;
        struct fsl_msi *msi_data;
        int msir_index = -1;
        u32 msir_value = 0;
@@ -279,9 +278,6 @@ static irqreturn_t fsl_msi_cascade(int irq, void *data)
 
        msir_index = cascade_data->index;
 
-       if (msir_index >= NR_MSI_REG_MAX)
-               cascade_irq = 0;
-
        switch (msi_data->feature & FSL_PIC_IP_MASK) {
        case FSL_PIC_IP_MPIC:
                msir_value = fsl_msi_read(msi_data->msi_regs,
@@ -305,15 +301,15 @@ static irqreturn_t fsl_msi_cascade(int irq, void *data)
        }
 
        while (msir_value) {
+               int err;
                intr_index = ffs(msir_value) - 1;
 
-               cascade_irq = irq_linear_revmap(msi_data->irqhost,
+               err = generic_handle_domain_irq(msi_data->irqhost,
                                msi_hwirq(msi_data, msir_index,
                                          intr_index + have_shift));
-               if (cascade_irq) {
-                       generic_handle_irq(cascade_irq);
+               if (!err)
                        ret = IRQ_HANDLED;
-               }
+
                have_shift += intr_index + 1;
                msir_value = msir_value >> (intr_index + 1);
        }
index dbdbbc2..8183ca3 100644 (file)
@@ -67,6 +67,7 @@ static struct irq_domain *xive_irq_domain;
 static struct xive_ipi_desc {
        unsigned int irq;
        char name[16];
+       atomic_t started;
 } *xive_ipis;
 
 /*
@@ -1120,7 +1121,7 @@ static const struct irq_domain_ops xive_ipi_irq_domain_ops = {
        .alloc  = xive_ipi_irq_domain_alloc,
 };
 
-static int __init xive_request_ipi(void)
+static int __init xive_init_ipis(void)
 {
        struct fwnode_handle *fwnode;
        struct irq_domain *ipi_domain;
@@ -1144,10 +1145,6 @@ static int __init xive_request_ipi(void)
                struct xive_ipi_desc *xid = &xive_ipis[node];
                struct xive_ipi_alloc_info info = { node };
 
-               /* Skip nodes without CPUs */
-               if (cpumask_empty(cpumask_of_node(node)))
-                       continue;
-
                /*
                 * Map one IPI interrupt per node for all cpus of that node.
                 * Since the HW interrupt number doesn't have any meaning,
@@ -1159,11 +1156,6 @@ static int __init xive_request_ipi(void)
                xid->irq = ret;
 
                snprintf(xid->name, sizeof(xid->name), "IPI-%d", node);
-
-               ret = request_irq(xid->irq, xive_muxed_ipi_action,
-                                 IRQF_PERCPU | IRQF_NO_THREAD, xid->name, NULL);
-
-               WARN(ret < 0, "Failed to request IPI %d: %d\n", xid->irq, ret);
        }
 
        return ret;
@@ -1178,6 +1170,22 @@ out:
        return ret;
 }
 
+static int xive_request_ipi(unsigned int cpu)
+{
+       struct xive_ipi_desc *xid = &xive_ipis[early_cpu_to_node(cpu)];
+       int ret;
+
+       if (atomic_inc_return(&xid->started) > 1)
+               return 0;
+
+       ret = request_irq(xid->irq, xive_muxed_ipi_action,
+                         IRQF_PERCPU | IRQF_NO_THREAD,
+                         xid->name, NULL);
+
+       WARN(ret < 0, "Failed to request IPI %d: %d\n", xid->irq, ret);
+       return ret;
+}
+
 static int xive_setup_cpu_ipi(unsigned int cpu)
 {
        unsigned int xive_ipi_irq = xive_ipi_cpu_to_irq(cpu);
@@ -1192,6 +1200,9 @@ static int xive_setup_cpu_ipi(unsigned int cpu)
        if (xc->hw_ipi != XIVE_BAD_IRQ)
                return 0;
 
+       /* Register the IPI */
+       xive_request_ipi(cpu);
+
        /* Grab an IPI from the backend, this will populate xc->hw_ipi */
        if (xive_ops->get_ipi(cpu, xc))
                return -EIO;
@@ -1231,6 +1242,8 @@ static void xive_cleanup_cpu_ipi(unsigned int cpu, struct xive_cpu *xc)
        if (xc->hw_ipi == XIVE_BAD_IRQ)
                return;
 
+       /* TODO: clear IPI mapping */
+
        /* Mask the IPI */
        xive_do_source_set_mask(&xc->ipi_data, true);
 
@@ -1253,7 +1266,7 @@ void __init xive_smp_probe(void)
        smp_ops->cause_ipi = xive_cause_ipi;
 
        /* Register the IPI */
-       xive_request_ipi();
+       xive_init_ipis();
 
        /* Allocate and setup IPI for the boot CPU */
        xive_setup_cpu_ipi(smp_processor_id());
index 8fcceb8..4f7b70a 100644 (file)
@@ -492,10 +492,16 @@ config CC_HAVE_STACKPROTECTOR_TLS
 
 config STACKPROTECTOR_PER_TASK
        def_bool y
+       depends on !GCC_PLUGIN_RANDSTRUCT
        depends on STACKPROTECTOR && CC_HAVE_STACKPROTECTOR_TLS
 
+config PHYS_RAM_BASE_FIXED
+       bool "Explicitly specified physical RAM address"
+       default n
+
 config PHYS_RAM_BASE
        hex "Platform Physical RAM address"
+       depends on PHYS_RAM_BASE_FIXED
        default "0x80000000"
        help
          This is the physical address of RAM in the system. It has to be
@@ -508,6 +514,7 @@ config XIP_KERNEL
        # This prevents XIP from being enabled by all{yes,mod}config, which
        # fail to build since XIP doesn't support large kernels.
        depends on !COMPILE_TEST
+       select PHYS_RAM_BASE_FIXED
        help
          Execute-In-Place allows the kernel to run from non-volatile storage
          directly addressable by the CPU, such as NOR flash. This saves RAM
index ec79944..baea7d2 100644 (file)
        model = "Microchip PolarFire-SoC Icicle Kit";
        compatible = "microchip,mpfs-icicle-kit";
 
+       aliases {
+               ethernet0 = &emac1;
+       };
+
        chosen {
                stdout-path = &serial0;
        };
index b981957..9d2fbbc 100644 (file)
                        reg = <0x0 0x20112000 0x0 0x2000>;
                        interrupt-parent = <&plic>;
                        interrupts = <70 71 72 73>;
-                       mac-address = [00 00 00 00 00 00];
+                       local-mac-address = [00 00 00 00 00 00];
                        clocks = <&clkcfg 5>, <&clkcfg 2>;
                        status = "disabled";
                        clock-names = "pclk", "hclk";
index b1c3c59..2e4ea84 100644 (file)
@@ -24,7 +24,7 @@
 
        memory@80000000 {
                device_type = "memory";
-               reg = <0x0 0x80000000 0x2 0x00000000>;
+               reg = <0x0 0x80000000 0x4 0x00000000>;
        };
 
        soc {
index 1f2be23..bc68231 100644 (file)
@@ -132,7 +132,6 @@ CONFIG_DEBUG_PLIST=y
 CONFIG_DEBUG_SG=y
 # CONFIG_RCU_TRACE is not set
 CONFIG_RCU_EQS_DEBUG=y
-CONFIG_DEBUG_BLOCK_EXT_DEVT=y
 # CONFIG_FTRACE is not set
 # CONFIG_RUNTIME_TESTING_MENU is not set
 CONFIG_MEMTEST=y
index 8dd02b8..434ef5b 100644 (file)
@@ -127,7 +127,6 @@ CONFIG_DEBUG_PLIST=y
 CONFIG_DEBUG_SG=y
 # CONFIG_RCU_TRACE is not set
 CONFIG_RCU_EQS_DEBUG=y
-CONFIG_DEBUG_BLOCK_EXT_DEVT=y
 # CONFIG_FTRACE is not set
 # CONFIG_RUNTIME_TESTING_MENU is not set
 CONFIG_MEMTEST=y
index cca8764..b0ca505 100644 (file)
@@ -103,6 +103,7 @@ struct kernel_mapping {
 };
 
 extern struct kernel_mapping kernel_map;
+extern phys_addr_t phys_ram_base;
 
 #ifdef CONFIG_64BIT
 #define is_kernel_mapping(x)   \
@@ -113,9 +114,9 @@ extern struct kernel_mapping kernel_map;
 #define linear_mapping_pa_to_va(x)     ((void *)((unsigned long)(x) + kernel_map.va_pa_offset))
 #define kernel_mapping_pa_to_va(y)     ({                                              \
        unsigned long _y = y;                                                           \
-       (_y >= CONFIG_PHYS_RAM_BASE) ?                                                  \
-               (void *)((unsigned long)(_y) + kernel_map.va_kernel_pa_offset + XIP_OFFSET) :   \
-               (void *)((unsigned long)(_y) + kernel_map.va_kernel_xip_pa_offset);             \
+       (IS_ENABLED(CONFIG_XIP_KERNEL) && _y < phys_ram_base) ?                                 \
+               (void *)((unsigned long)(_y) + kernel_map.va_kernel_xip_pa_offset) :            \
+               (void *)((unsigned long)(_y) + kernel_map.va_kernel_pa_offset + XIP_OFFSET);    \
        })
 #define __pa_to_va_nodebug(x)          linear_mapping_pa_to_va(x)
 
index d3081e4..3397dda 100644 (file)
@@ -11,7 +11,7 @@ endif
 CFLAGS_syscall_table.o += $(call cc-option,-Wno-override-init,)
 
 ifdef CONFIG_KEXEC
-AFLAGS_kexec_relocate.o := -mcmodel=medany -mno-relax
+AFLAGS_kexec_relocate.o := -mcmodel=medany $(call cc-option,-mno-relax)
 endif
 
 extra-y += head.o
index 1a85305..9c05111 100644 (file)
@@ -10,6 +10,7 @@
 #include <asm/ptrace.h>
 #include <asm/syscall.h>
 #include <asm/thread_info.h>
+#include <asm/switch_to.h>
 #include <linux/audit.h>
 #include <linux/ptrace.h>
 #include <linux/elf.h>
@@ -56,6 +57,9 @@ static int riscv_fpr_get(struct task_struct *target,
 {
        struct __riscv_d_ext_state *fstate = &target->thread.fstate;
 
+       if (target == current)
+               fstate_save(current, task_pt_regs(current));
+
        membuf_write(&to, fstate, offsetof(struct __riscv_d_ext_state, fcsr));
        membuf_store(&to, fstate->fcsr);
        return membuf_zero(&to, 4);     // explicitly pad
index 18bd0e4..120b2f6 100644 (file)
@@ -229,8 +229,8 @@ static void __init init_resources(void)
        }
 
        /* Clean-up any unused pre-allocated resources */
-       mem_res_sz = (num_resources - res_idx + 1) * sizeof(*mem_res);
-       memblock_free(__pa(mem_res), mem_res_sz);
+       if (res_idx >= 0)
+               memblock_free(__pa(mem_res), (res_idx + 1) * sizeof(*mem_res));
        return;
 
  error:
index ac75936..315db3d 100644 (file)
@@ -27,7 +27,7 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
                fp = frame_pointer(regs);
                sp = user_stack_pointer(regs);
                pc = instruction_pointer(regs);
-       } else if (task == current) {
+       } else if (task == NULL || task == current) {
                fp = (unsigned long)__builtin_frame_address(1);
                sp = (unsigned long)__builtin_frame_address(0);
                pc = (unsigned long)__builtin_return_address(0);
index a14bf39..7cb4f39 100644 (file)
@@ -36,6 +36,9 @@ EXPORT_SYMBOL(kernel_map);
 #define kernel_map     (*(struct kernel_mapping *)XIP_FIXUP(&kernel_map))
 #endif
 
+phys_addr_t phys_ram_base __ro_after_init;
+EXPORT_SYMBOL(phys_ram_base);
+
 #ifdef CONFIG_XIP_KERNEL
 extern char _xiprom[], _exiprom[];
 #endif
@@ -160,7 +163,7 @@ static void __init setup_bootmem(void)
        phys_addr_t vmlinux_end = __pa_symbol(&_end);
        phys_addr_t vmlinux_start = __pa_symbol(&_start);
        phys_addr_t __maybe_unused max_mapped_addr;
-       phys_addr_t dram_end;
+       phys_addr_t phys_ram_end;
 
 #ifdef CONFIG_XIP_KERNEL
        vmlinux_start = __pa_symbol(&_sdata);
@@ -181,9 +184,12 @@ static void __init setup_bootmem(void)
 #endif
        memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);
 
-       dram_end = memblock_end_of_DRAM();
 
+       phys_ram_end = memblock_end_of_DRAM();
 #ifndef CONFIG_64BIT
+#ifndef CONFIG_XIP_KERNEL
+       phys_ram_base = memblock_start_of_DRAM();
+#endif
        /*
         * memblock allocator is not aware of the fact that last 4K bytes of
         * the addressable memory can not be mapped because of IS_ERR_VALUE
@@ -191,15 +197,15 @@ static void __init setup_bootmem(void)
         * if end of dram is equal to maximum addressable memory.  For 64-bit
         * kernel, this problem can't happen here as the end of the virtual
         * address space is occupied by the kernel mapping then this check must
-        * be done in create_kernel_page_table.
+        * be done as soon as the kernel mapping base address is determined.
         */
        max_mapped_addr = __pa(~(ulong)0);
-       if (max_mapped_addr == (dram_end - 1))
+       if (max_mapped_addr == (phys_ram_end - 1))
                memblock_set_current_limit(max_mapped_addr - 4096);
 #endif
 
-       min_low_pfn = PFN_UP(memblock_start_of_DRAM());
-       max_low_pfn = max_pfn = PFN_DOWN(dram_end);
+       min_low_pfn = PFN_UP(phys_ram_base);
+       max_low_pfn = max_pfn = PFN_DOWN(phys_ram_end);
 
        dma32_phys_limit = min(4UL * SZ_1G, (unsigned long)PFN_PHYS(max_low_pfn));
        set_max_mapnr(max_low_pfn - ARCH_PFN_OFFSET);
@@ -558,6 +564,7 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
        kernel_map.xiprom = (uintptr_t)CONFIG_XIP_PHYS_ADDR;
        kernel_map.xiprom_sz = (uintptr_t)(&_exiprom) - (uintptr_t)(&_xiprom);
 
+       phys_ram_base = CONFIG_PHYS_RAM_BASE;
        kernel_map.phys_addr = (uintptr_t)CONFIG_PHYS_RAM_BASE;
        kernel_map.size = (uintptr_t)(&_end) - (uintptr_t)(&_sdata);
 
index 81de865..e649742 100644 (file)
@@ -1251,6 +1251,10 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
                        return -1;
                break;
 
+       /* speculation barrier */
+       case BPF_ST | BPF_NOSPEC:
+               break;
+
        case BPF_ST | BPF_MEM | BPF_B:
        case BPF_ST | BPF_MEM | BPF_H:
        case BPF_ST | BPF_MEM | BPF_W:
index 87e3bf5..3af4131 100644 (file)
@@ -939,6 +939,10 @@ out_be:
                emit_ld(rd, 0, RV_REG_T1, ctx);
                break;
 
+       /* speculation barrier */
+       case BPF_ST | BPF_NOSPEC:
+               break;
+
        /* ST: *(size *)(dst + off) = imm */
        case BPF_ST | BPF_MEM | BPF_B:
                emit_imm(RV_REG_T1, imm, ctx);
index a0e2130..92c0a1b 100644 (file)
@@ -138,6 +138,8 @@ config S390
        select HAVE_ARCH_JUMP_LABEL_RELATIVE
        select HAVE_ARCH_KASAN
        select HAVE_ARCH_KASAN_VMALLOC
+       select HAVE_ARCH_KCSAN
+       select HAVE_ARCH_KFENCE
        select HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET
        select HAVE_ARCH_SECCOMP_FILTER
        select HAVE_ARCH_SOFT_DIRTY
index 1e31728..17dc4f1 100644 (file)
@@ -142,7 +142,8 @@ all: bzImage
 KBUILD_IMAGE   := $(boot)/bzImage
 
 install:
-       $(Q)$(MAKE) $(build)=$(boot) $@
+       sh -x $(srctree)/$(boot)/install.sh $(KERNELRELEASE) $(KBUILD_IMAGE) \
+             System.map "$(INSTALL_PATH)"
 
 bzImage: vmlinux
        $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
index 41a64b8..0ba6468 100644 (file)
@@ -7,6 +7,7 @@ KCOV_INSTRUMENT := n
 GCOV_PROFILE := n
 UBSAN_SANITIZE := n
 KASAN_SANITIZE := n
+KCSAN_SANITIZE := n
 
 KBUILD_AFLAGS := $(KBUILD_AFLAGS_DECOMPRESSOR)
 KBUILD_CFLAGS := $(KBUILD_CFLAGS_DECOMPRESSOR)
@@ -36,7 +37,7 @@ CFLAGS_sclp_early_core.o += -I$(srctree)/drivers/s390/char
 
 obj-y  := head.o als.o startup.o mem_detect.o ipl_parm.o ipl_report.o
 obj-y  += string.o ebcdic.o sclp_early_core.o mem.o ipl_vmparm.o cmdline.o
-obj-y  += version.o pgm_check_info.o ctype.o text_dma.o
+obj-y  += version.o pgm_check_info.o ctype.o
 obj-$(findstring y, $(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) $(CONFIG_PGSTE))  += uv.o
 obj-$(CONFIG_RELOCATABLE)      += machine_kexec_reloc.o
 obj-$(CONFIG_RANDOMIZE_BASE)   += kaslr.o
@@ -69,7 +70,3 @@ $(obj)/compressed/vmlinux: $(obj)/startup.a FORCE
 
 $(obj)/startup.a: $(OBJECTS) FORCE
        $(call if_changed,ar)
-
-install:
-       sh -x  $(srctree)/$(obj)/install.sh $(KERNELRELEASE) $(obj)/bzImage \
-             System.map "$(INSTALL_PATH)"
index ae04e1c..641ce0f 100644 (file)
@@ -2,14 +2,9 @@
 #ifndef BOOT_BOOT_H
 #define BOOT_BOOT_H
 
+#include <asm/extable.h>
 #include <linux/types.h>
 
-#define BOOT_STACK_OFFSET 0x8000
-
-#ifndef __ASSEMBLY__
-
-#include <linux/compiler.h>
-
 void startup_kernel(void);
 unsigned long detect_memory(void);
 bool is_ipl_block_dump(void);
@@ -18,17 +13,22 @@ void setup_boot_command_line(void);
 void parse_boot_command_line(void);
 void verify_facilities(void);
 void print_missing_facilities(void);
+void sclp_early_setup_buffer(void);
 void print_pgm_check_info(void);
 unsigned long get_random_base(unsigned long safe_addr);
 void __printf(1, 2) decompressor_printk(const char *fmt, ...);
 
+/* Symbols defined by linker scripts */
 extern const char kernel_version[];
 extern unsigned long memory_limit;
 extern unsigned long vmalloc_size;
 extern int vmalloc_size_set;
 extern int kaslr_enabled;
+extern char __boot_data_start[], __boot_data_end[];
+extern char __boot_data_preserved_start[], __boot_data_preserved_end[];
+extern char _decompressor_syms_start[], _decompressor_syms_end[];
+extern char _stack_start[], _stack_end[];
 
 unsigned long read_ipl_report(unsigned long safe_offset);
 
-#endif /* __ASSEMBLY__ */
 #endif /* BOOT_BOOT_H */
index 660c799..3b86006 100644 (file)
@@ -9,8 +9,10 @@ KCOV_INSTRUMENT := n
 GCOV_PROFILE := n
 UBSAN_SANITIZE := n
 KASAN_SANITIZE := n
+KCSAN_SANITIZE := n
 
 obj-y  := $(if $(CONFIG_KERNEL_UNCOMPRESSED),,decompressor.o) info.o
+obj-$(CONFIG_KERNEL_ZSTD) += clz_ctz.o
 obj-all := $(obj-y) piggy.o syms.o
 targets        := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2
 targets += vmlinux.bin.xz vmlinux.bin.lzma vmlinux.bin.lzo vmlinux.bin.lz4
diff --git a/arch/s390/boot/compressed/clz_ctz.c b/arch/s390/boot/compressed/clz_ctz.c
new file mode 100644 (file)
index 0000000..c3ebf24
--- /dev/null
@@ -0,0 +1,2 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "../../../../lib/clz_ctz.c"
index 37a4a8d..e27c214 100644 (file)
 #define memmove memmove
 #define memzero(s, n) memset((s), 0, (n))
 
-/* Symbols defined by linker scripts */
-extern char _end[];
-extern unsigned char _compressed_start[];
-extern unsigned char _compressed_end[];
-
 #ifdef CONFIG_KERNEL_BZIP2
 #define BOOT_HEAP_SIZE 0x400000
 #elif CONFIG_KERNEL_ZSTD
index 41f0ad9..a59f75c 100644 (file)
@@ -26,7 +26,12 @@ struct vmlinux_info {
        unsigned long rela_dyn_end;
 };
 
+/* Symbols defined by linker scripts */
+extern char _end[];
+extern unsigned char _compressed_start[];
+extern unsigned char _compressed_end[];
 extern char _vmlinux_info[];
+
 #define vmlinux (*(struct vmlinux_info *)_vmlinux_info)
 
 #endif /* BOOT_COMPRESSED_DECOMPRESSOR_H */
index 27a09c1..918e051 100644 (file)
@@ -1,6 +1,9 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 #include <asm-generic/vmlinux.lds.h>
 #include <asm/vmlinux.lds.h>
+#include <asm/thread_info.h>
+#include <asm/page.h>
+#include <asm/sclp.h>
 
 OUTPUT_FORMAT("elf64-s390", "elf64-s390", "elf64-s390")
 OUTPUT_ARCH(s390:64-bit)
@@ -34,27 +37,6 @@ SECTIONS
                *(.data.*)
                _edata = . ;
        }
-       /*
-       * .dma section for code, data, ex_table that need to stay below 2 GB,
-       * even when the kernel is relocate: above 2 GB.
-       */
-       . = ALIGN(PAGE_SIZE);
-       _sdma = .;
-       .dma.text : {
-               _stext_dma = .;
-               *(.dma.text)
-               . = ALIGN(PAGE_SIZE);
-               _etext_dma = .;
-       }
-       . = ALIGN(16);
-       .dma.ex_table : {
-               _start_dma_ex_table = .;
-               KEEP(*(.dma.ex_table))
-               _stop_dma_ex_table = .;
-       }
-       .dma.data : { *(.dma.data) }
-       . = ALIGN(PAGE_SIZE);
-       _edma = .;
 
        BOOT_DATA
        BOOT_DATA_PRESERVED
@@ -69,6 +51,17 @@ SECTIONS
                *(.bss)
                *(.bss.*)
                *(COMMON)
+               /*
+                * Stacks for the decompressor
+                */
+               . = ALIGN(PAGE_SIZE);
+               _dump_info_stack_start = .;
+               . += PAGE_SIZE;
+               _dump_info_stack_end = .;
+               . = ALIGN(PAGE_SIZE);
+               _stack_start = .;
+               . += BOOT_STACK_SIZE;
+               _stack_end = .;
                _ebss = .;
        }
 
index 51693cf..40f4cff 100644 (file)
 #include <linux/init.h>
 #include <linux/linkage.h>
 #include <asm/asm-offsets.h>
-#include <asm/thread_info.h>
 #include <asm/page.h>
 #include <asm/ptrace.h>
-#include "boot.h"
+#include <asm/sclp.h>
 
 #define ARCH_OFFSET    4
 
+#define EP_OFFSET      0x10008
+#define EP_STRING      "S390EP"
+
 __HEAD
 
 #define IPL_BS 0x730
@@ -275,11 +277,11 @@ iplstart:
 .Lcpuid:.fill  8,1,0
 
 #
-# startup-code at 0x10000, running in absolute addressing mode
+# normal startup-code, running in absolute addressing mode
 # this is called either by the ipl loader or directly by PSW restart
 # or linload or SALIPL
 #
-       .org    0x10000
+       .org    STARTUP_NORMAL_OFFSET
 SYM_CODE_START(startup)
        j       startup_normal
        .org    EP_OFFSET
@@ -292,9 +294,9 @@ SYM_CODE_START(startup)
        .ascii  EP_STRING
        .byte   0x00,0x01
 #
-# kdump startup-code at 0x10010, running in 64 bit absolute addressing mode
+# kdump startup-code, running in 64 bit absolute addressing mode
 #
-       .org    0x10010
+       .org    STARTUP_KDUMP_OFFSET
        j       startup_kdump
 SYM_CODE_END(startup)
 SYM_CODE_START_LOCAL(startup_normal)
@@ -315,18 +317,16 @@ SYM_CODE_START_LOCAL(startup_normal)
        xc      0x300(256),0x300
        xc      0xe00(256),0xe00
        xc      0xf00(256),0xf00
-       lctlg   %c0,%c15,.Lctl-.LPG0(%r13)      # load control registers
        stcke   __LC_BOOT_CLOCK
        mvc     __LC_LAST_UPDATE_CLOCK(8),__LC_BOOT_CLOCK+1
        spt     6f-.LPG0(%r13)
        mvc     __LC_LAST_UPDATE_TIMER(8),6f-.LPG0(%r13)
-       l       %r15,.Lstack-.LPG0(%r13)
+       larl    %r15,_stack_end-STACK_FRAME_OVERHEAD
+       brasl   %r14,sclp_early_setup_buffer
        brasl   %r14,verify_facilities
        brasl   %r14,startup_kernel
 SYM_CODE_END(startup_normal)
 
-.Lstack:
-       .long   BOOT_STACK_OFFSET + BOOT_STACK_SIZE - STACK_FRAME_OVERHEAD
        .align  8
 6:     .long   0x7fffffff,0xffffffff
 .Lext_new_psw:
@@ -335,35 +335,6 @@ SYM_CODE_END(startup_normal)
        .quad   0x0000000180000000,startup_pgm_check_handler
 .Lio_new_psw:
        .quad   0x0002000180000000,0x1f0        # disabled wait
-.Lctl: .quad   0x04040000              # cr0: AFP registers & secondary space
-       .quad   0                       # cr1: primary space segment table
-       .quad   .Lduct                  # cr2: dispatchable unit control table
-       .quad   0                       # cr3: instruction authorization
-       .quad   0xffff                  # cr4: instruction authorization
-       .quad   .Lduct                  # cr5: primary-aste origin
-       .quad   0                       # cr6:  I/O interrupts
-       .quad   0                       # cr7:  secondary space segment table
-       .quad   0x0000000000008000      # cr8:  access registers translation
-       .quad   0                       # cr9:  tracing off
-       .quad   0                       # cr10: tracing off
-       .quad   0                       # cr11: tracing off
-       .quad   0                       # cr12: tracing off
-       .quad   0                       # cr13: home space segment table
-       .quad   0xc0000000              # cr14: machine check handling off
-       .quad   .Llinkage_stack         # cr15: linkage stack operations
-
-       .section .dma.data,"aw",@progbits
-.Lduct: .long  0,.Laste,.Laste,0,.Lduald,0,0,0
-       .long   0,0,0,0,0,0,0,0
-.Llinkage_stack:
-       .long   0,0,0x89000000,0,0,0,0x8a000000,0
-       .align 64
-.Laste:        .quad   0,0xffffffffffffffff,0,0,0,0,0,0
-       .align  128
-.Lduald:.rept  8
-       .long   0x80000000,0,0,0        # invalid access-list entries
-       .endr
-       .previous
 
 #include "head_kdump.S"
 
@@ -386,15 +357,13 @@ SYM_CODE_START_LOCAL(startup_pgm_check_handler)
        oi      __LC_RETURN_PSW+1,0x2   # set wait state bit
        larl    %r9,.Lold_psw_disabled_wait
        stg     %r9,__LC_PGM_NEW_PSW+8
-       l       %r15,.Ldump_info_stack-.Lold_psw_disabled_wait(%r9)
+       larl    %r15,_dump_info_stack_end-STACK_FRAME_OVERHEAD
        brasl   %r14,print_pgm_check_info
 .Lold_psw_disabled_wait:
        la      %r8,4095
        lmg     %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r8)
        lpswe   __LC_RETURN_PSW         # disabled wait
 SYM_CODE_END(startup_pgm_check_handler)
-.Ldump_info_stack:
-       .long   0x5000 + PAGE_SIZE - STACK_FRAME_OVERHEAD
 
 #
 # params at 10400 (setup.h)
@@ -415,7 +384,4 @@ SYM_DATA_START(parmarea)
        .org    PARMAREA+__PARMAREA_SIZE
 SYM_DATA_END(parmarea)
 
-       .org    EARLY_SCCB_OFFSET
-       .fill   4096
-
        .org    HEAD_END
index 0b49655..9b14045 100644 (file)
@@ -54,9 +54,9 @@ static unsigned long find_bootdata_space(struct ipl_rb_components *comps,
         * not overlap with any component or any certificate.
         */
 repeat:
-       if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && INITRD_START && INITRD_SIZE &&
-           intersects(INITRD_START, INITRD_SIZE, safe_addr, size))
-               safe_addr = INITRD_START + INITRD_SIZE;
+       if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && initrd_data.start && initrd_data.size &&
+           intersects(initrd_data.start, initrd_data.size, safe_addr, size))
+               safe_addr = initrd_data.start + initrd_data.size;
        for_each_rb_entry(comp, comps)
                if (intersects(safe_addr, size, comp->addr, comp->len)) {
                        safe_addr = comp->addr + comp->len;
index 0dd48fb..d898446 100644 (file)
@@ -186,9 +186,9 @@ unsigned long get_random_base(unsigned long safe_addr)
         */
        memory_limit -= kasan_estimate_memory_needs(memory_limit);
 
-       if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && INITRD_START && INITRD_SIZE) {
-               if (safe_addr < INITRD_START + INITRD_SIZE)
-                       safe_addr = INITRD_START + INITRD_SIZE;
+       if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && initrd_data.start && initrd_data.size) {
+               if (safe_addr < initrd_data.start + initrd_data.size)
+                       safe_addr = initrd_data.start + initrd_data.size;
        }
        safe_addr = ALIGN(safe_addr, THREAD_SIZE);
 
index 4e17adb..2f949cd 100644 (file)
@@ -1,6 +1,8 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <linux/errno.h>
 #include <linux/init.h>
+#include <asm/setup.h>
+#include <asm/processor.h>
 #include <asm/sclp.h>
 #include <asm/sections.h>
 #include <asm/mem_detect.h>
@@ -24,9 +26,9 @@ static void *mem_detect_alloc_extended(void)
 {
        unsigned long offset = ALIGN(mem_safe_offset(), sizeof(u64));
 
-       if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && INITRD_START && INITRD_SIZE &&
-           INITRD_START < offset + ENTRIES_EXTENDED_MAX)
-               offset = ALIGN(INITRD_START + INITRD_SIZE, sizeof(u64));
+       if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && initrd_data.start && initrd_data.size &&
+           initrd_data.start < offset + ENTRIES_EXTENDED_MAX)
+               offset = ALIGN(initrd_data.start + initrd_data.size, sizeof(u64));
 
        return (void *)offset;
 }
index 3a46abe..209f6ae 100644 (file)
@@ -29,7 +29,6 @@ static char *symstart(char *p)
        return p + 1;
 }
 
-extern char _decompressor_syms_start[], _decompressor_syms_end[];
 static noinline char *findsym(unsigned long ip, unsigned short *off, unsigned short *len)
 {
        /* symbol entries are in a form "10000 c4 startup\0" */
@@ -126,8 +125,8 @@ out:
 
 static noinline void print_stacktrace(void)
 {
-       struct stack_info boot_stack = { STACK_TYPE_TASK, BOOT_STACK_OFFSET,
-                                        BOOT_STACK_OFFSET + BOOT_STACK_SIZE };
+       struct stack_info boot_stack = { STACK_TYPE_TASK, (unsigned long)_stack_start,
+                                        (unsigned long)_stack_end };
        unsigned long sp = S390_lowcore.gpregs_save_area[15];
        bool first = true;
 
index 5a19fd7..6f30646 100644 (file)
@@ -1,2 +1,11 @@
 // SPDX-License-Identifier: GPL-2.0
+#include "boot.h"
 #include "../../../drivers/s390/char/sclp_early_core.c"
+
+/* SCLP early buffer must stay page-aligned and below 2GB */
+static char __sclp_early_sccb[EXT_SCCB_READ_SCP] __aligned(PAGE_SIZE);
+
+void sclp_early_setup_buffer(void)
+{
+       sclp_early_set_buffer(&__sclp_early_sccb);
+}
index d0cf216..6dc8d0a 100644 (file)
@@ -12,9 +12,8 @@
 #include <asm/uv.h>
 #include "compressed/decompressor.h"
 #include "boot.h"
+#include "uv.h"
 
-extern char __boot_data_start[], __boot_data_end[];
-extern char __boot_data_preserved_start[], __boot_data_preserved_end[];
 unsigned long __bootdata_preserved(__kaslr_offset);
 unsigned long __bootdata_preserved(VMALLOC_START);
 unsigned long __bootdata_preserved(VMALLOC_END);
@@ -24,44 +23,11 @@ unsigned long __bootdata_preserved(MODULES_VADDR);
 unsigned long __bootdata_preserved(MODULES_END);
 unsigned long __bootdata(ident_map_size);
 int __bootdata(is_full_image) = 1;
+struct initrd_data __bootdata(initrd_data);
 
 u64 __bootdata_preserved(stfle_fac_list[16]);
 u64 __bootdata_preserved(alt_stfle_fac_list[16]);
-
-/*
- * Some code and data needs to stay below 2 GB, even when the kernel would be
- * relocated above 2 GB, because it has to use 31 bit addresses.
- * Such code and data is part of the .dma section, and its location is passed
- * over to the decompressed / relocated kernel via the .boot.preserved.data
- * section.
- */
-extern char _sdma[], _edma[];
-extern char _stext_dma[], _etext_dma[];
-extern struct exception_table_entry _start_dma_ex_table[];
-extern struct exception_table_entry _stop_dma_ex_table[];
-unsigned long __bootdata_preserved(__sdma) = __pa(&_sdma);
-unsigned long __bootdata_preserved(__edma) = __pa(&_edma);
-unsigned long __bootdata_preserved(__stext_dma) = __pa(&_stext_dma);
-unsigned long __bootdata_preserved(__etext_dma) = __pa(&_etext_dma);
-struct exception_table_entry *
-       __bootdata_preserved(__start_dma_ex_table) = _start_dma_ex_table;
-struct exception_table_entry *
-       __bootdata_preserved(__stop_dma_ex_table) = _stop_dma_ex_table;
-
-int _diag210_dma(struct diag210 *addr);
-int _diag26c_dma(void *req, void *resp, enum diag26c_sc subcode);
-int _diag14_dma(unsigned long rx, unsigned long ry1, unsigned long subcode);
-void _diag0c_dma(struct hypfs_diag0c_entry *entry);
-void _diag308_reset_dma(void);
-struct diag_ops __bootdata_preserved(diag_dma_ops) = {
-       .diag210 = _diag210_dma,
-       .diag26c = _diag26c_dma,
-       .diag14 = _diag14_dma,
-       .diag0c = _diag0c_dma,
-       .diag308_reset = _diag308_reset_dma
-};
-static struct diag210 _diag210_tmp_dma __section(".dma.data");
-struct diag210 *__bootdata_preserved(__diag210_tmp_dma) = &_diag210_tmp_dma;
+struct oldmem_data __bootdata_preserved(oldmem_data);
 
 void error(char *x)
 {
@@ -91,12 +57,12 @@ static void rescue_initrd(unsigned long addr)
 {
        if (!IS_ENABLED(CONFIG_BLK_DEV_INITRD))
                return;
-       if (!INITRD_START || !INITRD_SIZE)
+       if (!initrd_data.start || !initrd_data.size)
                return;
-       if (addr <= INITRD_START)
+       if (addr <= initrd_data.start)
                return;
-       memmove((void *)addr, (void *)INITRD_START, INITRD_SIZE);
-       INITRD_START = addr;
+       memmove((void *)addr, (void *)initrd_data.start, initrd_data.size);
+       initrd_data.start = addr;
 }
 
 static void copy_bootdata(void)
@@ -169,9 +135,9 @@ static void setup_ident_map_size(unsigned long max_physmem_end)
        ident_map_size = min(ident_map_size, 1UL << MAX_PHYSMEM_BITS);
 
 #ifdef CONFIG_CRASH_DUMP
-       if (OLDMEM_BASE) {
+       if (oldmem_data.start) {
                kaslr_enabled = 0;
-               ident_map_size = min(ident_map_size, OLDMEM_SIZE);
+               ident_map_size = min(ident_map_size, oldmem_data.size);
        } else if (ipl_block_valid && is_ipl_block_dump()) {
                kaslr_enabled = 0;
                if (!sclp_early_get_hsa_size(&hsa_size) && hsa_size)
@@ -282,12 +248,28 @@ static void setup_vmalloc_size(void)
        vmalloc_size = max(size, vmalloc_size);
 }
 
+static void offset_vmlinux_info(unsigned long offset)
+{
+       vmlinux.default_lma += offset;
+       *(unsigned long *)(&vmlinux.entry) += offset;
+       vmlinux.bootdata_off += offset;
+       vmlinux.bootdata_preserved_off += offset;
+       vmlinux.rela_dyn_start += offset;
+       vmlinux.rela_dyn_end += offset;
+       vmlinux.dynsym_start += offset;
+}
+
 void startup_kernel(void)
 {
        unsigned long random_lma;
        unsigned long safe_addr;
        void *img;
 
+       initrd_data.start = parmarea.initrd_start;
+       initrd_data.size = parmarea.initrd_size;
+       oldmem_data.start = parmarea.oldmem_base;
+       oldmem_data.size = parmarea.oldmem_size;
+
        setup_lpp();
        store_ipl_parmblock();
        safe_addr = mem_safe_offset();
@@ -297,23 +279,17 @@ void startup_kernel(void)
        sclp_early_read_info();
        setup_boot_command_line();
        parse_boot_command_line();
+       sanitize_prot_virt_host();
        setup_ident_map_size(detect_memory());
        setup_vmalloc_size();
        setup_kernel_memory_layout();
 
-       random_lma = __kaslr_offset = 0;
        if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_enabled) {
                random_lma = get_random_base(safe_addr);
                if (random_lma) {
                        __kaslr_offset = random_lma - vmlinux.default_lma;
                        img = (void *)vmlinux.default_lma;
-                       vmlinux.default_lma += __kaslr_offset;
-                       vmlinux.entry += __kaslr_offset;
-                       vmlinux.bootdata_off += __kaslr_offset;
-                       vmlinux.bootdata_preserved_off += __kaslr_offset;
-                       vmlinux.rela_dyn_start += __kaslr_offset;
-                       vmlinux.rela_dyn_end += __kaslr_offset;
-                       vmlinux.dynsym_start += __kaslr_offset;
+                       offset_vmlinux_info(__kaslr_offset);
                }
        }
 
diff --git a/arch/s390/boot/text_dma.S b/arch/s390/boot/text_dma.S
deleted file mode 100644 (file)
index 5ff5fee..0000000
+++ /dev/null
@@ -1,156 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Code that needs to run below 2 GB.
- *
- * Copyright IBM Corp. 2019
- */
-
-#include <linux/linkage.h>
-#include <asm/errno.h>
-#include <asm/sigp.h>
-
-       .section .dma.text,"ax"
-/*
- * Simplified version of expoline thunk. The normal thunks can not be used here,
- * because they might be more than 2 GB away, and not reachable by the relative
- * branch. No comdat, exrl, etc. optimizations used here, because it only
- * affects a few functions that are not performance-relevant.
- */
-       .macro BR_EX_DMA_r14
-       larl    %r1,0f
-       ex      0,0(%r1)
-       j       .
-0:     br      %r14
-       .endm
-
-/*
- * int _diag14_dma(unsigned long rx, unsigned long ry1, unsigned long subcode)
- */
-ENTRY(_diag14_dma)
-       lgr     %r1,%r2
-       lgr     %r2,%r3
-       lgr     %r3,%r4
-       lhi     %r5,-EIO
-       sam31
-       diag    %r1,%r2,0x14
-.Ldiag14_ex:
-       ipm     %r5
-       srl     %r5,28
-.Ldiag14_fault:
-       sam64
-       lgfr    %r2,%r5
-       BR_EX_DMA_r14
-       EX_TABLE_DMA(.Ldiag14_ex, .Ldiag14_fault)
-ENDPROC(_diag14_dma)
-
-/*
- * int _diag210_dma(struct diag210 *addr)
- */
-ENTRY(_diag210_dma)
-       lgr     %r1,%r2
-       lhi     %r2,-1
-       sam31
-       diag    %r1,%r0,0x210
-.Ldiag210_ex:
-       ipm     %r2
-       srl     %r2,28
-.Ldiag210_fault:
-       sam64
-       lgfr    %r2,%r2
-       BR_EX_DMA_r14
-       EX_TABLE_DMA(.Ldiag210_ex, .Ldiag210_fault)
-ENDPROC(_diag210_dma)
-
-/*
- * int _diag26c_dma(void *req, void *resp, enum diag26c_sc subcode)
- */
-ENTRY(_diag26c_dma)
-       lghi    %r5,-EOPNOTSUPP
-       sam31
-       diag    %r2,%r4,0x26c
-.Ldiag26c_ex:
-       sam64
-       lgfr    %r2,%r5
-       BR_EX_DMA_r14
-       EX_TABLE_DMA(.Ldiag26c_ex, .Ldiag26c_ex)
-ENDPROC(_diag26c_dma)
-
-/*
- * void _diag0c_dma(struct hypfs_diag0c_entry *entry)
- */
-ENTRY(_diag0c_dma)
-       sam31
-       diag    %r2,%r2,0x0c
-       sam64
-       BR_EX_DMA_r14
-ENDPROC(_diag0c_dma)
-
-/*
- * void _diag308_reset_dma(void)
- *
- * Calls diag 308 subcode 1 and continues execution
- */
-ENTRY(_diag308_reset_dma)
-       larl    %r4,.Lctlregs           # Save control registers
-       stctg   %c0,%c15,0(%r4)
-       lg      %r2,0(%r4)              # Disable lowcore protection
-       nilh    %r2,0xefff
-       larl    %r4,.Lctlreg0
-       stg     %r2,0(%r4)
-       lctlg   %c0,%c0,0(%r4)
-       larl    %r4,.Lfpctl             # Floating point control register
-       stfpc   0(%r4)
-       larl    %r4,.Lprefix            # Save prefix register
-       stpx    0(%r4)
-       larl    %r4,.Lprefix_zero       # Set prefix register to 0
-       spx     0(%r4)
-       larl    %r4,.Lcontinue_psw      # Save PSW flags
-       epsw    %r2,%r3
-       stm     %r2,%r3,0(%r4)
-       larl    %r4,restart_part2       # Setup restart PSW at absolute 0
-       larl    %r3,.Lrestart_diag308_psw
-       og      %r4,0(%r3)              # Save PSW
-       lghi    %r3,0
-       sturg   %r4,%r3                 # Use sturg, because of large pages
-       lghi    %r1,1
-       lghi    %r0,0
-       diag    %r0,%r1,0x308
-restart_part2:
-       lhi     %r0,0                   # Load r0 with zero
-       lhi     %r1,2                   # Use mode 2 = ESAME (dump)
-       sigp    %r1,%r0,SIGP_SET_ARCHITECTURE   # Switch to ESAME mode
-       sam64                           # Switch to 64 bit addressing mode
-       larl    %r4,.Lctlregs           # Restore control registers
-       lctlg   %c0,%c15,0(%r4)
-       larl    %r4,.Lfpctl             # Restore floating point ctl register
-       lfpc    0(%r4)
-       larl    %r4,.Lprefix            # Restore prefix register
-       spx     0(%r4)
-       larl    %r4,.Lcontinue_psw      # Restore PSW flags
-       lpswe   0(%r4)
-.Lcontinue:
-       BR_EX_DMA_r14
-ENDPROC(_diag308_reset_dma)
-
-       .section .dma.data,"aw",@progbits
-.align 8
-.Lrestart_diag308_psw:
-       .long   0x00080000,0x80000000
-
-.align 8
-.Lcontinue_psw:
-       .quad   0,.Lcontinue
-
-.align 8
-.Lctlreg0:
-       .quad   0
-.Lctlregs:
-       .rept   16
-       .quad   0
-       .endr
-.Lfpctl:
-       .long   0
-.Lprefix:
-       .long   0
-.Lprefix_zero:
-       .long   0
index f6b0c4f..e6be155 100644 (file)
@@ -1,8 +1,12 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <asm/uv.h>
+#include <asm/boot_data.h>
 #include <asm/facility.h>
 #include <asm/sections.h>
 
+#include "boot.h"
+#include "uv.h"
+
 /* will be used in arch/s390/kernel/uv.c */
 #ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
 int __bootdata_preserved(prot_virt_guest);
@@ -47,26 +51,34 @@ void uv_query_info(void)
 }
 
 #if IS_ENABLED(CONFIG_KVM)
-static bool has_uv_sec_stor_limit(void)
+void adjust_to_uv_max(unsigned long *vmax)
 {
-       /*
-        * keep these conditions in line with setup_uv()
-        */
-       if (!is_prot_virt_host())
-               return false;
+       if (is_prot_virt_host() && uv_info.max_sec_stor_addr)
+               *vmax = min_t(unsigned long, *vmax, uv_info.max_sec_stor_addr);
+}
 
+static int is_prot_virt_host_capable(void)
+{
+       /* disable if no prot_virt=1 given on command-line */
+       if (!is_prot_virt_host())
+               return 0;
+       /* disable if protected guest virtualization is enabled */
        if (is_prot_virt_guest())
-               return false;
-
+               return 0;
+       /* disable if no hardware support */
        if (!test_facility(158))
-               return false;
-
-       return !!uv_info.max_sec_stor_addr;
+               return 0;
+       /* disable if kdump */
+       if (oldmem_data.start)
+               return 0;
+       /* disable if stand-alone dump */
+       if (ipl_block_valid && is_ipl_block_dump())
+               return 0;
+       return 1;
 }
 
-void adjust_to_uv_max(unsigned long *vmax)
+void sanitize_prot_virt_host(void)
 {
-       if (has_uv_sec_stor_limit())
-               *vmax = min_t(unsigned long, *vmax, uv_info.max_sec_stor_addr);
+       prot_virt_host = is_prot_virt_host_capable();
 }
 #endif
diff --git a/arch/s390/boot/uv.h b/arch/s390/boot/uv.h
new file mode 100644 (file)
index 0000000..690ce01
--- /dev/null
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef BOOT_UV_H
+#define BOOT_UV_H
+
+#if IS_ENABLED(CONFIG_KVM)
+void adjust_to_uv_max(unsigned long *vmax);
+void sanitize_prot_virt_host(void);
+#else
+static inline void adjust_to_uv_max(unsigned long *vmax) {}
+static inline void sanitize_prot_virt_host(void) {}
+#endif
+
+#if defined(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) || IS_ENABLED(CONFIG_KVM)
+void uv_query_info(void);
+#else
+static inline void uv_query_info(void) {}
+#endif
+
+#endif /* BOOT_UV_H */
index 7de253f..11ffc7c 100644 (file)
@@ -10,7 +10,6 @@ CONFIG_BPF_JIT=y
 CONFIG_BPF_JIT_ALWAYS_ON=y
 CONFIG_BPF_LSM=y
 CONFIG_PREEMPT=y
-CONFIG_SCHED_CORE=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_TASKSTATS=y
@@ -75,7 +74,6 @@ CONFIG_MODULE_FORCE_UNLOAD=y
 CONFIG_MODVERSIONS=y
 CONFIG_MODULE_SRCVERSION_ALL=y
 CONFIG_MODULE_SIG_SHA256=y
-CONFIG_BLK_DEV_INTEGRITY=y
 CONFIG_BLK_DEV_THROTTLING=y
 CONFIG_BLK_WBT=y
 CONFIG_BLK_CGROUP_IOLATENCY=y
@@ -335,7 +333,7 @@ CONFIG_L2TP_DEBUGFS=m
 CONFIG_L2TP_V3=y
 CONFIG_L2TP_IP=m
 CONFIG_L2TP_ETH=m
-CONFIG_BRIDGE=m
+CONFIG_BRIDGE=y
 CONFIG_BRIDGE_MRP=y
 CONFIG_VLAN_8021Q=m
 CONFIG_VLAN_8021Q_GVRP=y
@@ -466,6 +464,7 @@ CONFIG_DM_FLAKEY=m
 CONFIG_DM_VERITY=m
 CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG=y
 CONFIG_DM_SWITCH=m
+CONFIG_DM_INTEGRITY=m
 CONFIG_NETDEVICES=y
 CONFIG_BONDING=m
 CONFIG_DUMMY=m
index b671642..e1642d2 100644 (file)
@@ -8,7 +8,6 @@ CONFIG_BPF_SYSCALL=y
 CONFIG_BPF_JIT=y
 CONFIG_BPF_JIT_ALWAYS_ON=y
 CONFIG_BPF_LSM=y
-CONFIG_SCHED_CORE=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_TASKSTATS=y
@@ -325,7 +324,7 @@ CONFIG_L2TP_DEBUGFS=m
 CONFIG_L2TP_V3=y
 CONFIG_L2TP_IP=m
 CONFIG_L2TP_ETH=m
-CONFIG_BRIDGE=m
+CONFIG_BRIDGE=y
 CONFIG_BRIDGE_MRP=y
 CONFIG_VLAN_8021Q=m
 CONFIG_VLAN_8021Q_GVRP=y
index 6c43d2b..9a27860 100644 (file)
@@ -21,7 +21,7 @@
 static void diag0c_fn(void *data)
 {
        diag_stat_inc(DIAG_STAT_X00C);
-       diag_dma_ops.diag0c(((void **) data)[smp_processor_id()]);
+       diag_amode31_ops.diag0c(((void **)data)[smp_processor_id()]);
 }
 
 /*
@@ -33,12 +33,12 @@ static void *diag0c_store(unsigned int *count)
        unsigned int cpu_count, cpu, i;
        void **cpu_vec;
 
-       get_online_cpus();
+       cpus_read_lock();
        cpu_count = num_online_cpus();
        cpu_vec = kmalloc_array(num_possible_cpus(), sizeof(*cpu_vec),
                                GFP_KERNEL);
        if (!cpu_vec)
-               goto fail_put_online_cpus;
+               goto fail_unlock_cpus;
        /* Note: Diag 0c needs 8 byte alignment and real storage */
        diag0c_data = kzalloc(struct_size(diag0c_data, entry, cpu_count),
                              GFP_KERNEL | GFP_DMA);
@@ -54,13 +54,13 @@ static void *diag0c_store(unsigned int *count)
        on_each_cpu(diag0c_fn, cpu_vec, 1);
        *count = cpu_count;
        kfree(cpu_vec);
-       put_online_cpus();
+       cpus_read_unlock();
        return diag0c_data;
 
 fail_kfree_cpu_vec:
        kfree(cpu_vec);
-fail_put_online_cpus:
-       put_online_cpus();
+fail_unlock_cpus:
+       cpus_read_unlock();
        return ERR_PTR(-ENOMEM);
 }
 
index f58c92f..1effac6 100644 (file)
@@ -5,7 +5,6 @@
 #ifndef _ASM_S390_CIO_H_
 #define _ASM_S390_CIO_H_
 
-#include <linux/spinlock.h>
 #include <linux/bitops.h>
 #include <linux/genalloc.h>
 #include <asm/types.h>
index c0f3bfe..646b129 100644 (file)
@@ -173,17 +173,16 @@ typedef struct { unsigned char bytes[16]; } cpacf_mask_t;
  */
 static __always_inline void __cpacf_query(unsigned int opcode, cpacf_mask_t *mask)
 {
-       register unsigned long r0 asm("0") = 0; /* query function */
-       register unsigned long r1 asm("1") = (unsigned long) mask;
-
        asm volatile(
-               "       spm 0\n" /* pckmo doesn't change the cc */
+               "       lghi    0,0\n" /* query function */
+               "       lgr     1,%[mask]\n"
+               "       spm     0\n" /* pckmo doesn't change the cc */
                /* Parameter regs are ignored, but must be nonzero and unique */
                "0:     .insn   rrf,%[opc] << 16,2,4,6,0\n"
                "       brc     1,0b\n" /* handle partial completion */
                : "=m" (*mask)
-               : [fc] "d" (r0), [pba] "a" (r1), [opc] "i" (opcode)
-               : "cc");
+               : [mask] "d" ((unsigned long)mask), [opc] "i" (opcode)
+               : "cc", "0", "1");
 }
 
 static __always_inline int __cpacf_check_opcode(unsigned int opcode)
@@ -249,20 +248,22 @@ static __always_inline int cpacf_query_func(unsigned int opcode, unsigned int fu
 static inline int cpacf_km(unsigned long func, void *param,
                           u8 *dest, const u8 *src, long src_len)
 {
-       register unsigned long r0 asm("0") = (unsigned long) func;
-       register unsigned long r1 asm("1") = (unsigned long) param;
-       register unsigned long r2 asm("2") = (unsigned long) src;
-       register unsigned long r3 asm("3") = (unsigned long) src_len;
-       register unsigned long r4 asm("4") = (unsigned long) dest;
+       union register_pair d, s;
 
+       d.even = (unsigned long)dest;
+       s.even = (unsigned long)src;
+       s.odd  = (unsigned long)src_len;
        asm volatile(
+               "       lgr     0,%[fc]\n"
+               "       lgr     1,%[pba]\n"
                "0:     .insn   rre,%[opc] << 16,%[dst],%[src]\n"
                "       brc     1,0b\n" /* handle partial completion */
-               : [src] "+a" (r2), [len] "+d" (r3), [dst] "+a" (r4)
-               : [fc] "d" (r0), [pba] "a" (r1), [opc] "i" (CPACF_KM)
-               : "cc", "memory");
+               : [src] "+&d" (s.pair), [dst] "+&d" (d.pair)
+               : [fc] "d" (func), [pba] "d" ((unsigned long)param),
+                 [opc] "i" (CPACF_KM)
+               : "cc", "memory", "0", "1");
 
-       return src_len - r3;
+       return src_len - s.odd;
 }
 
 /**
@@ -279,20 +280,22 @@ static inline int cpacf_km(unsigned long func, void *param,
 static inline int cpacf_kmc(unsigned long func, void *param,
                            u8 *dest, const u8 *src, long src_len)
 {
-       register unsigned long r0 asm("0") = (unsigned long) func;
-       register unsigned long r1 asm("1") = (unsigned long) param;
-       register unsigned long r2 asm("2") = (unsigned long) src;
-       register unsigned long r3 asm("3") = (unsigned long) src_len;
-       register unsigned long r4 asm("4") = (unsigned long) dest;
+       union register_pair d, s;
 
+       d.even = (unsigned long)dest;
+       s.even = (unsigned long)src;
+       s.odd  = (unsigned long)src_len;
        asm volatile(
+               "       lgr     0,%[fc]\n"
+               "       lgr     1,%[pba]\n"
                "0:     .insn   rre,%[opc] << 16,%[dst],%[src]\n"
                "       brc     1,0b\n" /* handle partial completion */
-               : [src] "+a" (r2), [len] "+d" (r3), [dst] "+a" (r4)
-               : [fc] "d" (r0), [pba] "a" (r1), [opc] "i" (CPACF_KMC)
-               : "cc", "memory");
+               : [src] "+&d" (s.pair), [dst] "+&d" (d.pair)
+               : [fc] "d" (func), [pba] "d" ((unsigned long)param),
+                 [opc] "i" (CPACF_KMC)
+               : "cc", "memory", "0", "1");
 
-       return src_len - r3;
+       return src_len - s.odd;
 }
 
 /**
@@ -306,17 +309,19 @@ static inline int cpacf_kmc(unsigned long func, void *param,
 static inline void cpacf_kimd(unsigned long func, void *param,
                              const u8 *src, long src_len)
 {
-       register unsigned long r0 asm("0") = (unsigned long) func;
-       register unsigned long r1 asm("1") = (unsigned long) param;
-       register unsigned long r2 asm("2") = (unsigned long) src;
-       register unsigned long r3 asm("3") = (unsigned long) src_len;
+       union register_pair s;
 
+       s.even = (unsigned long)src;
+       s.odd  = (unsigned long)src_len;
        asm volatile(
+               "       lgr     0,%[fc]\n"
+               "       lgr     1,%[pba]\n"
                "0:     .insn   rre,%[opc] << 16,0,%[src]\n"
                "       brc     1,0b\n" /* handle partial completion */
-               : [src] "+a" (r2), [len] "+d" (r3)
-               : [fc] "d" (r0), [pba] "a" (r1), [opc] "i" (CPACF_KIMD)
-               : "cc", "memory");
+               : [src] "+&d" (s.pair)
+               : [fc] "d" (func), [pba] "d" ((unsigned long)(param)),
+                 [opc] "i" (CPACF_KIMD)
+               : "cc", "memory", "0", "1");
 }
 
 /**
@@ -329,17 +334,19 @@ static inline void cpacf_kimd(unsigned long func, void *param,
 static inline void cpacf_klmd(unsigned long func, void *param,
                              const u8 *src, long src_len)
 {
-       register unsigned long r0 asm("0") = (unsigned long) func;
-       register unsigned long r1 asm("1") = (unsigned long) param;
-       register unsigned long r2 asm("2") = (unsigned long) src;
-       register unsigned long r3 asm("3") = (unsigned long) src_len;
+       union register_pair s;
 
+       s.even = (unsigned long)src;
+       s.odd  = (unsigned long)src_len;
        asm volatile(
+               "       lgr     0,%[fc]\n"
+               "       lgr     1,%[pba]\n"
                "0:     .insn   rre,%[opc] << 16,0,%[src]\n"
                "       brc     1,0b\n" /* handle partial completion */
-               : [src] "+a" (r2), [len] "+d" (r3)
-               : [fc] "d" (r0), [pba] "a" (r1), [opc] "i" (CPACF_KLMD)
-               : "cc", "memory");
+               : [src] "+&d" (s.pair)
+               : [fc] "d" (func), [pba] "d" ((unsigned long)param),
+                 [opc] "i" (CPACF_KLMD)
+               : "cc", "memory", "0", "1");
 }
 
 /**
@@ -355,19 +362,21 @@ static inline void cpacf_klmd(unsigned long func, void *param,
 static inline int cpacf_kmac(unsigned long func, void *param,
                             const u8 *src, long src_len)
 {
-       register unsigned long r0 asm("0") = (unsigned long) func;
-       register unsigned long r1 asm("1") = (unsigned long) param;
-       register unsigned long r2 asm("2") = (unsigned long) src;
-       register unsigned long r3 asm("3") = (unsigned long) src_len;
+       union register_pair s;
 
+       s.even = (unsigned long)src;
+       s.odd  = (unsigned long)src_len;
        asm volatile(
+               "       lgr     0,%[fc]\n"
+               "       lgr     1,%[pba]\n"
                "0:     .insn   rre,%[opc] << 16,0,%[src]\n"
                "       brc     1,0b\n" /* handle partial completion */
-               : [src] "+a" (r2), [len] "+d" (r3)
-               : [fc] "d" (r0), [pba] "a" (r1), [opc] "i" (CPACF_KMAC)
-               : "cc", "memory");
+               : [src] "+&d" (s.pair)
+               : [fc] "d" (func), [pba] "d" ((unsigned long)param),
+                 [opc] "i" (CPACF_KMAC)
+               : "cc", "memory", "0", "1");
 
-       return src_len - r3;
+       return src_len - s.odd;
 }
 
 /**
@@ -385,22 +394,24 @@ static inline int cpacf_kmac(unsigned long func, void *param,
 static inline int cpacf_kmctr(unsigned long func, void *param, u8 *dest,
                              const u8 *src, long src_len, u8 *counter)
 {
-       register unsigned long r0 asm("0") = (unsigned long) func;
-       register unsigned long r1 asm("1") = (unsigned long) param;
-       register unsigned long r2 asm("2") = (unsigned long) src;
-       register unsigned long r3 asm("3") = (unsigned long) src_len;
-       register unsigned long r4 asm("4") = (unsigned long) dest;
-       register unsigned long r6 asm("6") = (unsigned long) counter;
+       union register_pair d, s, c;
 
+       d.even = (unsigned long)dest;
+       s.even = (unsigned long)src;
+       s.odd  = (unsigned long)src_len;
+       c.even = (unsigned long)counter;
        asm volatile(
+               "       lgr     0,%[fc]\n"
+               "       lgr     1,%[pba]\n"
                "0:     .insn   rrf,%[opc] << 16,%[dst],%[src],%[ctr],0\n"
                "       brc     1,0b\n" /* handle partial completion */
-               : [src] "+a" (r2), [len] "+d" (r3),
-                 [dst] "+a" (r4), [ctr] "+a" (r6)
-               : [fc] "d" (r0), [pba] "a" (r1), [opc] "i" (CPACF_KMCTR)
-               : "cc", "memory");
+               : [src] "+&d" (s.pair), [dst] "+&d" (d.pair),
+                 [ctr] "+&d" (c.pair)
+               : [fc] "d" (func), [pba] "d" ((unsigned long)param),
+                 [opc] "i" (CPACF_KMCTR)
+               : "cc", "memory", "0", "1");
 
-       return src_len - r3;
+       return src_len - s.odd;
 }
 
 /**
@@ -417,20 +428,21 @@ static inline void cpacf_prno(unsigned long func, void *param,
                              u8 *dest, unsigned long dest_len,
                              const u8 *seed, unsigned long seed_len)
 {
-       register unsigned long r0 asm("0") = (unsigned long) func;
-       register unsigned long r1 asm("1") = (unsigned long) param;
-       register unsigned long r2 asm("2") = (unsigned long) dest;
-       register unsigned long r3 asm("3") = (unsigned long) dest_len;
-       register unsigned long r4 asm("4") = (unsigned long) seed;
-       register unsigned long r5 asm("5") = (unsigned long) seed_len;
+       union register_pair d, s;
 
+       d.even = (unsigned long)dest;
+       d.odd  = (unsigned long)dest_len;
+       s.even = (unsigned long)seed;
+       s.odd  = (unsigned long)seed_len;
        asm volatile (
+               "       lgr     0,%[fc]\n"
+               "       lgr     1,%[pba]\n"
                "0:     .insn   rre,%[opc] << 16,%[dst],%[seed]\n"
                "       brc     1,0b\n"   /* handle partial completion */
-               : [dst] "+a" (r2), [dlen] "+d" (r3)
-               : [fc] "d" (r0), [pba] "a" (r1),
-                 [seed] "a" (r4), [slen] "d" (r5), [opc] "i" (CPACF_PRNO)
-               : "cc", "memory");
+               : [dst] "+&d" (d.pair)
+               : [fc] "d" (func), [pba] "d" ((unsigned long)param),
+                 [seed] "d" (s.pair), [opc] "i" (CPACF_PRNO)
+               : "cc", "memory", "0", "1");
 }
 
 /**
@@ -443,19 +455,19 @@ static inline void cpacf_prno(unsigned long func, void *param,
 static inline void cpacf_trng(u8 *ucbuf, unsigned long ucbuf_len,
                              u8 *cbuf, unsigned long cbuf_len)
 {
-       register unsigned long r0 asm("0") = (unsigned long) CPACF_PRNO_TRNG;
-       register unsigned long r2 asm("2") = (unsigned long) ucbuf;
-       register unsigned long r3 asm("3") = (unsigned long) ucbuf_len;
-       register unsigned long r4 asm("4") = (unsigned long) cbuf;
-       register unsigned long r5 asm("5") = (unsigned long) cbuf_len;
+       union register_pair u, c;
 
+       u.even = (unsigned long)ucbuf;
+       u.odd  = (unsigned long)ucbuf_len;
+       c.even = (unsigned long)cbuf;
+       c.odd  = (unsigned long)cbuf_len;
        asm volatile (
+               "       lghi    0,%[fc]\n"
                "0:     .insn   rre,%[opc] << 16,%[ucbuf],%[cbuf]\n"
                "       brc     1,0b\n"   /* handle partial completion */
-               : [ucbuf] "+a" (r2), [ucbuflen] "+d" (r3),
-                 [cbuf] "+a" (r4), [cbuflen] "+d" (r5)
-               : [fc] "d" (r0), [opc] "i" (CPACF_PRNO)
-               : "cc", "memory");
+               : [ucbuf] "+&d" (u.pair), [cbuf] "+&d" (c.pair)
+               : [fc] "K" (CPACF_PRNO_TRNG), [opc] "i" (CPACF_PRNO)
+               : "cc", "memory", "0");
 }
 
 /**
@@ -466,15 +478,15 @@ static inline void cpacf_trng(u8 *ucbuf, unsigned long ucbuf_len,
  */
 static inline void cpacf_pcc(unsigned long func, void *param)
 {
-       register unsigned long r0 asm("0") = (unsigned long) func;
-       register unsigned long r1 asm("1") = (unsigned long) param;
-
        asm volatile(
+               "       lgr     0,%[fc]\n"
+               "       lgr     1,%[pba]\n"
                "0:     .insn   rre,%[opc] << 16,0,0\n" /* PCC opcode */
                "       brc     1,0b\n" /* handle partial completion */
                :
-               : [fc] "d" (r0), [pba] "a" (r1), [opc] "i" (CPACF_PCC)
-               : "cc", "memory");
+               : [fc] "d" (func), [pba] "d" ((unsigned long)param),
+                 [opc] "i" (CPACF_PCC)
+               : "cc", "memory", "0", "1");
 }
 
 /**
@@ -487,14 +499,14 @@ static inline void cpacf_pcc(unsigned long func, void *param)
  */
 static inline void cpacf_pckmo(long func, void *param)
 {
-       register unsigned long r0 asm("0") = (unsigned long) func;
-       register unsigned long r1 asm("1") = (unsigned long) param;
-
        asm volatile(
+               "       lgr     0,%[fc]\n"
+               "       lgr     1,%[pba]\n"
                "       .insn   rre,%[opc] << 16,0,0\n" /* PCKMO opcode */
                :
-               : [fc] "d" (r0), [pba] "a" (r1), [opc] "i" (CPACF_PCKMO)
-               : "cc", "memory");
+               : [fc] "d" (func), [pba] "d" ((unsigned long)param),
+                 [opc] "i" (CPACF_PCKMO)
+               : "cc", "memory", "0", "1");
 }
 
 /**
@@ -512,21 +524,23 @@ static inline void cpacf_kma(unsigned long func, void *param, u8 *dest,
                             const u8 *src, unsigned long src_len,
                             const u8 *aad, unsigned long aad_len)
 {
-       register unsigned long r0 asm("0") = (unsigned long) func;
-       register unsigned long r1 asm("1") = (unsigned long) param;
-       register unsigned long r2 asm("2") = (unsigned long) src;
-       register unsigned long r3 asm("3") = (unsigned long) src_len;
-       register unsigned long r4 asm("4") = (unsigned long) aad;
-       register unsigned long r5 asm("5") = (unsigned long) aad_len;
-       register unsigned long r6 asm("6") = (unsigned long) dest;
+       union register_pair d, s, a;
 
+       d.even = (unsigned long)dest;
+       s.even = (unsigned long)src;
+       s.odd  = (unsigned long)src_len;
+       a.even = (unsigned long)aad;
+       a.odd  = (unsigned long)aad_len;
        asm volatile(
+               "       lgr     0,%[fc]\n"
+               "       lgr     1,%[pba]\n"
                "0:     .insn   rrf,%[opc] << 16,%[dst],%[src],%[aad],0\n"
                "       brc     1,0b\n" /* handle partial completion */
-               : [dst] "+a" (r6), [src] "+a" (r2), [slen] "+d" (r3),
-                 [aad] "+a" (r4), [alen] "+d" (r5)
-               : [fc] "d" (r0), [pba] "a" (r1), [opc] "i" (CPACF_KMA)
-               : "cc", "memory");
+               : [dst] "+&d" (d.pair), [src] "+&d" (s.pair),
+                 [aad] "+&d" (a.pair)
+               : [fc] "d" (func), [pba] "d" ((unsigned long)param),
+                 [opc] "i" (CPACF_KMA)
+               : "cc", "memory", "0", "1");
 }
 
 #endif /* _ASM_S390_CPACF_H */
index 1d007c6..14cfd48 100644 (file)
@@ -23,7 +23,7 @@
 #define MAX_ELF_HWCAP_FEATURES (8 * sizeof(elf_hwcap))
 #define MAX_CPU_FEATURES       MAX_ELF_HWCAP_FEATURES
 
-#define cpu_feature(feat)      ilog2(HWCAP_S390_ ## feat)
+#define cpu_feature(feat)      ilog2(HWCAP_ ## feat)
 
 int cpu_have_feature(unsigned int nr);
 
index adc0179..04dc65f 100644 (file)
@@ -111,6 +111,23 @@ union ctlreg2 {
        };
 };
 
+union ctlreg5 {
+       unsigned long val;
+       struct {
+               unsigned long       : 33;
+               unsigned long pasteo: 25;
+               unsigned long       : 6;
+       };
+};
+
+union ctlreg15 {
+       unsigned long val;
+       struct {
+               unsigned long lsea  : 61;
+               unsigned long       : 3;
+       };
+};
+
 #define ctl_set_bit(cr, bit) smp_ctl_set_bit(cr, bit)
 #define ctl_clear_bit(cr, bit) smp_ctl_clear_bit(cr, bit)
 
index c1b82bc..19a55e1 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/time.h>
 #include <linux/refcount.h>
 #include <linux/fs.h>
+#include <linux/init.h>
 
 #define DEBUG_MAX_LEVEL                   6  /* debug levels range from 0 to 6 */
 #define DEBUG_OFF_LEVEL                   -1 /* level where debug is switched off */
@@ -391,38 +392,99 @@ int debug_register_view(debug_info_t *id, struct debug_view *view);
 
 int debug_unregister_view(debug_info_t *id, struct debug_view *view);
 
+#ifndef MODULE
+
+/*
+ * Note: Initial page and area numbers must be fixed to allow static
+ * initialization. This enables very early tracing. Changes to these values
+ * must be reflected in __DEFINE_STATIC_AREA.
+ */
+#define EARLY_PAGES            8
+#define EARLY_AREAS            1
+
+#define VNAME(var, suffix)     __##var##_##suffix
+
 /*
-   define the debug levels:
-   - 0 No debugging output to console or syslog
-   - 1 Log internal errors to syslog, ignore check conditions
-   - 2 Log internal errors and check conditions to syslog
-   - 3 Log internal errors to console, log check conditions to syslog
-   - 4 Log internal errors and check conditions to console
-   - 5 panic on internal errors, log check conditions to console
-   - 6 panic on both, internal errors and check conditions
+ * Define static areas for early trace data. During boot debug_register_static()
+ * will replace these with dynamically allocated areas to allow custom page and
+ * area sizes, and dynamic resizing.
  */
+#define __DEFINE_STATIC_AREA(var)                                      \
+static char VNAME(var, data)[EARLY_PAGES][PAGE_SIZE] __initdata;       \
+static debug_entry_t *VNAME(var, pages)[EARLY_PAGES] __initdata = {    \
+       (debug_entry_t *)VNAME(var, data)[0],                           \
+       (debug_entry_t *)VNAME(var, data)[1],                           \
+       (debug_entry_t *)VNAME(var, data)[2],                           \
+       (debug_entry_t *)VNAME(var, data)[3],                           \
+       (debug_entry_t *)VNAME(var, data)[4],                           \
+       (debug_entry_t *)VNAME(var, data)[5],                           \
+       (debug_entry_t *)VNAME(var, data)[6],                           \
+       (debug_entry_t *)VNAME(var, data)[7],                           \
+};                                                                     \
+static debug_entry_t **VNAME(var, areas)[EARLY_AREAS] __initdata = {   \
+       (debug_entry_t **)VNAME(var, pages),                            \
+};                                                                     \
+static int VNAME(var, active_pages)[EARLY_AREAS] __initdata;           \
+static int VNAME(var, active_entries)[EARLY_AREAS] __initdata
+
+#define __DEBUG_INFO_INIT(var, _name, _buf_size) {                     \
+       .next = NULL,                                                   \
+       .prev = NULL,                                                   \
+       .ref_count = REFCOUNT_INIT(1),                                  \
+       .lock = __SPIN_LOCK_UNLOCKED(var.lock),                         \
+       .level = DEBUG_DEFAULT_LEVEL,                                   \
+       .nr_areas = EARLY_AREAS,                                        \
+       .pages_per_area = EARLY_PAGES,                                  \
+       .buf_size = (_buf_size),                                        \
+       .entry_size = sizeof(debug_entry_t) + (_buf_size),              \
+       .areas = VNAME(var, areas),                                     \
+       .active_area = 0,                                               \
+       .active_pages = VNAME(var, active_pages),                       \
+       .active_entries = VNAME(var, active_entries),                   \
+       .debugfs_root_entry = NULL,                                     \
+       .debugfs_entries = { NULL },                                    \
+       .views = { NULL },                                              \
+       .name = (_name),                                                \
+       .mode = 0600,                                                   \
+}
+
+#define __REGISTER_STATIC_DEBUG_INFO(var, name, pages, areas, view)    \
+static int __init VNAME(var, reg)(void)                                        \
+{                                                                      \
+       debug_register_static(&var, (pages), (areas));                  \
+       debug_register_view(&var, (view));                              \
+       return 0;                                                       \
+}                                                                      \
+arch_initcall(VNAME(var, reg))
+
+/**
+ * DEFINE_STATIC_DEBUG_INFO - Define static debug_info_t
+ *
+ * @var: Name of debug_info_t variable
+ * @name: Name of debug log (e.g. used for debugfs entry)
+ * @pages_per_area: Number of pages per area
+ * @nr_areas: Number of debug areas
+ * @buf_size: Size of data area in each debug entry
+ * @view: Pointer to debug view struct
+ *
+ * Define a static debug_info_t for early tracing. The associated debugfs log
+ * is automatically registered with the specified debug view.
+ *
+ * Important: Users of this macro must not call any of the
+ * debug_register/_unregister() functions for this debug_info_t!
+ *
+ * Note: Tracing will start with a fixed number of initial pages and areas.
+ * The debug area will be changed to use the specified numbers during
+ * arch_initcall.
+ */
+#define DEFINE_STATIC_DEBUG_INFO(var, name, pages, nr_areas, buf_size, view) \
+__DEFINE_STATIC_AREA(var);                                             \
+static debug_info_t __refdata var =                                    \
+       __DEBUG_INFO_INIT(var, (name), (buf_size));                     \
+__REGISTER_STATIC_DEBUG_INFO(var, name, pages, nr_areas, view)
+
+void debug_register_static(debug_info_t *id, int pages_per_area, int nr_areas);
 
-#ifndef DEBUG_LEVEL
-#define DEBUG_LEVEL 4
-#endif
-
-#define INTERNAL_ERRMSG(x,y...) "E" __FILE__ "%d: " x, __LINE__, y
-#define INTERNAL_WRNMSG(x,y...) "W" __FILE__ "%d: " x, __LINE__, y
-#define INTERNAL_INFMSG(x,y...) "I" __FILE__ "%d: " x, __LINE__, y
-#define INTERNAL_DEBMSG(x,y...) "D" __FILE__ "%d: " x, __LINE__, y
-
-#if DEBUG_LEVEL > 0
-#define PRINT_DEBUG(x...)      printk(KERN_DEBUG PRINTK_HEADER x)
-#define PRINT_INFO(x...)       printk(KERN_INFO PRINTK_HEADER x)
-#define PRINT_WARN(x...)       printk(KERN_WARNING PRINTK_HEADER x)
-#define PRINT_ERR(x...)                printk(KERN_ERR PRINTK_HEADER x)
-#define PRINT_FATAL(x...)      panic(PRINTK_HEADER x)
-#else
-#define PRINT_DEBUG(x...)      printk(KERN_DEBUG PRINTK_HEADER x)
-#define PRINT_INFO(x...)       printk(KERN_DEBUG PRINTK_HEADER x)
-#define PRINT_WARN(x...)       printk(KERN_DEBUG PRINTK_HEADER x)
-#define PRINT_ERR(x...)                printk(KERN_DEBUG PRINTK_HEADER x)
-#define PRINT_FATAL(x...)      printk(KERN_DEBUG PRINTK_HEADER x)
-#endif /* DASD_DEBUG */
+#endif /* MODULE */
 
 #endif /* DEBUG_H */
index ca8f85b..b3a8cb4 100644 (file)
@@ -309,6 +309,10 @@ int diag26c(void *req, void *resp, enum diag26c_sc subcode);
 
 struct hypfs_diag0c_entry;
 
+/*
+ * This structure must contain only pointers/references into
+ * the AMODE31 text section.
+ */
 struct diag_ops {
        int (*diag210)(struct diag210 *addr);
        int (*diag26c)(void *req, void *resp, enum diag26c_sc subcode);
@@ -317,6 +321,13 @@ struct diag_ops {
        void (*diag308_reset)(void);
 };
 
-extern struct diag_ops diag_dma_ops;
-extern struct diag210 *__diag210_tmp_dma;
+extern struct diag_ops diag_amode31_ops;
+extern struct diag210 *__diag210_tmp_amode31;
+
+int _diag210_amode31(struct diag210 *addr);
+int _diag26c_amode31(void *req, void *resp, enum diag26c_sc subcode);
+int _diag14_amode31(unsigned long rx, unsigned long ry1, unsigned long subcode);
+void _diag0c_amode31(struct hypfs_diag0c_entry *entry);
+void _diag308_reset_amode31(void);
+
 #endif /* _ASM_S390_DIAG_H */
index bd00c94..70a30ae 100644 (file)
 /* Keep this the last entry.  */
 #define R_390_NUM      61
 
-/* Bits present in AT_HWCAP. */
-#define HWCAP_S390_ESAN3       1
-#define HWCAP_S390_ZARCH       2
-#define HWCAP_S390_STFLE       4
-#define HWCAP_S390_MSA         8
-#define HWCAP_S390_LDISP       16
-#define HWCAP_S390_EIMM                32
-#define HWCAP_S390_DFP         64
-#define HWCAP_S390_HPAGE       128
-#define HWCAP_S390_ETF3EH      256
-#define HWCAP_S390_HIGH_GPRS   512
-#define HWCAP_S390_TE          1024
-#define HWCAP_S390_VXRS                2048
-#define HWCAP_S390_VXRS_BCD    4096
-#define HWCAP_S390_VXRS_EXT    8192
-#define HWCAP_S390_GS          16384
-#define HWCAP_S390_VXRS_EXT2   32768
-#define HWCAP_S390_VXRS_PDE    65536
-#define HWCAP_S390_SORT                131072
-#define HWCAP_S390_DFLT                262144
+enum {
+       HWCAP_NR_ESAN3          = 0,
+       HWCAP_NR_ZARCH          = 1,
+       HWCAP_NR_STFLE          = 2,
+       HWCAP_NR_MSA            = 3,
+       HWCAP_NR_LDISP          = 4,
+       HWCAP_NR_EIMM           = 5,
+       HWCAP_NR_DFP            = 6,
+       HWCAP_NR_HPAGE          = 7,
+       HWCAP_NR_ETF3EH         = 8,
+       HWCAP_NR_HIGH_GPRS      = 9,
+       HWCAP_NR_TE             = 10,
+       HWCAP_NR_VXRS           = 11,
+       HWCAP_NR_VXRS_BCD       = 12,
+       HWCAP_NR_VXRS_EXT       = 13,
+       HWCAP_NR_GS             = 14,
+       HWCAP_NR_VXRS_EXT2      = 15,
+       HWCAP_NR_VXRS_PDE       = 16,
+       HWCAP_NR_SORT           = 17,
+       HWCAP_NR_DFLT           = 18,
+       HWCAP_NR_VXRS_PDE2      = 19,
+       HWCAP_NR_NNPA           = 20,
+       HWCAP_NR_PCI_MIO        = 21,
+       HWCAP_NR_SIE            = 22,
+       HWCAP_NR_MAX
+};
 
-/* Internal bits, not exposed via elf */
-#define HWCAP_INT_SIE          1UL
+/* Bits present in AT_HWCAP. */
+#define HWCAP_ESAN3            BIT(HWCAP_NR_ESAN3)
+#define HWCAP_ZARCH            BIT(HWCAP_NR_ZARCH)
+#define HWCAP_STFLE            BIT(HWCAP_NR_STFLE)
+#define HWCAP_MSA              BIT(HWCAP_NR_MSA)
+#define HWCAP_LDISP            BIT(HWCAP_NR_LDISP)
+#define HWCAP_EIMM             BIT(HWCAP_NR_EIMM)
+#define HWCAP_DFP              BIT(HWCAP_NR_DFP)
+#define HWCAP_HPAGE            BIT(HWCAP_NR_HPAGE)
+#define HWCAP_ETF3EH           BIT(HWCAP_NR_ETF3EH)
+#define HWCAP_HIGH_GPRS                BIT(HWCAP_NR_HIGH_GPRS)
+#define HWCAP_TE               BIT(HWCAP_NR_TE)
+#define HWCAP_VXRS             BIT(HWCAP_NR_VXRS)
+#define HWCAP_VXRS_BCD         BIT(HWCAP_NR_VXRS_BCD)
+#define HWCAP_VXRS_EXT         BIT(HWCAP_NR_VXRS_EXT)
+#define HWCAP_GS               BIT(HWCAP_NR_GS)
+#define HWCAP_VXRS_EXT2                BIT(HWCAP_NR_VXRS_EXT2)
+#define HWCAP_VXRS_PDE         BIT(HWCAP_NR_VXRS_PDE)
+#define HWCAP_SORT             BIT(HWCAP_NR_SORT)
+#define HWCAP_DFLT             BIT(HWCAP_NR_DFLT)
+#define HWCAP_VXRS_PDE2                BIT(HWCAP_NR_VXRS_PDE2)
+#define HWCAP_NNPA             BIT(HWCAP_NR_NNPA)
+#define HWCAP_PCI_MIO          BIT(HWCAP_NR_PCI_MIO)
+#define HWCAP_SIE              BIT(HWCAP_NR_SIE)
 
 /*
  * These are used to set parameters in the core dumps.
@@ -209,10 +237,6 @@ struct arch_elf_state {
 extern unsigned long elf_hwcap;
 #define ELF_HWCAP (elf_hwcap)
 
-/* Internal hardware capabilities, not exposed via elf */
-
-extern unsigned long int_hwcap;
-
 /* This yields a string that ld.so will use to load implementation
    specific libraries for optimization.  This is more specific in
    intent than poking at uname or /proc/cpuinfo.
index 3beb294..16dc57d 100644 (file)
@@ -28,8 +28,8 @@ struct exception_table_entry
        long handler;
 };
 
-extern struct exception_table_entry *__start_dma_ex_table;
-extern struct exception_table_entry *__stop_dma_ex_table;
+extern struct exception_table_entry *__start_amode31_ex_table;
+extern struct exception_table_entry *__stop_amode31_ex_table;
 
 const struct exception_table_entry *s390_search_extables(unsigned long addr);
 
index 345cbe9..e8b460f 100644 (file)
@@ -18,7 +18,6 @@
 void ftrace_caller(void);
 
 extern char ftrace_graph_caller_end;
-extern unsigned long ftrace_plt;
 extern void *ftrace_func;
 
 struct dyn_arch_ftrace { };
@@ -31,10 +30,11 @@ struct dyn_arch_ftrace { };
 
 struct module;
 struct dyn_ftrace;
-/*
- * Either -mhotpatch or -mnop-mcount is used - no explicit init is required
- */
-static inline int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec) { return 0; }
+
+bool ftrace_need_init_nop(void);
+#define ftrace_need_init_nop ftrace_need_init_nop
+
+int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec);
 #define ftrace_init_nop ftrace_init_nop
 
 static inline unsigned long ftrace_call_adjust(unsigned long addr)
@@ -42,42 +42,6 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr)
        return addr;
 }
 
-struct ftrace_insn {
-       u16 opc;
-       s32 disp;
-} __packed;
-
-static inline void ftrace_generate_nop_insn(struct ftrace_insn *insn)
-{
-#ifdef CONFIG_FUNCTION_TRACER
-       /* brcl 0,0 */
-       insn->opc = 0xc004;
-       insn->disp = 0;
-#endif
-}
-
-static inline int is_ftrace_nop(struct ftrace_insn *insn)
-{
-#ifdef CONFIG_FUNCTION_TRACER
-       if (insn->disp == 0)
-               return 1;
-#endif
-       return 0;
-}
-
-static inline void ftrace_generate_call_insn(struct ftrace_insn *insn,
-                                            unsigned long ip)
-{
-#ifdef CONFIG_FUNCTION_TRACER
-       unsigned long target;
-
-       /* brasl r0,ftrace_caller */
-       target = is_module_addr((void *) ip) ? ftrace_plt : FTRACE_ADDR;
-       insn->opc = 0xc005;
-       insn->disp = (target - ip) / 2;
-#endif
-}
-
 /*
  * Even though the system call numbers are identical for s390/s390x a
  * different system call table is used for compat tasks. This may lead
diff --git a/arch/s390/include/asm/ftrace.lds.h b/arch/s390/include/asm/ftrace.lds.h
new file mode 100644 (file)
index 0000000..968adfd
--- /dev/null
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef DIV_ROUND_UP
+#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
+#endif
+
+#define SIZEOF_MCOUNT_LOC_ENTRY 8
+#define SIZEOF_FTRACE_HOTPATCH_TRAMPOLINE 24
+#define FTRACE_HOTPATCH_TRAMPOLINES_SIZE(n)                                   \
+       DIV_ROUND_UP(SIZEOF_FTRACE_HOTPATCH_TRAMPOLINE * (n),                  \
+                    SIZEOF_MCOUNT_LOC_ENTRY)
+
+#ifdef CONFIG_FUNCTION_TRACER
+#define FTRACE_HOTPATCH_TRAMPOLINES_TEXT                                      \
+       . = ALIGN(8);                                                          \
+       __ftrace_hotpatch_trampolines_start = .;                               \
+       . = . + FTRACE_HOTPATCH_TRAMPOLINES_SIZE(__stop_mcount_loc -           \
+                                                __start_mcount_loc);          \
+       __ftrace_hotpatch_trampolines_end = .;
+#else
+#define FTRACE_HOTPATCH_TRAMPOLINES_TEXT
+#endif
index a9e2c72..3f8ee25 100644 (file)
@@ -12,6 +12,7 @@
 #include <asm/types.h>
 #include <asm/cio.h>
 #include <asm/setup.h>
+#include <asm/page.h>
 #include <uapi/asm/ipl.h>
 
 struct ipl_parameter_block {
diff --git a/arch/s390/include/asm/kfence.h b/arch/s390/include/asm/kfence.h
new file mode 100644 (file)
index 0000000..d55ba87
--- /dev/null
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _ASM_S390_KFENCE_H
+#define _ASM_S390_KFENCE_H
+
+#include <linux/mm.h>
+#include <linux/kfence.h>
+#include <asm/set_memory.h>
+#include <asm/page.h>
+
+void __kernel_map_pages(struct page *page, int numpages, int enable);
+
+static __always_inline bool arch_kfence_init_pool(void)
+{
+       return true;
+}
+
+#define arch_kfence_test_address(addr) ((addr) & PAGE_MASK)
+
+/*
+ * Do not split kfence pool to 4k mapping with arch_kfence_init_pool(),
+ * but earlier where page table allocations still happen with memblock.
+ * Reason is that arch_kfence_init_pool() gets called when the system
+ * is still in a limbo state - disabling and enabling bottom halves is
+ * not yet allowed, but that is what our page_table_alloc() would do.
+ */
+static __always_inline void kfence_split_mapping(void)
+{
+#ifdef CONFIG_KFENCE
+       unsigned long pool_pages = KFENCE_POOL_SIZE >> PAGE_SHIFT;
+
+       set_memory_4k((unsigned long)__kfence_pool, pool_pages);
+#endif
+}
+
+static inline bool kfence_protect_page(unsigned long addr, bool protect)
+{
+       __kernel_map_pages(virt_to_page(addr), 1, !protect);
+       return true;
+}
+
+#endif /* _ASM_S390_KFENCE_H */
index 9b4473f..161a9e1 100644 (file)
@@ -445,15 +445,15 @@ struct kvm_vcpu_stat {
        u64 instruction_sigp_init_cpu_reset;
        u64 instruction_sigp_cpu_reset;
        u64 instruction_sigp_unknown;
-       u64 diagnose_10;
-       u64 diagnose_44;
-       u64 diagnose_9c;
-       u64 diagnose_9c_ignored;
-       u64 diagnose_9c_forward;
-       u64 diagnose_258;
-       u64 diagnose_308;
-       u64 diagnose_500;
-       u64 diagnose_other;
+       u64 instruction_diagnose_10;
+       u64 instruction_diagnose_44;
+       u64 instruction_diagnose_9c;
+       u64 diag_9c_ignored;
+       u64 diag_9c_forward;
+       u64 instruction_diagnose_258;
+       u64 instruction_diagnose_308;
+       u64 instruction_diagnose_500;
+       u64 instruction_diagnose_other;
        u64 pfault_sync;
 };
 
index cbc7c3a..df73a05 100644 (file)
 #include <uapi/asm/kvm_para.h>
 #include <asm/diag.h>
 
-static inline long __kvm_hypercall0(unsigned long nr)
-{
-       register unsigned long __nr asm("1") = nr;
-       register long __rc asm("2");
-
-       asm volatile ("diag 2,4,0x500\n"
-                     : "=d" (__rc) : "d" (__nr): "memory", "cc");
-       return __rc;
-}
-
-static inline long kvm_hypercall0(unsigned long nr)
-{
-       diag_stat_inc(DIAG_STAT_X500);
-       return __kvm_hypercall0(nr);
-}
-
-static inline long __kvm_hypercall1(unsigned long nr, unsigned long p1)
-{
-       register unsigned long __nr asm("1") = nr;
-       register unsigned long __p1 asm("2") = p1;
-       register long __rc asm("2");
-
-       asm volatile ("diag 2,4,0x500\n"
-                     : "=d" (__rc) : "d" (__nr), "0" (__p1) : "memory", "cc");
-       return __rc;
-}
-
-static inline long kvm_hypercall1(unsigned long nr, unsigned long p1)
-{
-       diag_stat_inc(DIAG_STAT_X500);
-       return __kvm_hypercall1(nr, p1);
-}
-
-static inline long __kvm_hypercall2(unsigned long nr, unsigned long p1,
-                              unsigned long p2)
-{
-       register unsigned long __nr asm("1") = nr;
-       register unsigned long __p1 asm("2") = p1;
-       register unsigned long __p2 asm("3") = p2;
-       register long __rc asm("2");
-
-       asm volatile ("diag 2,4,0x500\n"
-                     : "=d" (__rc) : "d" (__nr), "0" (__p1), "d" (__p2)
-                     : "memory", "cc");
-       return __rc;
-}
-
-static inline long kvm_hypercall2(unsigned long nr, unsigned long p1,
-                              unsigned long p2)
-{
-       diag_stat_inc(DIAG_STAT_X500);
-       return __kvm_hypercall2(nr, p1, p2);
-}
-
-static inline long __kvm_hypercall3(unsigned long nr, unsigned long p1,
-                              unsigned long p2, unsigned long p3)
-{
-       register unsigned long __nr asm("1") = nr;
-       register unsigned long __p1 asm("2") = p1;
-       register unsigned long __p2 asm("3") = p2;
-       register unsigned long __p3 asm("4") = p3;
-       register long __rc asm("2");
-
-       asm volatile ("diag 2,4,0x500\n"
-                     : "=d" (__rc) : "d" (__nr), "0" (__p1), "d" (__p2),
-                       "d" (__p3) : "memory", "cc");
-       return __rc;
-}
-
-static inline long kvm_hypercall3(unsigned long nr, unsigned long p1,
-                              unsigned long p2, unsigned long p3)
-{
-       diag_stat_inc(DIAG_STAT_X500);
-       return __kvm_hypercall3(nr, p1, p2, p3);
-}
-
-static inline long __kvm_hypercall4(unsigned long nr, unsigned long p1,
-                              unsigned long p2, unsigned long p3,
-                              unsigned long p4)
-{
-       register unsigned long __nr asm("1") = nr;
-       register unsigned long __p1 asm("2") = p1;
-       register unsigned long __p2 asm("3") = p2;
-       register unsigned long __p3 asm("4") = p3;
-       register unsigned long __p4 asm("5") = p4;
-       register long __rc asm("2");
-
-       asm volatile ("diag 2,4,0x500\n"
-                     : "=d" (__rc) : "d" (__nr), "0" (__p1), "d" (__p2),
-                       "d" (__p3), "d" (__p4) : "memory", "cc");
-       return __rc;
-}
-
-static inline long kvm_hypercall4(unsigned long nr, unsigned long p1,
-                              unsigned long p2, unsigned long p3,
-                              unsigned long p4)
-{
-       diag_stat_inc(DIAG_STAT_X500);
-       return __kvm_hypercall4(nr, p1, p2, p3, p4);
-}
-
-static inline long __kvm_hypercall5(unsigned long nr, unsigned long p1,
-                              unsigned long p2, unsigned long p3,
-                              unsigned long p4, unsigned long p5)
-{
-       register unsigned long __nr asm("1") = nr;
-       register unsigned long __p1 asm("2") = p1;
-       register unsigned long __p2 asm("3") = p2;
-       register unsigned long __p3 asm("4") = p3;
-       register unsigned long __p4 asm("5") = p4;
-       register unsigned long __p5 asm("6") = p5;
-       register long __rc asm("2");
-
-       asm volatile ("diag 2,4,0x500\n"
-                     : "=d" (__rc) : "d" (__nr), "0" (__p1), "d" (__p2),
-                       "d" (__p3), "d" (__p4), "d" (__p5)  : "memory", "cc");
-       return __rc;
-}
-
-static inline long kvm_hypercall5(unsigned long nr, unsigned long p1,
-                              unsigned long p2, unsigned long p3,
-                              unsigned long p4, unsigned long p5)
-{
-       diag_stat_inc(DIAG_STAT_X500);
-       return __kvm_hypercall5(nr, p1, p2, p3, p4, p5);
-}
-
-static inline long __kvm_hypercall6(unsigned long nr, unsigned long p1,
-                              unsigned long p2, unsigned long p3,
-                              unsigned long p4, unsigned long p5,
-                              unsigned long p6)
-{
-       register unsigned long __nr asm("1") = nr;
-       register unsigned long __p1 asm("2") = p1;
-       register unsigned long __p2 asm("3") = p2;
-       register unsigned long __p3 asm("4") = p3;
-       register unsigned long __p4 asm("5") = p4;
-       register unsigned long __p5 asm("6") = p5;
-       register unsigned long __p6 asm("7") = p6;
-       register long __rc asm("2");
-
-       asm volatile ("diag 2,4,0x500\n"
-                     : "=d" (__rc) : "d" (__nr), "0" (__p1), "d" (__p2),
-                       "d" (__p3), "d" (__p4), "d" (__p5), "d" (__p6)
-                     : "memory", "cc");
-       return __rc;
-}
-
-static inline long kvm_hypercall6(unsigned long nr, unsigned long p1,
-                              unsigned long p2, unsigned long p3,
-                              unsigned long p4, unsigned long p5,
-                              unsigned long p6)
-{
-       diag_stat_inc(DIAG_STAT_X500);
-       return __kvm_hypercall6(nr, p1, p2, p3, p4, p5, p6);
-}
+#define HYPERCALL_FMT_0
+#define HYPERCALL_FMT_1 , "0" (r2)
+#define HYPERCALL_FMT_2 , "d" (r3) HYPERCALL_FMT_1
+#define HYPERCALL_FMT_3 , "d" (r4) HYPERCALL_FMT_2
+#define HYPERCALL_FMT_4 , "d" (r5) HYPERCALL_FMT_3
+#define HYPERCALL_FMT_5 , "d" (r6) HYPERCALL_FMT_4
+#define HYPERCALL_FMT_6 , "d" (r7) HYPERCALL_FMT_5
+
+#define HYPERCALL_PARM_0
+#define HYPERCALL_PARM_1 , unsigned long arg1
+#define HYPERCALL_PARM_2 HYPERCALL_PARM_1, unsigned long arg2
+#define HYPERCALL_PARM_3 HYPERCALL_PARM_2, unsigned long arg3
+#define HYPERCALL_PARM_4 HYPERCALL_PARM_3, unsigned long arg4
+#define HYPERCALL_PARM_5 HYPERCALL_PARM_4, unsigned long arg5
+#define HYPERCALL_PARM_6 HYPERCALL_PARM_5, unsigned long arg6
+
+#define HYPERCALL_REGS_0
+#define HYPERCALL_REGS_1                                               \
+       register unsigned long r2 asm("2") = arg1
+#define HYPERCALL_REGS_2                                               \
+       HYPERCALL_REGS_1;                                               \
+       register unsigned long r3 asm("3") = arg2
+#define HYPERCALL_REGS_3                                               \
+       HYPERCALL_REGS_2;                                               \
+       register unsigned long r4 asm("4") = arg3
+#define HYPERCALL_REGS_4                                               \
+       HYPERCALL_REGS_3;                                               \
+       register unsigned long r5 asm("5") = arg4
+#define HYPERCALL_REGS_5                                               \
+       HYPERCALL_REGS_4;                                               \
+       register unsigned long r6 asm("6") = arg5
+#define HYPERCALL_REGS_6                                               \
+       HYPERCALL_REGS_5;                                               \
+       register unsigned long r7 asm("7") = arg6
+
+#define HYPERCALL_ARGS_0
+#define HYPERCALL_ARGS_1 , arg1
+#define HYPERCALL_ARGS_2 HYPERCALL_ARGS_1, arg2
+#define HYPERCALL_ARGS_3 HYPERCALL_ARGS_2, arg3
+#define HYPERCALL_ARGS_4 HYPERCALL_ARGS_3, arg4
+#define HYPERCALL_ARGS_5 HYPERCALL_ARGS_4, arg5
+#define HYPERCALL_ARGS_6 HYPERCALL_ARGS_5, arg6
+
+#define GENERATE_KVM_HYPERCALL_FUNC(args)                              \
+static inline                                                          \
+long __kvm_hypercall##args(unsigned long nr HYPERCALL_PARM_##args)     \
+{                                                                      \
+       register unsigned long __nr asm("1") = nr;                      \
+       register long __rc asm("2");                                    \
+       HYPERCALL_REGS_##args;                                          \
+                                                                       \
+       asm volatile (                                                  \
+               "       diag    2,4,0x500\n"                            \
+               : "=d" (__rc)                                           \
+               : "d" (__nr) HYPERCALL_FMT_##args                       \
+               : "memory", "cc");                                      \
+       return __rc;                                                    \
+}                                                                      \
+                                                                       \
+static inline                                                          \
+long kvm_hypercall##args(unsigned long nr HYPERCALL_PARM_##args)       \
+{                                                                      \
+       diag_stat_inc(DIAG_STAT_X500);                                  \
+       return __kvm_hypercall##args(nr HYPERCALL_ARGS_##args);         \
+}
+
+GENERATE_KVM_HYPERCALL_FUNC(0)
+GENERATE_KVM_HYPERCALL_FUNC(1)
+GENERATE_KVM_HYPERCALL_FUNC(2)
+GENERATE_KVM_HYPERCALL_FUNC(3)
+GENERATE_KVM_HYPERCALL_FUNC(4)
+GENERATE_KVM_HYPERCALL_FUNC(5)
+GENERATE_KVM_HYPERCALL_FUNC(6)
 
 /* kvm on s390 is always paravirtualization enabled */
 static inline int kvm_para_available(void)
index 24e8fed..1ffea75 100644 (file)
@@ -22,7 +22,7 @@
 
 #define EX_TABLE(_fault, _target)                                      \
        __EX_TABLE(__ex_table, _fault, _target)
-#define EX_TABLE_DMA(_fault, _target)                                  \
-       __EX_TABLE(.dma.ex_table, _fault, _target)
+#define EX_TABLE_AMODE31(_fault, _target)                              \
+       __EX_TABLE(.amode31.ex_table, _fault, _target)
 
 #endif
index 47bde5a..11213c8 100644 (file)
@@ -124,7 +124,8 @@ struct lowcore {
        /* Restart function and parameter. */
        __u64   restart_fn;                     /* 0x0370 */
        __u64   restart_data;                   /* 0x0378 */
-       __u64   restart_source;                 /* 0x0380 */
+       __u32   restart_source;                 /* 0x0380 */
+       __u32   restart_flags;                  /* 0x0384 */
 
        /* Address space pointer. */
        __u64   kernel_asce;                    /* 0x0388 */
index e0a6d29..9f1eea1 100644 (file)
@@ -8,16 +8,14 @@
  * This file contains the s390 architecture specific module code.
  */
 
-struct mod_arch_syminfo
-{
+struct mod_arch_syminfo {
        unsigned long got_offset;
        unsigned long plt_offset;
        int got_initialized;
        int plt_initialized;
 };
 
-struct mod_arch_specific
-{
+struct mod_arch_specific {
        /* Starting offset of got in the module core memory. */
        unsigned long got_offset;
        /* Starting offset of plt in the module core memory. */
@@ -30,6 +28,14 @@ struct mod_arch_specific
        int nsyms;
        /* Additional symbol information (got and plt offsets). */
        struct mod_arch_syminfo *syminfo;
+#ifdef CONFIG_FUNCTION_TRACER
+       /* Start of memory reserved for ftrace hotpatch trampolines. */
+       struct ftrace_hotpatch_trampoline *trampolines_start;
+       /* End of memory reserved for ftrace hotpatch trampolines. */
+       struct ftrace_hotpatch_trampoline *trampolines_end;
+       /* Next unused ftrace hotpatch trampoline slot. */
+       struct ftrace_hotpatch_trampoline *next_trampoline;
+#endif /* CONFIG_FUNCTION_TRACER */
 };
 
 #endif /* _ASM_S390_MODULE_H */
index 3ba945c..d98d17a 100644 (file)
@@ -144,9 +144,6 @@ struct page;
 void arch_free_page(struct page *page, int order);
 void arch_alloc_page(struct page *page, int order);
 void arch_set_page_dat(struct page *page, int order);
-void arch_set_page_nodat(struct page *page, int order);
-int arch_test_page_nodat(struct page *page);
-void arch_set_page_states(int make_stable);
 
 static inline int devmem_is_allowed(unsigned long pfn)
 {
index 5509b22..e4803ec 100644 (file)
@@ -216,9 +216,10 @@ void zpci_remove_reserved_devices(void);
 int clp_setup_writeback_mio(void);
 int clp_scan_pci_devices(void);
 int clp_query_pci_fn(struct zpci_dev *zdev);
-int clp_enable_fh(struct zpci_dev *, u8);
-int clp_disable_fh(struct zpci_dev *);
+int clp_enable_fh(struct zpci_dev *zdev, u32 *fh, u8 nr_dma_as);
+int clp_disable_fh(struct zpci_dev *zdev, u32 *fh);
 int clp_get_state(u32 fid, enum zpci_state *state);
+int clp_refresh_fh(u32 fid, u32 *fh);
 
 /* UID */
 void update_uid_checking(bool new);
@@ -271,6 +272,8 @@ struct zpci_dev *get_zdev_by_fid(u32);
 /* DMA */
 int zpci_dma_init(void);
 void zpci_dma_exit(void);
+int zpci_dma_init_device(struct zpci_dev *zdev);
+int zpci_dma_exit_device(struct zpci_dev *zdev);
 
 /* IRQ */
 int __init zpci_irq_init(void);
index f62cd3e..3b8e89d 100644 (file)
@@ -182,8 +182,6 @@ static inline unsigned long *get_st_pto(unsigned long entry)
 }
 
 /* Prototypes */
-int zpci_dma_init_device(struct zpci_dev *);
-void zpci_dma_exit_device(struct zpci_dev *);
 void dma_free_seg_table(unsigned long);
 unsigned long *dma_alloc_cpu_table(void);
 void dma_cleanup_tables(unsigned long *);
index dcac7b2..b61426c 100644 (file)
@@ -67,15 +67,15 @@ extern unsigned long zero_page_mask;
 /* TODO: s390 cannot support io_remap_pfn_range... */
 
 #define pte_ERROR(e) \
-       printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
+       pr_err("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
 #define pmd_ERROR(e) \
-       printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
+       pr_err("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
 #define pud_ERROR(e) \
-       printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e))
+       pr_err("%s:%d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e))
 #define p4d_ERROR(e) \
-       printk("%s:%d: bad p4d %p.\n", __FILE__, __LINE__, (void *) p4d_val(e))
+       pr_err("%s:%d: bad p4d %016lx.\n", __FILE__, __LINE__, p4d_val(e))
 #define pgd_ERROR(e) \
-       printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))
+       pr_err("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))
 
 /*
  * The vmalloc and module area will always be on the topmost area of the
index ddc7858..879b8e3 100644 (file)
@@ -26,6 +26,8 @@
 #define _CIF_MCCK_GUEST                BIT(CIF_MCCK_GUEST)
 #define _CIF_DEDICATED_CPU     BIT(CIF_DEDICATED_CPU)
 
+#define RESTART_FLAG_CTLREGS   _AC(1 << 0, U)
+
 #ifndef __ASSEMBLY__
 
 #include <linux/cpumask.h>
index cb4f73c..25b5dc3 100644 (file)
@@ -291,16 +291,15 @@ struct qdio_ssqd_desc {
 typedef void qdio_handler_t(struct ccw_device *, unsigned int, int,
                            int, int, unsigned long);
 
-/* qdio errors reported to the upper-layer program */
+/* qdio errors reported through the queue handlers: */
 #define QDIO_ERROR_ACTIVATE                    0x0001
 #define QDIO_ERROR_GET_BUF_STATE               0x0002
 #define QDIO_ERROR_SET_BUF_STATE               0x0004
+
+/* extra info for completed SBALs: */
 #define QDIO_ERROR_SLSB_STATE                  0x0100
 #define QDIO_ERROR_SLSB_PENDING                        0x0200
 
-#define QDIO_ERROR_FATAL                       0x00ff
-#define QDIO_ERROR_TEMPORARY                   0xff00
-
 /* for qdio_cleanup */
 #define QDIO_FLAG_CLEANUP_USING_CLEAR          0x01
 #define QDIO_FLAG_CLEANUP_USING_HALT           0x02
@@ -312,8 +311,6 @@ typedef void qdio_handler_t(struct ccw_device *, unsigned int, int,
  * @qib_param_field_format: format for qib_parm_field
  * @qib_param_field: pointer to 128 bytes or NULL, if no param field
  * @qib_rflags: rflags to set
- * @input_slib_elements: pointer to no_input_qs * 128 words of data or NULL
- * @output_slib_elements: pointer to no_output_qs * 128 words of data or NULL
  * @no_input_qs: number of input queues
  * @no_output_qs: number of output queues
  * @input_handler: handler to be called for input queues
@@ -330,27 +327,18 @@ struct qdio_initialize {
        unsigned int qib_param_field_format;
        unsigned char *qib_param_field;
        unsigned char qib_rflags;
-       unsigned long *input_slib_elements;
-       unsigned long *output_slib_elements;
        unsigned int no_input_qs;
        unsigned int no_output_qs;
        qdio_handler_t *input_handler;
        qdio_handler_t *output_handler;
        void (*irq_poll)(struct ccw_device *cdev, unsigned long data);
-       unsigned int scan_threshold;
        unsigned long int_parm;
        struct qdio_buffer ***input_sbal_addr_array;
        struct qdio_buffer ***output_sbal_addr_array;
 };
 
-#define QDIO_STATE_INACTIVE            0x00000002 /* after qdio_cleanup */
-#define QDIO_STATE_ESTABLISHED         0x00000004 /* after qdio_establish */
-#define QDIO_STATE_ACTIVE              0x00000008 /* after qdio_activate */
-#define QDIO_STATE_STOPPED             0x00000010 /* after queues went down */
-
 #define QDIO_FLAG_SYNC_INPUT           0x01
 #define QDIO_FLAG_SYNC_OUTPUT          0x02
-#define QDIO_FLAG_PCI_OUT              0x10
 
 int qdio_alloc_buffers(struct qdio_buffer **buf, unsigned int count);
 void qdio_free_buffers(struct qdio_buffer **buf, unsigned int count);
@@ -367,7 +355,6 @@ extern int do_QDIO(struct ccw_device *cdev, unsigned int callflags, int q_nr,
                   unsigned int bufnr, unsigned int count, struct qaob *aob);
 extern int qdio_start_irq(struct ccw_device *cdev);
 extern int qdio_stop_irq(struct ccw_device *cdev);
-extern int qdio_get_next_buffers(struct ccw_device *, int, int *, int *);
 extern int qdio_inspect_queue(struct ccw_device *cdev, unsigned int nr,
                              bool is_input, unsigned int *bufnr,
                              unsigned int *error);
index 5763769..e3ae937 100644 (file)
@@ -8,8 +8,6 @@
 #define _ASM_S390_SCLP_H
 
 #include <linux/types.h>
-#include <asm/chpid.h>
-#include <asm/cpu.h>
 
 #define SCLP_CHP_INFO_MASK_SIZE                32
 #define EARLY_SCCB_SIZE                PAGE_SIZE
 /* 24 + 16 * SCLP_MAX_CORES */
 #define EXT_SCCB_READ_CPU      (3 * PAGE_SIZE)
 
+#ifndef __ASSEMBLY__
+#include <asm/chpid.h>
+#include <asm/cpu.h>
+
 struct sclp_chp_info {
        u8 recognized[SCLP_CHP_INFO_MASK_SIZE];
        u8 standby[SCLP_CHP_INFO_MASK_SIZE];
@@ -113,6 +115,9 @@ struct zpci_report_error_header {
        u8 data[0];     /* Subsequent Data passed verbatim to SCLP ET 24 */
 } __packed;
 
+extern char *sclp_early_sccb;
+
+void sclp_early_set_buffer(void *sccb);
 int sclp_early_read_info(void);
 int sclp_early_read_storage_info(void);
 int sclp_early_get_core_info(struct sclp_core_info *info);
@@ -147,4 +152,5 @@ static inline int sclp_get_core_info(struct sclp_core_info *info, int early)
        return _sclp_get_core_info(info);
 }
 
+#endif /* __ASSEMBLY__ */
 #endif /* _ASM_S390_SCLP_H */
index 0c21514..85881dd 100644 (file)
@@ -35,7 +35,7 @@ static inline int arch_is_kernel_initmem_freed(unsigned long addr)
  */
 #define __bootdata_preserved(var) __section(".boot.preserved.data." #var) var
 
-extern unsigned long __sdma, __edma;
-extern unsigned long __stext_dma, __etext_dma;
+extern unsigned long __samode31, __eamode31;
+extern unsigned long __stext_amode31, __etext_amode31;
 
 #endif
index a22a5a8..950d87b 100644 (file)
@@ -10,6 +10,7 @@ extern struct mutex cpa_mutex;
 #define SET_MEMORY_RW  2UL
 #define SET_MEMORY_NX  4UL
 #define SET_MEMORY_X   8UL
+#define SET_MEMORY_4K  16UL
 
 int __set_memory(unsigned long addr, int numpages, unsigned long flags);
 
@@ -33,4 +34,9 @@ static inline int set_memory_x(unsigned long addr, int numpages)
        return __set_memory(addr, numpages, SET_MEMORY_X);
 }
 
+static inline int set_memory_4k(unsigned long addr, int numpages)
+{
+       return __set_memory(addr, numpages, SET_MEMORY_4K);
+}
+
 #endif
index 3a77aa9..b6606ff 100644 (file)
 #include <uapi/asm/setup.h>
 #include <linux/build_bug.h>
 
-#define EP_OFFSET              0x10008
-#define EP_STRING              "S390EP"
 #define PARMAREA               0x10400
-#define EARLY_SCCB_OFFSET      0x11000
-#define HEAD_END               0x12000
+#define HEAD_END               0x11000
 
 /*
  * Machine features detected in early.c
@@ -36,6 +33,7 @@
 #define MACHINE_FLAG_NX                BIT(15)
 #define MACHINE_FLAG_GS                BIT(16)
 #define MACHINE_FLAG_SCC       BIT(17)
+#define MACHINE_FLAG_PCI_MIO   BIT(18)
 
 #define LPP_MAGIC              BIT(31)
 #define LPP_PID_MASK           _AC(0xffffffff, UL)
 #define STARTUP_NORMAL_OFFSET  0x10000
 #define STARTUP_KDUMP_OFFSET   0x10010
 
-/* Offsets to parameters in kernel/head.S  */
-
-#define IPL_DEVICE_OFFSET      0x10400
-#define INITRD_START_OFFSET    0x10408
-#define INITRD_SIZE_OFFSET     0x10410
-#define OLDMEM_BASE_OFFSET     0x10418
-#define OLDMEM_SIZE_OFFSET     0x10420
-#define KERNEL_VERSION_OFFSET  0x10428
-#define COMMAND_LINE_OFFSET    0x10480
-
 #ifndef __ASSEMBLY__
 
 #include <asm/lowcore.h>
 #include <asm/types.h>
 
-#define IPL_DEVICE     (*(unsigned long *)  (IPL_DEVICE_OFFSET))
-#define INITRD_START   (*(unsigned long *)  (INITRD_START_OFFSET))
-#define INITRD_SIZE    (*(unsigned long *)  (INITRD_SIZE_OFFSET))
-#define OLDMEM_BASE    (*(unsigned long *)  (OLDMEM_BASE_OFFSET))
-#define OLDMEM_SIZE    (*(unsigned long *)  (OLDMEM_SIZE_OFFSET))
-#define COMMAND_LINE   ((char *)            (COMMAND_LINE_OFFSET))
-
 struct parmarea {
        unsigned long ipl_device;                       /* 0x10400 */
        unsigned long initrd_start;                     /* 0x10408 */
@@ -110,6 +91,7 @@ extern unsigned long mio_wb_bit_mask;
 #define MACHINE_HAS_NX         (S390_lowcore.machine_flags & MACHINE_FLAG_NX)
 #define MACHINE_HAS_GS         (S390_lowcore.machine_flags & MACHINE_FLAG_GS)
 #define MACHINE_HAS_SCC                (S390_lowcore.machine_flags & MACHINE_FLAG_SCC)
+#define MACHINE_HAS_PCI_MIO    (S390_lowcore.machine_flags & MACHINE_FLAG_PCI_MIO)
 
 /*
  * Console mode. Override with conmode=
@@ -161,20 +143,22 @@ static inline unsigned long kaslr_offset(void)
 
 extern int is_full_image;
 
+struct initrd_data {
+       unsigned long start;
+       unsigned long size;
+};
+extern struct initrd_data initrd_data;
+
+struct oldmem_data {
+       unsigned long start;
+       unsigned long size;
+};
+extern struct oldmem_data oldmem_data;
+
 static inline u32 gen_lpswe(unsigned long addr)
 {
        BUILD_BUG_ON(addr > 0xfff);
        return 0xb2b20000 | addr;
 }
-
-#else /* __ASSEMBLY__ */
-
-#define IPL_DEVICE     (IPL_DEVICE_OFFSET)
-#define INITRD_START   (INITRD_START_OFFSET)
-#define INITRD_SIZE    (INITRD_SIZE_OFFSET)
-#define OLDMEM_BASE    (OLDMEM_BASE_OFFSET)
-#define OLDMEM_SIZE    (OLDMEM_SIZE_OFFSET)
-#define COMMAND_LINE   (COMMAND_LINE_OFFSET)
-
 #endif /* __ASSEMBLY__ */
 #endif /* _ASM_S390_SETUP_H */
index 9107e3d..b3dd883 100644 (file)
@@ -104,4 +104,63 @@ static inline bool arch_syscall_is_vdso_sigreturn(struct pt_regs *regs)
        return false;
 }
 
+#define SYSCALL_FMT_0
+#define SYSCALL_FMT_1 , "0" (r2)
+#define SYSCALL_FMT_2 , "d" (r3) SYSCALL_FMT_1
+#define SYSCALL_FMT_3 , "d" (r4) SYSCALL_FMT_2
+#define SYSCALL_FMT_4 , "d" (r5) SYSCALL_FMT_3
+#define SYSCALL_FMT_5 , "d" (r6) SYSCALL_FMT_4
+#define SYSCALL_FMT_6 , "d" (r7) SYSCALL_FMT_5
+
+#define SYSCALL_PARM_0
+#define SYSCALL_PARM_1 , long arg1
+#define SYSCALL_PARM_2 SYSCALL_PARM_1, long arg2
+#define SYSCALL_PARM_3 SYSCALL_PARM_2, long arg3
+#define SYSCALL_PARM_4 SYSCALL_PARM_3, long arg4
+#define SYSCALL_PARM_5 SYSCALL_PARM_4, long arg5
+#define SYSCALL_PARM_6 SYSCALL_PARM_5, long arg6
+
+#define SYSCALL_REGS_0
+#define SYSCALL_REGS_1                                                 \
+       register long r2 asm("2") = arg1
+#define SYSCALL_REGS_2                                                 \
+       SYSCALL_REGS_1;                                                 \
+       register long r3 asm("3") = arg2
+#define SYSCALL_REGS_3                                                 \
+       SYSCALL_REGS_2;                                                 \
+       register long r4 asm("4") = arg3
+#define SYSCALL_REGS_4                                                 \
+       SYSCALL_REGS_3;                                                 \
+       register long r5 asm("5") = arg4
+#define SYSCALL_REGS_5                                                 \
+       SYSCALL_REGS_4;                                                 \
+       register long r6 asm("6") = arg5
+#define SYSCALL_REGS_6                                                 \
+       SYSCALL_REGS_5;                                                 \
+       register long r7 asm("7") = arg6
+
+#define GENERATE_SYSCALL_FUNC(nr)                                      \
+static __always_inline                                                 \
+long syscall##nr(unsigned long syscall SYSCALL_PARM_##nr)              \
+{                                                                      \
+       register unsigned long r1 asm ("1") = syscall;                  \
+       register long rc asm ("2");                                     \
+       SYSCALL_REGS_##nr;                                              \
+                                                                       \
+       asm volatile (                                                  \
+               "       svc     0\n"                                    \
+               : "=d" (rc)                                             \
+               : "d" (r1) SYSCALL_FMT_##nr                             \
+               : "memory");                                            \
+       return rc;                                                      \
+}
+
+GENERATE_SYSCALL_FUNC(0)
+GENERATE_SYSCALL_FUNC(1)
+GENERATE_SYSCALL_FUNC(2)
+GENERATE_SYSCALL_FUNC(3)
+GENERATE_SYSCALL_FUNC(4)
+GENERATE_SYSCALL_FUNC(5)
+GENERATE_SYSCALL_FUNC(6)
+
 #endif /* _ASM_SYSCALL_H */
index 12c5f00..fe92a4c 100644 (file)
@@ -356,11 +356,9 @@ int uv_convert_from_secure(unsigned long paddr);
 int gmap_convert_to_secure(struct gmap *gmap, unsigned long gaddr);
 
 void setup_uv(void);
-void adjust_to_uv_max(unsigned long *vmax);
 #else
 #define is_prot_virt_host() 0
 static inline void setup_uv(void) {}
-static inline void adjust_to_uv_max(unsigned long *vmax) {}
 
 static inline int uv_destroy_page(unsigned long paddr)
 {
@@ -373,10 +371,4 @@ static inline int uv_convert_from_secure(unsigned long paddr)
 }
 #endif
 
-#if defined(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) || IS_ENABLED(CONFIG_KVM)
-void uv_query_info(void);
-#else
-static inline void uv_query_info(void) {}
-#endif
-
 #endif /* _ASM_S390_UV_H */
index d6465b2..db84942 100644 (file)
@@ -6,6 +6,7 @@
 
 #define VDSO_HAS_CLOCK_GETRES 1
 
+#include <asm/syscall.h>
 #include <asm/timex.h>
 #include <asm/unistd.h>
 #include <linux/compiler.h>
@@ -35,35 +36,20 @@ static inline u64 __arch_get_hw_counter(s32 clock_mode, const struct vdso_data *
 static __always_inline
 long clock_gettime_fallback(clockid_t clkid, struct __kernel_timespec *ts)
 {
-       register unsigned long r1 __asm__("r1") = __NR_clock_gettime;
-       register unsigned long r2 __asm__("r2") = (unsigned long)clkid;
-       register void *r3 __asm__("r3") = ts;
-
-       asm ("svc 0\n" : "+d" (r2) : "d" (r1), "d" (r3) : "cc", "memory");
-       return r2;
+       return syscall2(__NR_clock_gettime, (long)clkid, (long)ts);
 }
 
 static __always_inline
 long gettimeofday_fallback(register struct __kernel_old_timeval *tv,
                           register struct timezone *tz)
 {
-       register unsigned long r1 __asm__("r1") = __NR_gettimeofday;
-       register unsigned long r2 __asm__("r2") = (unsigned long)tv;
-       register void *r3 __asm__("r3") = tz;
-
-       asm ("svc 0\n" : "+d" (r2) : "d" (r1), "d" (r3) : "cc", "memory");
-       return r2;
+       return syscall2(__NR_gettimeofday, (long)tv, (long)tz);
 }
 
 static __always_inline
 long clock_getres_fallback(clockid_t clkid, struct __kernel_timespec *ts)
 {
-       register unsigned long r1 __asm__("r1") = __NR_clock_getres;
-       register unsigned long r2 __asm__("r2") = (unsigned long)clkid;
-       register void *r3 __asm__("r3") = ts;
-
-       asm ("svc 0\n" : "+d" (r2) : "d" (r1), "d" (r3) : "cc", "memory");
-       return r2;
+       return syscall2(__NR_clock_getres, (long)clkid, (long)ts);
 }
 
 #ifdef CONFIG_TIME_NS
index 4a44ba5..80f500f 100644 (file)
@@ -40,7 +40,7 @@ obj-y += sysinfo.o lgr.o os_info.o machine_kexec.o
 obj-y  += runtime_instr.o cache.o fpu.o dumpstack.o guarded_storage.o sthyi.o
 obj-y  += entry.o reipl.o relocate_kernel.o kdebugfs.o alternative.o
 obj-y  += nospec-branch.o ipl_vmparm.o machine_kexec_reloc.o unwind_bc.o
-obj-y  += smp.o
+obj-y  += smp.o text_amode31.o
 
 extra-y                                += head64.o vmlinux.lds
 
index 77ff213..b57da93 100644 (file)
@@ -116,6 +116,7 @@ int main(void)
        OFFSET(__LC_RESTART_FN, lowcore, restart_fn);
        OFFSET(__LC_RESTART_DATA, lowcore, restart_data);
        OFFSET(__LC_RESTART_SOURCE, lowcore, restart_source);
+       OFFSET(__LC_RESTART_FLAGS, lowcore, restart_flags);
        OFFSET(__LC_KERNEL_ASCE, lowcore, kernel_asce);
        OFFSET(__LC_USER_ASCE, lowcore, user_asce);
        OFFSET(__LC_LPP, lowcore, lpp);
@@ -152,5 +153,12 @@ int main(void)
        DEFINE(__KEXEC_SHA_REGION_SIZE, sizeof(struct kexec_sha_region));
        /* sizeof kernel parameter area */
        DEFINE(__PARMAREA_SIZE, sizeof(struct parmarea));
+       /* kernel parameter area offsets */
+       DEFINE(IPL_DEVICE, PARMAREA + offsetof(struct parmarea, ipl_device));
+       DEFINE(INITRD_START, PARMAREA + offsetof(struct parmarea, initrd_start));
+       DEFINE(INITRD_SIZE, PARMAREA + offsetof(struct parmarea, initrd_size));
+       DEFINE(OLDMEM_BASE, PARMAREA + offsetof(struct parmarea, oldmem_base));
+       DEFINE(OLDMEM_SIZE, PARMAREA + offsetof(struct parmarea, oldmem_size));
+       DEFINE(COMMAND_LINE, PARMAREA + offsetof(struct parmarea, command_line));
        return 0;
 }
index 0e36dfc..d72a6df 100644 (file)
@@ -140,7 +140,7 @@ int copy_oldmem_kernel(void *dst, void *src, size_t count)
 
        while (count) {
                from = __pa(src);
-               if (!OLDMEM_BASE && from < sclp.hsa_size) {
+               if (!oldmem_data.start && from < sclp.hsa_size) {
                        /* Copy from zfcp/nvme dump HSA area */
                        len = min(count, sclp.hsa_size - from);
                        rc = memcpy_hsa_kernel(dst, from, len);
@@ -148,12 +148,12 @@ int copy_oldmem_kernel(void *dst, void *src, size_t count)
                                return rc;
                } else {
                        /* Check for swapped kdump oldmem areas */
-                       if (OLDMEM_BASE && from - OLDMEM_BASE < OLDMEM_SIZE) {
-                               from -= OLDMEM_BASE;
-                               len = min(count, OLDMEM_SIZE - from);
-                       } else if (OLDMEM_BASE && from < OLDMEM_SIZE) {
-                               len = min(count, OLDMEM_SIZE - from);
-                               from += OLDMEM_BASE;
+                       if (oldmem_data.start && from - oldmem_data.start < oldmem_data.size) {
+                               from -= oldmem_data.start;
+                               len = min(count, oldmem_data.size - from);
+                       } else if (oldmem_data.start && from < oldmem_data.size) {
+                               len = min(count, oldmem_data.size - from);
+                               from += oldmem_data.start;
                        } else {
                                len = count;
                        }
@@ -183,7 +183,7 @@ static int copy_oldmem_user(void __user *dst, void *src, size_t count)
 
        while (count) {
                from = __pa(src);
-               if (!OLDMEM_BASE && from < sclp.hsa_size) {
+               if (!oldmem_data.start && from < sclp.hsa_size) {
                        /* Copy from zfcp/nvme dump HSA area */
                        len = min(count, sclp.hsa_size - from);
                        rc = memcpy_hsa_user(dst, from, len);
@@ -191,12 +191,12 @@ static int copy_oldmem_user(void __user *dst, void *src, size_t count)
                                return rc;
                } else {
                        /* Check for swapped kdump oldmem areas */
-                       if (OLDMEM_BASE && from - OLDMEM_BASE < OLDMEM_SIZE) {
-                               from -= OLDMEM_BASE;
-                               len = min(count, OLDMEM_SIZE - from);
-                       } else if (OLDMEM_BASE && from < OLDMEM_SIZE) {
-                               len = min(count, OLDMEM_SIZE - from);
-                               from += OLDMEM_BASE;
+                       if (oldmem_data.start && from - oldmem_data.start < oldmem_data.size) {
+                               from -= oldmem_data.start;
+                               len = min(count, oldmem_data.size - from);
+                       } else if (oldmem_data.start && from < oldmem_data.size) {
+                               len = min(count, oldmem_data.size - from);
+                               from += oldmem_data.start;
                        } else {
                                len = count;
                        }
@@ -243,10 +243,10 @@ static int remap_oldmem_pfn_range_kdump(struct vm_area_struct *vma,
        unsigned long size_old;
        int rc;
 
-       if (pfn < OLDMEM_SIZE >> PAGE_SHIFT) {
-               size_old = min(size, OLDMEM_SIZE - (pfn << PAGE_SHIFT));
+       if (pfn < oldmem_data.size >> PAGE_SHIFT) {
+               size_old = min(size, oldmem_data.size - (pfn << PAGE_SHIFT));
                rc = remap_pfn_range(vma, from,
-                                    pfn + (OLDMEM_BASE >> PAGE_SHIFT),
+                                    pfn + (oldmem_data.start >> PAGE_SHIFT),
                                     size_old, prot);
                if (rc || size == size_old)
                        return rc;
@@ -288,7 +288,7 @@ static int remap_oldmem_pfn_range_zfcpdump(struct vm_area_struct *vma,
 int remap_oldmem_pfn_range(struct vm_area_struct *vma, unsigned long from,
                           unsigned long pfn, unsigned long size, pgprot_t prot)
 {
-       if (OLDMEM_BASE)
+       if (oldmem_data.start)
                return remap_oldmem_pfn_range_kdump(vma, from, pfn, size, prot);
        else
                return remap_oldmem_pfn_range_zfcpdump(vma, from, pfn, size,
@@ -633,17 +633,17 @@ int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
        u64 hdr_off;
 
        /* If we are not in kdump or zfcp/nvme dump mode return */
-       if (!OLDMEM_BASE && !is_ipl_type_dump())
+       if (!oldmem_data.start && !is_ipl_type_dump())
                return 0;
        /* If we cannot get HSA size for zfcp/nvme dump return error */
        if (is_ipl_type_dump() && !sclp.hsa_size)
                return -ENODEV;
 
        /* For kdump, exclude previous crashkernel memory */
-       if (OLDMEM_BASE) {
-               oldmem_region.base = OLDMEM_BASE;
-               oldmem_region.size = OLDMEM_SIZE;
-               oldmem_type.total_size = OLDMEM_SIZE;
+       if (oldmem_data.start) {
+               oldmem_region.base = oldmem_data.start;
+               oldmem_region.size = oldmem_data.size;
+               oldmem_type.total_size = oldmem_data.size;
        }
 
        mem_chunk_cnt = get_mem_chunk_cnt();
index 09b6c64..4331c7e 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/export.h>
 #include <linux/init.h>
 #include <linux/fs.h>
+#include <linux/minmax.h>
 #include <linux/debugfs.h>
 
 #include <asm/debug.h>
@@ -92,6 +93,8 @@ static int debug_hex_ascii_format_fn(debug_info_t *id, struct debug_view *view,
                                     char *out_buf, const char *in_buf);
 static int debug_sprintf_format_fn(debug_info_t *id, struct debug_view *view,
                                   char *out_buf, debug_sprintf_entry_t *curr_event);
+static void debug_areas_swap(debug_info_t *a, debug_info_t *b);
+static void debug_events_append(debug_info_t *dest, debug_info_t *src);
 
 /* globals */
 
@@ -311,24 +314,6 @@ static debug_info_t *debug_info_create(const char *name, int pages_per_area,
                goto out;
 
        rc->mode = mode & ~S_IFMT;
-
-       /* create root directory */
-       rc->debugfs_root_entry = debugfs_create_dir(rc->name,
-                                                   debug_debugfs_root_entry);
-
-       /* append new element to linked list */
-       if (!debug_area_first) {
-               /* first element in list */
-               debug_area_first = rc;
-               rc->prev = NULL;
-       } else {
-               /* append element to end of list */
-               debug_area_last->next = rc;
-               rc->prev = debug_area_last;
-       }
-       debug_area_last = rc;
-       rc->next = NULL;
-
        refcount_set(&rc->ref_count, 1);
 out:
        return rc;
@@ -388,27 +373,10 @@ static void debug_info_get(debug_info_t *db_info)
  */
 static void debug_info_put(debug_info_t *db_info)
 {
-       int i;
-
        if (!db_info)
                return;
-       if (refcount_dec_and_test(&db_info->ref_count)) {
-               for (i = 0; i < DEBUG_MAX_VIEWS; i++) {
-                       if (!db_info->views[i])
-                               continue;
-                       debugfs_remove(db_info->debugfs_entries[i]);
-               }
-               debugfs_remove(db_info->debugfs_root_entry);
-               if (db_info == debug_area_first)
-                       debug_area_first = db_info->next;
-               if (db_info == debug_area_last)
-                       debug_area_last = db_info->prev;
-               if (db_info->prev)
-                       db_info->prev->next = db_info->next;
-               if (db_info->next)
-                       db_info->next->prev = db_info->prev;
+       if (refcount_dec_and_test(&db_info->ref_count))
                debug_info_free(db_info);
-       }
 }
 
 /*
@@ -632,6 +600,31 @@ static int debug_close(struct inode *inode, struct file *file)
        return 0; /* success */
 }
 
+/* Create debugfs entries and add to internal list. */
+static void _debug_register(debug_info_t *id)
+{
+       /* create root directory */
+       id->debugfs_root_entry = debugfs_create_dir(id->name,
+                                                   debug_debugfs_root_entry);
+
+       /* append new element to linked list */
+       if (!debug_area_first) {
+               /* first element in list */
+               debug_area_first = id;
+               id->prev = NULL;
+       } else {
+               /* append element to end of list */
+               debug_area_last->next = id;
+               id->prev = debug_area_last;
+       }
+       debug_area_last = id;
+       id->next = NULL;
+
+       debug_register_view(id, &debug_level_view);
+       debug_register_view(id, &debug_flush_view);
+       debug_register_view(id, &debug_pages_view);
+}
+
 /**
  * debug_register_mode() - creates and initializes debug area.
  *
@@ -661,19 +654,16 @@ debug_info_t *debug_register_mode(const char *name, int pages_per_area,
        if ((uid != 0) || (gid != 0))
                pr_warn("Root becomes the owner of all s390dbf files in sysfs\n");
        BUG_ON(!initialized);
-       mutex_lock(&debug_mutex);
 
        /* create new debug_info */
        rc = debug_info_create(name, pages_per_area, nr_areas, buf_size, mode);
-       if (!rc)
-               goto out;
-       debug_register_view(rc, &debug_level_view);
-       debug_register_view(rc, &debug_flush_view);
-       debug_register_view(rc, &debug_pages_view);
-out:
-       if (!rc)
+       if (rc) {
+               mutex_lock(&debug_mutex);
+               _debug_register(rc);
+               mutex_unlock(&debug_mutex);
+       } else {
                pr_err("Registering debug feature %s failed\n", name);
-       mutex_unlock(&debug_mutex);
+       }
        return rc;
 }
 EXPORT_SYMBOL(debug_register_mode);
@@ -702,6 +692,82 @@ debug_info_t *debug_register(const char *name, int pages_per_area,
 }
 EXPORT_SYMBOL(debug_register);
 
+/**
+ * debug_register_static() - registers a static debug area
+ *
+ * @id: Handle for static debug area
+ * @pages_per_area: Number of pages per area
+ * @nr_areas: Number of debug areas
+ *
+ * Register debug_info_t defined using DEFINE_STATIC_DEBUG_INFO.
+ *
+ * Note: This function is called automatically via an initcall generated by
+ *      DEFINE_STATIC_DEBUG_INFO.
+ */
+void debug_register_static(debug_info_t *id, int pages_per_area, int nr_areas)
+{
+       unsigned long flags;
+       debug_info_t *copy;
+
+       if (!initialized) {
+               pr_err("Tried to register debug feature %s too early\n",
+                      id->name);
+               return;
+       }
+
+       copy = debug_info_alloc("", pages_per_area, nr_areas, id->buf_size,
+                               id->level, ALL_AREAS);
+       if (!copy) {
+               pr_err("Registering debug feature %s failed\n", id->name);
+
+               /* Clear pointers to prevent tracing into released initdata. */
+               spin_lock_irqsave(&id->lock, flags);
+               id->areas = NULL;
+               id->active_pages = NULL;
+               id->active_entries = NULL;
+               spin_unlock_irqrestore(&id->lock, flags);
+
+               return;
+       }
+
+       /* Replace static trace area with dynamic copy. */
+       spin_lock_irqsave(&id->lock, flags);
+       debug_events_append(copy, id);
+       debug_areas_swap(id, copy);
+       spin_unlock_irqrestore(&id->lock, flags);
+
+       /* Clear pointers to initdata and discard copy. */
+       copy->areas = NULL;
+       copy->active_pages = NULL;
+       copy->active_entries = NULL;
+       debug_info_free(copy);
+
+       mutex_lock(&debug_mutex);
+       _debug_register(id);
+       mutex_unlock(&debug_mutex);
+}
+
+/* Remove debugfs entries and remove from internal list. */
+static void _debug_unregister(debug_info_t *id)
+{
+       int i;
+
+       for (i = 0; i < DEBUG_MAX_VIEWS; i++) {
+               if (!id->views[i])
+                       continue;
+               debugfs_remove(id->debugfs_entries[i]);
+       }
+       debugfs_remove(id->debugfs_root_entry);
+       if (id == debug_area_first)
+               debug_area_first = id->next;
+       if (id == debug_area_last)
+               debug_area_last = id->prev;
+       if (id->prev)
+               id->prev->next = id->next;
+       if (id->next)
+               id->next->prev = id->prev;
+}
+
 /**
  * debug_unregister() - give back debug area.
  *
@@ -715,8 +781,10 @@ void debug_unregister(debug_info_t *id)
        if (!id)
                return;
        mutex_lock(&debug_mutex);
-       debug_info_put(id);
+       _debug_unregister(id);
        mutex_unlock(&debug_mutex);
+
+       debug_info_put(id);
 }
 EXPORT_SYMBOL(debug_unregister);
 
@@ -726,35 +794,28 @@ EXPORT_SYMBOL(debug_unregister);
  */
 static int debug_set_size(debug_info_t *id, int nr_areas, int pages_per_area)
 {
-       debug_entry_t ***new_areas;
+       debug_info_t *new_id;
        unsigned long flags;
-       int rc = 0;
 
        if (!id || (nr_areas <= 0) || (pages_per_area < 0))
                return -EINVAL;
-       if (pages_per_area > 0) {
-               new_areas = debug_areas_alloc(pages_per_area, nr_areas);
-               if (!new_areas) {
-                       pr_info("Allocating memory for %i pages failed\n",
-                               pages_per_area);
-                       rc = -ENOMEM;
-                       goto out;
-               }
-       } else {
-               new_areas = NULL;
+
+       new_id = debug_info_alloc("", pages_per_area, nr_areas, id->buf_size,
+                                 id->level, ALL_AREAS);
+       if (!new_id) {
+               pr_info("Allocating memory for %i pages failed\n",
+                       pages_per_area);
+               return -ENOMEM;
        }
+
        spin_lock_irqsave(&id->lock, flags);
-       debug_areas_free(id);
-       id->areas = new_areas;
-       id->nr_areas = nr_areas;
-       id->pages_per_area = pages_per_area;
-       id->active_area = 0;
-       memset(id->active_entries, 0, sizeof(int)*id->nr_areas);
-       memset(id->active_pages, 0, sizeof(int)*id->nr_areas);
+       debug_events_append(new_id, id);
+       debug_areas_swap(new_id, id);
+       debug_info_free(new_id);
        spin_unlock_irqrestore(&id->lock, flags);
        pr_info("%s: set new size (%i pages)\n", id->name, pages_per_area);
-out:
-       return rc;
+
+       return 0;
 }
 
 /**
@@ -772,16 +833,17 @@ void debug_set_level(debug_info_t *id, int new_level)
 
        if (!id)
                return;
-       spin_lock_irqsave(&id->lock, flags);
+
        if (new_level == DEBUG_OFF_LEVEL) {
-               id->level = DEBUG_OFF_LEVEL;
                pr_info("%s: switched off\n", id->name);
        } else if ((new_level > DEBUG_MAX_LEVEL) || (new_level < 0)) {
                pr_info("%s: level %i is out of range (%i - %i)\n",
                        id->name, new_level, 0, DEBUG_MAX_LEVEL);
-       } else {
-               id->level = new_level;
+               return;
        }
+
+       spin_lock_irqsave(&id->lock, flags);
+       id->level = new_level;
        spin_unlock_irqrestore(&id->lock, flags);
 }
 EXPORT_SYMBOL(debug_set_level);
@@ -821,6 +883,42 @@ static inline debug_entry_t *get_active_entry(debug_info_t *id)
                                  id->active_entries[id->active_area]);
 }
 
+/* Swap debug areas of a and b. */
+static void debug_areas_swap(debug_info_t *a, debug_info_t *b)
+{
+       swap(a->nr_areas, b->nr_areas);
+       swap(a->pages_per_area, b->pages_per_area);
+       swap(a->areas, b->areas);
+       swap(a->active_area, b->active_area);
+       swap(a->active_pages, b->active_pages);
+       swap(a->active_entries, b->active_entries);
+}
+
+/* Append all debug events in active area from source to destination log. */
+static void debug_events_append(debug_info_t *dest, debug_info_t *src)
+{
+       debug_entry_t *from, *to, *last;
+
+       if (!src->areas || !dest->areas)
+               return;
+
+       /* Loop over all entries in src, starting with oldest. */
+       from = get_active_entry(src);
+       last = from;
+       do {
+               if (from->clock != 0LL) {
+                       to = get_active_entry(dest);
+                       memset(to, 0, dest->entry_size);
+                       memcpy(to, from, min(src->entry_size,
+                                            dest->entry_size));
+                       proceed_active_entry(dest);
+               }
+
+               proceed_active_entry(src);
+               from = get_active_entry(src);
+       } while (from != last);
+}
+
 /*
  * debug_finish_entry:
  * - set timestamp, caller address, cpu number etc.
@@ -1111,16 +1209,17 @@ int debug_register_view(debug_info_t *id, struct debug_view *view)
                        break;
        }
        if (i == DEBUG_MAX_VIEWS) {
-               pr_err("Registering view %s/%s would exceed the maximum "
-                      "number of views %i\n", id->name, view->name, i);
                rc = -1;
        } else {
                id->views[i] = view;
                id->debugfs_entries[i] = pde;
        }
        spin_unlock_irqrestore(&id->lock, flags);
-       if (rc)
+       if (rc) {
+               pr_err("Registering view %s/%s would exceed the maximum "
+                      "number of views %i\n", id->name, view->name, i);
                debugfs_remove(pde);
+       }
 out:
        return rc;
 }
index a3f4746..76a656b 100644 (file)
@@ -14,6 +14,7 @@
 #include <asm/diag.h>
 #include <asm/trace/diag.h>
 #include <asm/sections.h>
+#include "entry.h"
 
 struct diag_stat {
        unsigned int counter[NR_DIAG_STAT];
@@ -50,8 +51,16 @@ static const struct diag_desc diag_map[NR_DIAG_STAT] = {
        [DIAG_STAT_X500] = { .code = 0x500, .name = "Virtio Service" },
 };
 
-struct diag_ops __bootdata_preserved(diag_dma_ops);
-struct diag210 *__bootdata_preserved(__diag210_tmp_dma);
+struct diag_ops __amode31_ref diag_amode31_ops = {
+       .diag210 = _diag210_amode31,
+       .diag26c = _diag26c_amode31,
+       .diag14 = _diag14_amode31,
+       .diag0c = _diag0c_amode31,
+       .diag308_reset = _diag308_reset_amode31
+};
+
+static struct diag210 _diag210_tmp_amode31 __section(".amode31.data");
+struct diag210 __amode31_ref *__diag210_tmp_amode31 = &_diag210_tmp_amode31;
 
 static int show_diag_stat(struct seq_file *m, void *v)
 {
@@ -59,7 +68,7 @@ static int show_diag_stat(struct seq_file *m, void *v)
        unsigned long n = (unsigned long) v - 1;
        int cpu, prec, tmp;
 
-       get_online_cpus();
+       cpus_read_lock();
        if (n == 0) {
                seq_puts(m, "         ");
 
@@ -78,7 +87,7 @@ static int show_diag_stat(struct seq_file *m, void *v)
                }
                seq_printf(m, "    %s\n", diag_map[n-1].name);
        }
-       put_online_cpus();
+       cpus_read_unlock();
        return 0;
 }
 
@@ -135,7 +144,7 @@ EXPORT_SYMBOL(diag_stat_inc_norecursion);
 int diag14(unsigned long rx, unsigned long ry1, unsigned long subcode)
 {
        diag_stat_inc(DIAG_STAT_X014);
-       return diag_dma_ops.diag14(rx, ry1, subcode);
+       return diag_amode31_ops.diag14(rx, ry1, subcode);
 }
 EXPORT_SYMBOL(diag14);
 
@@ -172,12 +181,12 @@ int diag210(struct diag210 *addr)
        int ccode;
 
        spin_lock_irqsave(&diag210_lock, flags);
-       *__diag210_tmp_dma = *addr;
+       *__diag210_tmp_amode31 = *addr;
 
        diag_stat_inc(DIAG_STAT_X210);
-       ccode = diag_dma_ops.diag210(__diag210_tmp_dma);
+       ccode = diag_amode31_ops.diag210(__diag210_tmp_amode31);
 
-       *addr = *__diag210_tmp_dma;
+       *addr = *__diag210_tmp_amode31;
        spin_unlock_irqrestore(&diag210_lock, flags);
 
        return ccode;
@@ -205,6 +214,6 @@ EXPORT_SYMBOL(diag224);
 int diag26c(void *req, void *resp, enum diag26c_sc subcode)
 {
        diag_stat_inc(DIAG_STAT_X26C);
-       return diag_dma_ops.diag26c(req, resp, subcode);
+       return diag_amode31_ops.diag26c(req, resp, subcode);
 }
 EXPORT_SYMBOL(diag26c);
index 5412efe..ec55154 100644 (file)
@@ -312,10 +312,12 @@ static const unsigned char formats[][6] = {
        [INSTR_VRR_VV]       = { V_8, V_12, 0, 0, 0, 0 },
        [INSTR_VRR_VV0U]     = { V_8, V_12, U4_32, 0, 0, 0 },
        [INSTR_VRR_VV0U0U]   = { V_8, V_12, U4_32, U4_24, 0, 0 },
+       [INSTR_VRR_VV0U2]    = { V_8, V_12, U4_24, 0, 0, 0 },
        [INSTR_VRR_VV0UU2]   = { V_8, V_12, U4_32, U4_28, 0, 0 },
        [INSTR_VRR_VV0UUU]   = { V_8, V_12, U4_32, U4_28, U4_24, 0 },
        [INSTR_VRR_VVV]      = { V_8, V_12, V_16, 0, 0, 0 },
        [INSTR_VRR_VVV0U]    = { V_8, V_12, V_16, U4_32, 0, 0 },
+       [INSTR_VRR_VVV0U0]   = { V_8, V_12, V_16, U4_24, 0, 0 },
        [INSTR_VRR_VVV0U0U]  = { V_8, V_12, V_16, U4_32, U4_24, 0 },
        [INSTR_VRR_VVV0UU]   = { V_8, V_12, V_16, U4_32, U4_28, 0 },
        [INSTR_VRR_VVV0UUU]  = { V_8, V_12, V_16, U4_32, U4_28, U4_24 },
index fb84e3f..9857cb0 100644 (file)
@@ -236,6 +236,10 @@ static __init void detect_machine_facilities(void)
                clock_comparator_max = -1ULL >> 1;
                __ctl_set_bit(0, 53);
        }
+       if (IS_ENABLED(CONFIG_PCI) && test_facility(153)) {
+               S390_lowcore.machine_flags |= MACHINE_FLAG_PCI_MIO;
+               /* the control bit is set during PCI initialization */
+       }
 }
 
 static inline void save_vector_registers(void)
index 5a2f70c..b9716a7 100644 (file)
@@ -624,12 +624,15 @@ ENTRY(mcck_int_handler)
 4:     j       4b
 ENDPROC(mcck_int_handler)
 
-#
-# PSW restart interrupt handler
-#
 ENTRY(restart_int_handler)
        ALTERNATIVE "", ".insn s,0xb2800000,_LPP_OFFSET", 40
        stg     %r15,__LC_SAVE_AREA_RESTART
+       TSTMSK  __LC_RESTART_FLAGS,RESTART_FLAG_CTLREGS,4
+       jz      0f
+       la      %r15,4095
+       lctlg   %c0,%c15,__LC_CREGS_SAVE_AREA-4095(%r15)
+0:     larl    %r15,.Lstosm_tmp
+       stosm   0(%r15),0x04                    # turn dat on, keep irqs off
        lg      %r15,__LC_RESTART_STACK
        xc      STACK_FRAME_OVERHEAD(__PT_SIZE,%r15),STACK_FRAME_OVERHEAD(%r15)
        stmg    %r0,%r14,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
@@ -638,7 +641,7 @@ ENTRY(restart_int_handler)
        xc      0(STACK_FRAME_OVERHEAD,%r15),0(%r15)
        lg      %r1,__LC_RESTART_FN             # load fn, parm & source cpu
        lg      %r2,__LC_RESTART_DATA
-       lg      %r3,__LC_RESTART_SOURCE
+       lgf     %r3,__LC_RESTART_SOURCE
        ltgr    %r3,%r3                         # test source cpu address
        jm      1f                              # negative -> skip source stop
 0:     sigp    %r4,%r3,SIGP_SENSE              # sigp sense to source cpu
index 1ab3346..7f2696e 100644 (file)
@@ -28,10 +28,8 @@ void do_non_secure_storage_access(struct pt_regs *regs);
 void do_secure_storage_violation(struct pt_regs *regs);
 void do_report_trap(struct pt_regs *regs, int si_signo, int si_code, char *str);
 void kernel_stack_overflow(struct pt_regs * regs);
-void do_signal(struct pt_regs *regs);
 void handle_signal32(struct ksignal *ksig, sigset_t *oldset,
                     struct pt_regs *regs);
-void do_notify_resume(struct pt_regs *regs);
 
 void __init init_IRQ(void);
 void do_io_irq(struct pt_regs *regs);
@@ -64,4 +62,13 @@ void stack_free(unsigned long stack);
 
 extern char kprobes_insn_page[];
 
+extern char _samode31[], _eamode31[];
+extern char _stext_amode31[], _etext_amode31[];
+extern struct exception_table_entry _start_amode31_ex_table[];
+extern struct exception_table_entry _stop_amode31_ex_table[];
+
+#define __amode31_data __section(".amode31.data")
+#define __amode31_ref __section(".amode31.refs")
+extern long _start_amode31_refs[], _end_amode31_refs[];
+
 #endif /* _ENTRY_H */
index 2d8f595..0a464d3 100644 (file)
 #include <trace/syscall.h>
 #include <asm/asm-offsets.h>
 #include <asm/cacheflush.h>
+#include <asm/ftrace.lds.h>
+#include <asm/nospec-branch.h>
 #include <asm/set_memory.h>
 #include "entry.h"
+#include "ftrace.h"
 
 /*
  * To generate function prologue either gcc's hotpatch feature (since gcc 4.8)
  */
 
 void *ftrace_func __read_mostly = ftrace_stub;
-unsigned long ftrace_plt;
+struct ftrace_insn {
+       u16 opc;
+       s32 disp;
+} __packed;
+
+asm(
+       "       .align 16\n"
+       "ftrace_shared_hotpatch_trampoline_br:\n"
+       "       lmg     %r0,%r1,2(%r1)\n"
+       "       br      %r1\n"
+       "ftrace_shared_hotpatch_trampoline_br_end:\n"
+);
+
+#ifdef CONFIG_EXPOLINE
+asm(
+       "       .align 16\n"
+       "ftrace_shared_hotpatch_trampoline_ex:\n"
+       "       lmg     %r0,%r1,2(%r1)\n"
+       "       ex      %r0," __stringify(__LC_BR_R1) "(%r0)\n"
+       "       j       .\n"
+       "ftrace_shared_hotpatch_trampoline_ex_end:\n"
+);
+
+asm(
+       "       .align 16\n"
+       "ftrace_shared_hotpatch_trampoline_exrl:\n"
+       "       lmg     %r0,%r1,2(%r1)\n"
+       "       .insn   ril,0xc60000000000,%r0,0f\n" /* exrl */
+       "       j       .\n"
+       "0:     br      %r1\n"
+       "ftrace_shared_hotpatch_trampoline_exrl_end:\n"
+);
+#endif /* CONFIG_EXPOLINE */
+
+#ifdef CONFIG_MODULES
+static char *ftrace_plt;
+
+asm(
+       "       .data\n"
+       "ftrace_plt_template:\n"
+       "       basr    %r1,%r0\n"
+       "       lg      %r1,0f-.(%r1)\n"
+       "       br      %r1\n"
+       "0:     .quad   ftrace_caller\n"
+       "ftrace_plt_template_end:\n"
+       "       .previous\n"
+);
+#endif /* CONFIG_MODULES */
+
+static const char *ftrace_shared_hotpatch_trampoline(const char **end)
+{
+       const char *tstart, *tend;
+
+       tstart = ftrace_shared_hotpatch_trampoline_br;
+       tend = ftrace_shared_hotpatch_trampoline_br_end;
+#ifdef CONFIG_EXPOLINE
+       if (!nospec_disable) {
+               tstart = ftrace_shared_hotpatch_trampoline_ex;
+               tend = ftrace_shared_hotpatch_trampoline_ex_end;
+               if (test_facility(35)) { /* exrl */
+                       tstart = ftrace_shared_hotpatch_trampoline_exrl;
+                       tend = ftrace_shared_hotpatch_trampoline_exrl_end;
+               }
+       }
+#endif /* CONFIG_EXPOLINE */
+       if (end)
+               *end = tend;
+       return tstart;
+}
+
+bool ftrace_need_init_nop(void)
+{
+       return ftrace_shared_hotpatch_trampoline(NULL);
+}
+
+int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
+{
+       static struct ftrace_hotpatch_trampoline *next_vmlinux_trampoline =
+               __ftrace_hotpatch_trampolines_start;
+       static const char orig[6] = { 0xc0, 0x04, 0x00, 0x00, 0x00, 0x00 };
+       static struct ftrace_hotpatch_trampoline *trampoline;
+       struct ftrace_hotpatch_trampoline **next_trampoline;
+       struct ftrace_hotpatch_trampoline *trampolines_end;
+       struct ftrace_hotpatch_trampoline tmp;
+       struct ftrace_insn *insn;
+       const char *shared;
+       s32 disp;
+
+       BUILD_BUG_ON(sizeof(struct ftrace_hotpatch_trampoline) !=
+                    SIZEOF_FTRACE_HOTPATCH_TRAMPOLINE);
+
+       next_trampoline = &next_vmlinux_trampoline;
+       trampolines_end = __ftrace_hotpatch_trampolines_end;
+       shared = ftrace_shared_hotpatch_trampoline(NULL);
+#ifdef CONFIG_MODULES
+       if (mod) {
+               next_trampoline = &mod->arch.next_trampoline;
+               trampolines_end = mod->arch.trampolines_end;
+               shared = ftrace_plt;
+       }
+#endif
+
+       if (WARN_ON_ONCE(*next_trampoline >= trampolines_end))
+               return -ENOMEM;
+       trampoline = (*next_trampoline)++;
+
+       /* Check for the compiler-generated fentry nop (brcl 0, .). */
+       if (WARN_ON_ONCE(memcmp((const void *)rec->ip, &orig, sizeof(orig))))
+               return -EINVAL;
+
+       /* Generate the trampoline. */
+       tmp.brasl_opc = 0xc015; /* brasl %r1, shared */
+       tmp.brasl_disp = (shared - (const char *)&trampoline->brasl_opc) / 2;
+       tmp.interceptor = FTRACE_ADDR;
+       tmp.rest_of_intercepted_function = rec->ip + sizeof(struct ftrace_insn);
+       s390_kernel_write(trampoline, &tmp, sizeof(tmp));
+
+       /* Generate a jump to the trampoline. */
+       disp = ((char *)trampoline - (char *)rec->ip) / 2;
+       insn = (struct ftrace_insn *)rec->ip;
+       s390_kernel_write(&insn->disp, &disp, sizeof(disp));
+
+       return 0;
+}
 
 int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
                       unsigned long addr)
@@ -49,11 +175,45 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
        return 0;
 }
 
+static void ftrace_generate_nop_insn(struct ftrace_insn *insn)
+{
+       /* brcl 0,0 */
+       insn->opc = 0xc004;
+       insn->disp = 0;
+}
+
+static void ftrace_generate_call_insn(struct ftrace_insn *insn,
+                                     unsigned long ip)
+{
+       unsigned long target;
+
+       /* brasl r0,ftrace_caller */
+       target = FTRACE_ADDR;
+#ifdef CONFIG_MODULES
+       if (is_module_addr((void *)ip))
+               target = (unsigned long)ftrace_plt;
+#endif /* CONFIG_MODULES */
+       insn->opc = 0xc005;
+       insn->disp = (target - ip) / 2;
+}
+
+static void brcl_disable(void *brcl)
+{
+       u8 op = 0x04; /* set mask field to zero */
+
+       s390_kernel_write((char *)brcl + 1, &op, sizeof(op));
+}
+
 int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
                    unsigned long addr)
 {
        struct ftrace_insn orig, new, old;
 
+       if (ftrace_shared_hotpatch_trampoline(NULL)) {
+               brcl_disable((void *)rec->ip);
+               return 0;
+       }
+
        if (copy_from_kernel_nofault(&old, (void *) rec->ip, sizeof(old)))
                return -EFAULT;
        /* Replace ftrace call with a nop. */
@@ -67,10 +227,22 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
        return 0;
 }
 
+static void brcl_enable(void *brcl)
+{
+       u8 op = 0xf4; /* set mask field to all ones */
+
+       s390_kernel_write((char *)brcl + 1, &op, sizeof(op));
+}
+
 int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 {
        struct ftrace_insn orig, new, old;
 
+       if (ftrace_shared_hotpatch_trampoline(NULL)) {
+               brcl_enable((void *)rec->ip);
+               return 0;
+       }
+
        if (copy_from_kernel_nofault(&old, (void *) rec->ip, sizeof(old)))
                return -EFAULT;
        /* Replace nop with an ftrace call. */
@@ -95,22 +267,44 @@ int __init ftrace_dyn_arch_init(void)
        return 0;
 }
 
+void arch_ftrace_update_code(int command)
+{
+       if (ftrace_shared_hotpatch_trampoline(NULL))
+               ftrace_modify_all_code(command);
+       else
+               ftrace_run_stop_machine(command);
+}
+
+static void __ftrace_sync(void *dummy)
+{
+}
+
+int ftrace_arch_code_modify_post_process(void)
+{
+       if (ftrace_shared_hotpatch_trampoline(NULL)) {
+               /* Send SIGP to the other CPUs, so they see the new code. */
+               smp_call_function(__ftrace_sync, NULL, 1);
+       }
+       return 0;
+}
+
 #ifdef CONFIG_MODULES
 
 static int __init ftrace_plt_init(void)
 {
-       unsigned int *ip;
+       const char *start, *end;
 
-       ftrace_plt = (unsigned long) module_alloc(PAGE_SIZE);
+       ftrace_plt = module_alloc(PAGE_SIZE);
        if (!ftrace_plt)
                panic("cannot allocate ftrace plt\n");
-       ip = (unsigned int *) ftrace_plt;
-       ip[0] = 0x0d10e310; /* basr 1,0; lg 1,10(1); br 1 */
-       ip[1] = 0x100a0004;
-       ip[2] = 0x07f10000;
-       ip[3] = FTRACE_ADDR >> 32;
-       ip[4] = FTRACE_ADDR & 0xffffffff;
-       set_memory_ro(ftrace_plt, 1);
+
+       start = ftrace_shared_hotpatch_trampoline(&end);
+       if (!start) {
+               start = ftrace_plt_template;
+               end = ftrace_plt_template_end;
+       }
+       memcpy(ftrace_plt, start, end - start);
+       set_memory_ro((unsigned long)ftrace_plt, 1);
        return 0;
 }
 device_initcall(ftrace_plt_init);
@@ -147,17 +341,13 @@ NOKPROBE_SYMBOL(prepare_ftrace_return);
  */
 int ftrace_enable_ftrace_graph_caller(void)
 {
-       u8 op = 0x04; /* set mask field to zero */
-
-       s390_kernel_write(__va(ftrace_graph_caller)+1, &op, sizeof(op));
+       brcl_disable(__va(ftrace_graph_caller));
        return 0;
 }
 
 int ftrace_disable_ftrace_graph_caller(void)
 {
-       u8 op = 0xf4; /* set mask field to all ones */
-
-       s390_kernel_write(__va(ftrace_graph_caller)+1, &op, sizeof(op));
+       brcl_enable(__va(ftrace_graph_caller));
        return 0;
 }
 
diff --git a/arch/s390/kernel/ftrace.h b/arch/s390/kernel/ftrace.h
new file mode 100644 (file)
index 0000000..69e416f
--- /dev/null
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _FTRACE_H
+#define _FTRACE_H
+
+#include <asm/types.h>
+
+struct ftrace_hotpatch_trampoline {
+       u16 brasl_opc;
+       s32 brasl_disp;
+       s16: 16;
+       u64 rest_of_intercepted_function;
+       u64 interceptor;
+} __packed;
+
+extern struct ftrace_hotpatch_trampoline __ftrace_hotpatch_trampolines_start[];
+extern struct ftrace_hotpatch_trampoline __ftrace_hotpatch_trampolines_end[];
+extern const char ftrace_shared_hotpatch_trampoline_br[];
+extern const char ftrace_shared_hotpatch_trampoline_br_end[];
+extern const char ftrace_shared_hotpatch_trampoline_ex[];
+extern const char ftrace_shared_hotpatch_trampoline_ex_end[];
+extern const char ftrace_shared_hotpatch_trampoline_exrl[];
+extern const char ftrace_shared_hotpatch_trampoline_exrl_end[];
+extern const char ftrace_plt_template[];
+extern const char ftrace_plt_template_end[];
+
+#endif /* _FTRACE_H */
index 0c25388..114b549 100644 (file)
@@ -21,6 +21,7 @@ ENTRY(startup_continue)
        larl    %r1,tod_clock_base
        mvc     0(16,%r1),__LC_BOOT_CLOCK
        larl    %r13,.LPG1              # get base
+       lctlg   %c0,%c15,.Lctl-.LPG1(%r13)      # load control registers
 #
 # Setup stack
 #
@@ -41,3 +42,19 @@ ENTRY(startup_continue)
        .align  16
 .LPG1:
 .Ldw:  .quad   0x0002000180000000,0x0000000000000000
+.Lctl: .quad   0x04040000              # cr0: AFP registers & secondary space
+       .quad   0                       # cr1: primary space segment table
+       .quad   0                       # cr2: dispatchable unit control table
+       .quad   0                       # cr3: instruction authorization
+       .quad   0xffff                  # cr4: instruction authorization
+       .quad   0                       # cr5: primary-aste origin
+       .quad   0                       # cr6: I/O interrupts
+       .quad   0                       # cr7: secondary space segment table
+       .quad   0x0000000000008000      # cr8: access registers translation
+       .quad   0                       # cr9: tracing off
+       .quad   0                       # cr10: tracing off
+       .quad   0                       # cr11: tracing off
+       .quad   0                       # cr12: tracing off
+       .quad   0                       # cr13: home space segment table
+       .quad   0xc0000000              # cr14: machine check handling off
+       .quad   0                       # cr15: linkage stack operations
index 50e2c21..e2cc357 100644 (file)
@@ -179,8 +179,6 @@ static inline int __diag308(unsigned long subcode, void *addr)
 
 int diag308(unsigned long subcode, void *addr)
 {
-       if (IS_ENABLED(CONFIG_KASAN))
-               __arch_local_irq_stosm(0x04); /* enable DAT */
        diag_stat_inc(DIAG_STAT_X308);
        return __diag308(subcode, addr);
 }
@@ -1843,7 +1841,6 @@ static struct kobj_attribute on_restart_attr = __ATTR_RW(on_restart);
 
 static void __do_restart(void *ignore)
 {
-       __arch_local_irq_stosm(0x04); /* enable DAT */
        smp_send_stop();
 #ifdef CONFIG_CRASH_DUMP
        crash_kexec(NULL);
@@ -2082,7 +2079,7 @@ void s390_reset_system(void)
 
        /* Disable lowcore protection */
        __ctl_clear_bit(0, 28);
-       diag_dma_ops.diag308_reset();
+       diag_amode31_ops.diag308_reset();
 }
 
 #ifdef CONFIG_KEXEC_FILE
index af43535..b5245fa 100644 (file)
@@ -1,4 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
+#include <linux/minmax.h>
+#include <linux/string.h>
 #include <asm/ebcdic.h>
 #include <asm/ipl.h>
 
index 234d085..3a3145c 100644 (file)
@@ -228,7 +228,7 @@ int show_interrupts(struct seq_file *p, void *v)
        int index = *(loff_t *) v;
        int cpu, irq;
 
-       get_online_cpus();
+       cpus_read_lock();
        if (index == 0) {
                seq_puts(p, "           ");
                for_each_online_cpu(cpu)
@@ -258,7 +258,7 @@ int show_interrupts(struct seq_file *p, void *v)
                seq_putc(p, '\n');
        }
 out:
-       put_online_cpus();
+       cpus_read_unlock();
        return 0;
 }
 
index ab584e8..9156653 100644 (file)
@@ -36,7 +36,7 @@ static void jump_label_bug(struct jump_entry *entry, struct insn *expected,
        unsigned char *ipe = (unsigned char *)expected;
        unsigned char *ipn = (unsigned char *)new;
 
-       pr_emerg("Jump label code mismatch at %pS [%p]\n", ipc, ipc);
+       pr_emerg("Jump label code mismatch at %pS [%px]\n", ipc, ipc);
        pr_emerg("Found:    %6ph\n", ipc);
        pr_emerg("Expected: %6ph\n", ipe);
        pr_emerg("New:      %6ph\n", ipn);
index 1005a69..0505e55 100644 (file)
@@ -224,8 +224,8 @@ void arch_crash_save_vmcoreinfo(void)
        VMCOREINFO_SYMBOL(lowcore_ptr);
        VMCOREINFO_SYMBOL(high_memory);
        VMCOREINFO_LENGTH(lowcore_ptr, NR_CPUS);
-       vmcoreinfo_append_str("SDMA=%lx\n", __sdma);
-       vmcoreinfo_append_str("EDMA=%lx\n", __edma);
+       vmcoreinfo_append_str("SAMODE31=%lx\n", __samode31);
+       vmcoreinfo_append_str("EAMODE31=%lx\n", __eamode31);
        vmcoreinfo_append_str("KERNELOFFSET=%lx\n", kaslr_offset());
        mem_assign_absolute(S390_lowcore.vmcore_info, paddr_vmcoreinfo_note());
 }
@@ -263,7 +263,6 @@ static void __do_machine_kexec(void *data)
  */
 static void __machine_kexec(void *data)
 {
-       __arch_local_irq_stosm(0x04); /* enable DAT */
        pfault_fini();
        tracing_off();
        debug_locks_off();
index 4055f1c..b01ba46 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/elf.h>
 #include <linux/vmalloc.h>
 #include <linux/fs.h>
+#include <linux/ftrace.h>
 #include <linux/string.h>
 #include <linux/kernel.h>
 #include <linux/kasan.h>
@@ -23,6 +24,8 @@
 #include <asm/alternative.h>
 #include <asm/nospec-branch.h>
 #include <asm/facility.h>
+#include <asm/ftrace.lds.h>
+#include <asm/set_memory.h>
 
 #if 0
 #define DEBUGP printk
@@ -48,6 +51,13 @@ void *module_alloc(unsigned long size)
        return p;
 }
 
+#ifdef CONFIG_FUNCTION_TRACER
+void module_arch_cleanup(struct module *mod)
+{
+       module_memfree(mod->arch.trampolines_start);
+}
+#endif
+
 void module_arch_freeing_init(struct module *mod)
 {
        if (is_livepatch_module(mod) &&
@@ -466,6 +476,30 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
                                    write);
 }
 
+#ifdef CONFIG_FUNCTION_TRACER
+static int module_alloc_ftrace_hotpatch_trampolines(struct module *me,
+                                                   const Elf_Shdr *s)
+{
+       char *start, *end;
+       int numpages;
+       size_t size;
+
+       size = FTRACE_HOTPATCH_TRAMPOLINES_SIZE(s->sh_size);
+       numpages = DIV_ROUND_UP(size, PAGE_SIZE);
+       start = module_alloc(numpages * PAGE_SIZE);
+       if (!start)
+               return -ENOMEM;
+       set_memory_ro((unsigned long)start, numpages);
+       end = start + size;
+
+       me->arch.trampolines_start = (struct ftrace_hotpatch_trampoline *)start;
+       me->arch.trampolines_end = (struct ftrace_hotpatch_trampoline *)end;
+       me->arch.next_trampoline = me->arch.trampolines_start;
+
+       return 0;
+}
+#endif /* CONFIG_FUNCTION_TRACER */
+
 int module_finalize(const Elf_Ehdr *hdr,
                    const Elf_Shdr *sechdrs,
                    struct module *me)
@@ -473,6 +507,9 @@ int module_finalize(const Elf_Ehdr *hdr,
        const Elf_Shdr *s;
        char *secstrings, *secname;
        void *aseg;
+#ifdef CONFIG_FUNCTION_TRACER
+       int ret;
+#endif
 
        if (IS_ENABLED(CONFIG_EXPOLINE) &&
            !nospec_disable && me->arch.plt_size) {
@@ -507,6 +544,14 @@ int module_finalize(const Elf_Ehdr *hdr,
                if (IS_ENABLED(CONFIG_EXPOLINE) &&
                    (str_has_prefix(secname, ".s390_return")))
                        nospec_revert(aseg, aseg + s->sh_size);
+
+#ifdef CONFIG_FUNCTION_TRACER
+               if (!strcmp(FTRACE_CALLSITE_SECTION, secname)) {
+                       ret = module_alloc_ftrace_hotpatch_trampolines(me, s);
+                       if (ret < 0)
+                               return ret;
+               }
+#endif /* CONFIG_FUNCTION_TRACER */
        }
 
        jump_label_apply_nops(me);
index 5a7420b..4bef35b 100644 (file)
@@ -121,7 +121,7 @@ static void os_info_old_init(void)
 
        if (os_info_init)
                return;
-       if (!OLDMEM_BASE)
+       if (!oldmem_data.start)
                goto fail;
        if (copy_oldmem_kernel(&addr, &S390_lowcore.os_info, sizeof(addr)))
                goto fail;
index d7dc36e..2e3bb63 100644 (file)
@@ -1138,7 +1138,7 @@ static long cfset_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
        int ret;
 
-       get_online_cpus();
+       cpus_read_lock();
        mutex_lock(&cfset_ctrset_mutex);
        switch (cmd) {
        case S390_HWCTR_START:
@@ -1155,7 +1155,7 @@ static long cfset_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
                break;
        }
        mutex_unlock(&cfset_ctrset_mutex);
-       put_online_cpus();
+       cpus_read_unlock();
        return ret;
 }
 
index 82df39b..d9d4a80 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/cpufeature.h>
 #include <linux/bitops.h>
 #include <linux/kernel.h>
+#include <linux/random.h>
 #include <linux/sched/mm.h>
 #include <linux/init.h>
 #include <linux/seq_file.h>
 #include <asm/elf.h>
 #include <asm/lowcore.h>
 #include <asm/param.h>
+#include <asm/sclp.h>
 #include <asm/smp.h>
 
+unsigned long __read_mostly elf_hwcap;
+char elf_platform[ELF_PLATFORM_SIZE];
+
 struct cpu_info {
        unsigned int cpu_mhz_dynamic;
        unsigned int cpu_mhz_static;
@@ -113,15 +118,33 @@ static void show_facilities(struct seq_file *m)
 static void show_cpu_summary(struct seq_file *m, void *v)
 {
        static const char *hwcap_str[] = {
-               "esan3", "zarch", "stfle", "msa", "ldisp", "eimm", "dfp",
-               "edat", "etf3eh", "highgprs", "te", "vx", "vxd", "vxe", "gs",
-               "vxe2", "vxp", "sort", "dflt"
-       };
-       static const char * const int_hwcap_str[] = {
-               "sie"
+               [HWCAP_NR_ESAN3]        = "esan3",
+               [HWCAP_NR_ZARCH]        = "zarch",
+               [HWCAP_NR_STFLE]        = "stfle",
+               [HWCAP_NR_MSA]          = "msa",
+               [HWCAP_NR_LDISP]        = "ldisp",
+               [HWCAP_NR_EIMM]         = "eimm",
+               [HWCAP_NR_DFP]          = "dfp",
+               [HWCAP_NR_HPAGE]        = "edat",
+               [HWCAP_NR_ETF3EH]       = "etf3eh",
+               [HWCAP_NR_HIGH_GPRS]    = "highgprs",
+               [HWCAP_NR_TE]           = "te",
+               [HWCAP_NR_VXRS]         = "vx",
+               [HWCAP_NR_VXRS_BCD]     = "vxd",
+               [HWCAP_NR_VXRS_EXT]     = "vxe",
+               [HWCAP_NR_GS]           = "gs",
+               [HWCAP_NR_VXRS_EXT2]    = "vxe2",
+               [HWCAP_NR_VXRS_PDE]     = "vxp",
+               [HWCAP_NR_SORT]         = "sort",
+               [HWCAP_NR_DFLT]         = "dflt",
+               [HWCAP_NR_VXRS_PDE2]    = "vxp2",
+               [HWCAP_NR_NNPA]         = "nnpa",
+               [HWCAP_NR_PCI_MIO]      = "pcimio",
+               [HWCAP_NR_SIE]          = "sie",
        };
        int i, cpu;
 
+       BUILD_BUG_ON(ARRAY_SIZE(hwcap_str) != HWCAP_NR_MAX);
        seq_printf(m, "vendor_id       : IBM/S390\n"
                   "# processors    : %i\n"
                   "bogomips per cpu: %lu.%02lu\n",
@@ -132,9 +155,6 @@ static void show_cpu_summary(struct seq_file *m, void *v)
        for (i = 0; i < ARRAY_SIZE(hwcap_str); i++)
                if (hwcap_str[i] && (elf_hwcap & (1UL << i)))
                        seq_printf(m, "%s ", hwcap_str[i]);
-       for (i = 0; i < ARRAY_SIZE(int_hwcap_str); i++)
-               if (int_hwcap_str[i] && (int_hwcap & (1UL << i)))
-                       seq_printf(m, "%s ", int_hwcap_str[i]);
        seq_puts(m, "\n");
        show_facilities(m);
        show_cacheinfo(m);
@@ -149,6 +169,141 @@ static void show_cpu_summary(struct seq_file *m, void *v)
        }
 }
 
+static int __init setup_hwcaps(void)
+{
+       /* instructions named N3, "backported" to esa-mode */
+       if (test_facility(0))
+               elf_hwcap |= HWCAP_ESAN3;
+
+       /* z/Architecture mode active */
+       elf_hwcap |= HWCAP_ZARCH;
+
+       /* store-facility-list-extended */
+       if (test_facility(7))
+               elf_hwcap |= HWCAP_STFLE;
+
+       /* message-security assist */
+       if (test_facility(17))
+               elf_hwcap |= HWCAP_MSA;
+
+       /* long-displacement */
+       if (test_facility(19))
+               elf_hwcap |= HWCAP_LDISP;
+
+       /* extended-immediate */
+       if (test_facility(21))
+               elf_hwcap |= HWCAP_EIMM;
+
+       /* extended-translation facility 3 enhancement */
+       if (test_facility(22) && test_facility(30))
+               elf_hwcap |= HWCAP_ETF3EH;
+
+       /* decimal floating point & perform floating point operation */
+       if (test_facility(42) && test_facility(44))
+               elf_hwcap |= HWCAP_DFP;
+
+       /* huge page support */
+       if (MACHINE_HAS_EDAT1)
+               elf_hwcap |= HWCAP_HPAGE;
+
+       /* 64-bit register support for 31-bit processes */
+       elf_hwcap |= HWCAP_HIGH_GPRS;
+
+       /* transactional execution */
+       if (MACHINE_HAS_TE)
+               elf_hwcap |= HWCAP_TE;
+
+       /*
+        * Vector extension can be disabled with the "novx" parameter.
+        * Use MACHINE_HAS_VX instead of facility bit 129.
+        */
+       if (MACHINE_HAS_VX) {
+               elf_hwcap |= HWCAP_VXRS;
+               if (test_facility(134))
+                       elf_hwcap |= HWCAP_VXRS_BCD;
+               if (test_facility(135))
+                       elf_hwcap |= HWCAP_VXRS_EXT;
+               if (test_facility(148))
+                       elf_hwcap |= HWCAP_VXRS_EXT2;
+               if (test_facility(152))
+                       elf_hwcap |= HWCAP_VXRS_PDE;
+               if (test_facility(192))
+                       elf_hwcap |= HWCAP_VXRS_PDE2;
+       }
+
+       if (test_facility(150))
+               elf_hwcap |= HWCAP_SORT;
+
+       if (test_facility(151))
+               elf_hwcap |= HWCAP_DFLT;
+
+       if (test_facility(165))
+               elf_hwcap |= HWCAP_NNPA;
+
+       /* guarded storage */
+       if (MACHINE_HAS_GS)
+               elf_hwcap |= HWCAP_GS;
+
+       if (MACHINE_HAS_PCI_MIO)
+               elf_hwcap |= HWCAP_PCI_MIO;
+
+       /* virtualization support */
+       if (sclp.has_sief2)
+               elf_hwcap |= HWCAP_SIE;
+
+       return 0;
+}
+arch_initcall(setup_hwcaps);
+
+static int __init setup_elf_platform(void)
+{
+       struct cpuid cpu_id;
+
+       get_cpu_id(&cpu_id);
+       add_device_randomness(&cpu_id, sizeof(cpu_id));
+       switch (cpu_id.machine) {
+       case 0x2064:
+       case 0x2066:
+       default:        /* Use "z900" as default for 64 bit kernels. */
+               strcpy(elf_platform, "z900");
+               break;
+       case 0x2084:
+       case 0x2086:
+               strcpy(elf_platform, "z990");
+               break;
+       case 0x2094:
+       case 0x2096:
+               strcpy(elf_platform, "z9-109");
+               break;
+       case 0x2097:
+       case 0x2098:
+               strcpy(elf_platform, "z10");
+               break;
+       case 0x2817:
+       case 0x2818:
+               strcpy(elf_platform, "z196");
+               break;
+       case 0x2827:
+       case 0x2828:
+               strcpy(elf_platform, "zEC12");
+               break;
+       case 0x2964:
+       case 0x2965:
+               strcpy(elf_platform, "z13");
+               break;
+       case 0x3906:
+       case 0x3907:
+               strcpy(elf_platform, "z14");
+               break;
+       case 0x8561:
+       case 0x8562:
+               strcpy(elf_platform, "z15");
+               break;
+       }
+       return 0;
+}
+arch_initcall(setup_elf_platform);
+
 static void show_cpu_topology(struct seq_file *m, unsigned long n)
 {
 #ifdef CONFIG_SCHED_TOPOLOGY
@@ -210,7 +365,7 @@ static inline void *c_update(loff_t *pos)
 
 static void *c_start(struct seq_file *m, loff_t *pos)
 {
-       get_online_cpus();
+       cpus_read_lock();
        return c_update(pos);
 }
 
@@ -222,7 +377,7 @@ static void *c_next(struct seq_file *m, void *v, loff_t *pos)
 
 static void c_stop(struct seq_file *m, void *v)
 {
-       put_online_cpus();
+       cpus_read_unlock();
 }
 
 const struct seq_operations cpuinfo_op = {
index ff0f9e8..fe14beb 100644 (file)
@@ -89,27 +89,71 @@ EXPORT_SYMBOL(console_devno);
 unsigned int console_irq = -1;
 EXPORT_SYMBOL(console_irq);
 
-unsigned long elf_hwcap __read_mostly = 0;
-char elf_platform[ELF_PLATFORM_SIZE];
+/*
+ * Some code and data needs to stay below 2 GB, even when the kernel would be
+ * relocated above 2 GB, because it has to use 31 bit addresses.
+ * Such code and data is part of the .amode31 section.
+ */
+unsigned long __amode31_ref __samode31 = __pa(&_samode31);
+unsigned long __amode31_ref __eamode31 = __pa(&_eamode31);
+unsigned long __amode31_ref __stext_amode31 = __pa(&_stext_amode31);
+unsigned long __amode31_ref __etext_amode31 = __pa(&_etext_amode31);
+struct exception_table_entry __amode31_ref *__start_amode31_ex_table = _start_amode31_ex_table;
+struct exception_table_entry __amode31_ref *__stop_amode31_ex_table = _stop_amode31_ex_table;
+
+/*
+ * Control registers CR2, CR5 and CR15 are initialized with addresses
+ * of tables that must be placed below 2G which is handled by the AMODE31
+ * sections.
+ * Because the AMODE31 sections are relocated below 2G at startup,
+ * the content of control registers CR2, CR5 and CR15 must be updated
+ * with new addresses after the relocation. The initial initialization of
+ * control registers occurs in head64.S and then gets updated again after AMODE31
+ * relocation. We must access the relevant AMODE31 tables indirectly via
+ * pointers placed in the .amode31.refs linker section. Those pointers get
+ * updated automatically during AMODE31 relocation and always contain a valid
+ * address within AMODE31 sections.
+ */
+
+static __amode31_data u32 __ctl_duct_amode31[16] __aligned(64);
+
+static __amode31_data u64 __ctl_aste_amode31[8] __aligned(64) = {
+       [1] = 0xffffffffffffffff
+};
+
+static __amode31_data u32 __ctl_duald_amode31[32] __aligned(128) = {
+       0x80000000, 0, 0, 0,
+       0x80000000, 0, 0, 0,
+       0x80000000, 0, 0, 0,
+       0x80000000, 0, 0, 0,
+       0x80000000, 0, 0, 0,
+       0x80000000, 0, 0, 0,
+       0x80000000, 0, 0, 0,
+       0x80000000, 0, 0, 0
+};
+
+static __amode31_data u32 __ctl_linkage_stack_amode31[8] __aligned(64) = {
+       0, 0, 0x89000000, 0,
+       0, 0, 0x8a000000, 0
+};
 
-unsigned long int_hwcap = 0;
+static u64 __amode31_ref *__ctl_aste = __ctl_aste_amode31;
+static u32 __amode31_ref *__ctl_duald = __ctl_duald_amode31;
+static u32 __amode31_ref *__ctl_linkage_stack = __ctl_linkage_stack_amode31;
+static u32 __amode31_ref *__ctl_duct = __ctl_duct_amode31;
 
 int __bootdata(noexec_disabled);
 unsigned long __bootdata(ident_map_size);
 struct mem_detect_info __bootdata(mem_detect);
+struct initrd_data __bootdata(initrd_data);
 
-struct exception_table_entry *__bootdata_preserved(__start_dma_ex_table);
-struct exception_table_entry *__bootdata_preserved(__stop_dma_ex_table);
-unsigned long __bootdata_preserved(__stext_dma);
-unsigned long __bootdata_preserved(__etext_dma);
-unsigned long __bootdata_preserved(__sdma);
-unsigned long __bootdata_preserved(__edma);
 unsigned long __bootdata_preserved(__kaslr_offset);
 unsigned int __bootdata_preserved(zlib_dfltcc_support);
 EXPORT_SYMBOL(zlib_dfltcc_support);
 u64 __bootdata_preserved(stfle_fac_list[16]);
 EXPORT_SYMBOL(stfle_fac_list);
 u64 __bootdata_preserved(alt_stfle_fac_list[16]);
+struct oldmem_data __bootdata_preserved(oldmem_data);
 
 unsigned long VMALLOC_START;
 EXPORT_SYMBOL(VMALLOC_START);
@@ -254,7 +298,7 @@ static void __init setup_zfcpdump(void)
 {
        if (!is_ipl_type_dump())
                return;
-       if (OLDMEM_BASE)
+       if (oldmem_data.start)
                return;
        strcat(boot_command_line, " cio_ignore=all,!ipldev,!condev");
        console_loglevel = 2;
@@ -421,7 +465,7 @@ static void __init setup_lowcore_dat_off(void)
        lc->restart_stack = (unsigned long) restart_stack;
        lc->restart_fn = (unsigned long) do_restart;
        lc->restart_data = 0;
-       lc->restart_source = -1UL;
+       lc->restart_source = -1U;
 
        mcck_stack = (unsigned long)memblock_alloc(THREAD_SIZE, THREAD_SIZE);
        if (!mcck_stack)
@@ -450,12 +494,19 @@ static void __init setup_lowcore_dat_off(void)
 
 static void __init setup_lowcore_dat_on(void)
 {
+       struct lowcore *lc = lowcore_ptr[0];
+
        __ctl_clear_bit(0, 28);
        S390_lowcore.external_new_psw.mask |= PSW_MASK_DAT;
        S390_lowcore.svc_new_psw.mask |= PSW_MASK_DAT;
        S390_lowcore.program_new_psw.mask |= PSW_MASK_DAT;
        S390_lowcore.io_new_psw.mask |= PSW_MASK_DAT;
+       __ctl_store(S390_lowcore.cregs_save_area, 0, 15);
        __ctl_set_bit(0, 28);
+       mem_assign_absolute(S390_lowcore.restart_flags, RESTART_FLAG_CTLREGS);
+       mem_assign_absolute(S390_lowcore.program_new_psw, lc->program_new_psw);
+       memcpy_absolute(&S390_lowcore.cregs_save_area, lc->cregs_save_area,
+                       sizeof(S390_lowcore.cregs_save_area));
 }
 
 static struct resource code_resource = {
@@ -610,9 +661,9 @@ static void __init reserve_crashkernel(void)
                return;
        }
 
-       low = crash_base ?: OLDMEM_BASE;
+       low = crash_base ?: oldmem_data.start;
        high = low + crash_size;
-       if (low >= OLDMEM_BASE && high <= OLDMEM_BASE + OLDMEM_SIZE) {
+       if (low >= oldmem_data.start && high <= oldmem_data.start + oldmem_data.size) {
                /* The crashkernel fits into OLDMEM, reuse OLDMEM */
                crash_base = low;
        } else {
@@ -639,7 +690,7 @@ static void __init reserve_crashkernel(void)
        if (register_memory_notifier(&kdump_mem_nb))
                return;
 
-       if (!OLDMEM_BASE && MACHINE_IS_VM)
+       if (!oldmem_data.start && MACHINE_IS_VM)
                diag10_range(PFN_DOWN(crash_base), PFN_DOWN(crash_size));
        crashk_res.start = crash_base;
        crashk_res.end = crash_base + crash_size - 1;
@@ -658,11 +709,11 @@ static void __init reserve_crashkernel(void)
 static void __init reserve_initrd(void)
 {
 #ifdef CONFIG_BLK_DEV_INITRD
-       if (!INITRD_START || !INITRD_SIZE)
+       if (!initrd_data.start || !initrd_data.size)
                return;
-       initrd_start = INITRD_START;
-       initrd_end = initrd_start + INITRD_SIZE;
-       memblock_reserve(INITRD_START, INITRD_SIZE);
+       initrd_start = initrd_data.start;
+       initrd_end = initrd_start + initrd_data.size;
+       memblock_reserve(initrd_data.start, initrd_data.size);
 #endif
 }
 
@@ -732,10 +783,10 @@ static void __init memblock_add_mem_detect_info(void)
 static void __init check_initrd(void)
 {
 #ifdef CONFIG_BLK_DEV_INITRD
-       if (INITRD_START && INITRD_SIZE &&
-           !memblock_is_region_memory(INITRD_START, INITRD_SIZE)) {
+       if (initrd_data.start && initrd_data.size &&
+           !memblock_is_region_memory(initrd_data.start, initrd_data.size)) {
                pr_err("The initial RAM disk does not fit into the memory\n");
-               memblock_free(INITRD_START, INITRD_SIZE);
+               memblock_free(initrd_data.start, initrd_data.size);
                initrd_start = initrd_end = 0;
        }
 #endif
@@ -748,10 +799,10 @@ static void __init reserve_kernel(void)
 {
        unsigned long start_pfn = PFN_UP(__pa(_end));
 
-       memblock_reserve(0, HEAD_END);
+       memblock_reserve(0, STARTUP_NORMAL_OFFSET);
+       memblock_reserve((unsigned long)sclp_early_sccb, EXT_SCCB_READ_SCP);
        memblock_reserve((unsigned long)_stext, PFN_PHYS(start_pfn)
                         - (unsigned long)_stext);
-       memblock_reserve(__sdma, __edma - __sdma);
 }
 
 static void __init setup_memory(void)
@@ -771,152 +822,52 @@ static void __init setup_memory(void)
        memblock_enforce_memory_limit(memblock_end_of_DRAM());
 }
 
-/*
- * Setup hardware capabilities.
- */
-static int __init setup_hwcaps(void)
+static void __init relocate_amode31_section(void)
 {
-       static const int stfl_bits[6] = { 0, 2, 7, 17, 19, 21 };
-       struct cpuid cpu_id;
-       int i;
-
-       /*
-        * The store facility list bits numbers as found in the principles
-        * of operation are numbered with bit 1UL<<31 as number 0 to
-        * bit 1UL<<0 as number 31.
-        *   Bit 0: instructions named N3, "backported" to esa-mode
-        *   Bit 2: z/Architecture mode is active
-        *   Bit 7: the store-facility-list-extended facility is installed
-        *   Bit 17: the message-security assist is installed
-        *   Bit 19: the long-displacement facility is installed
-        *   Bit 21: the extended-immediate facility is installed
-        *   Bit 22: extended-translation facility 3 is installed
-        *   Bit 30: extended-translation facility 3 enhancement facility
-        * These get translated to:
-        *   HWCAP_S390_ESAN3 bit 0, HWCAP_S390_ZARCH bit 1,
-        *   HWCAP_S390_STFLE bit 2, HWCAP_S390_MSA bit 3,
-        *   HWCAP_S390_LDISP bit 4, HWCAP_S390_EIMM bit 5 and
-        *   HWCAP_S390_ETF3EH bit 8 (22 && 30).
-        */
-       for (i = 0; i < 6; i++)
-               if (test_facility(stfl_bits[i]))
-                       elf_hwcap |= 1UL << i;
-
-       if (test_facility(22) && test_facility(30))
-               elf_hwcap |= HWCAP_S390_ETF3EH;
-
-       /*
-        * Check for additional facilities with store-facility-list-extended.
-        * stfle stores doublewords (8 byte) with bit 1ULL<<63 as bit 0
-        * and 1ULL<<0 as bit 63. Bits 0-31 contain the same information
-        * as stored by stfl, bits 32-xxx contain additional facilities.
-        * How many facility words are stored depends on the number of
-        * doublewords passed to the instruction. The additional facilities
-        * are:
-        *   Bit 42: decimal floating point facility is installed
-        *   Bit 44: perform floating point operation facility is installed
-        * translated to:
-        *   HWCAP_S390_DFP bit 6 (42 && 44).
-        */
-       if ((elf_hwcap & (1UL << 2)) && test_facility(42) && test_facility(44))
-               elf_hwcap |= HWCAP_S390_DFP;
-
-       /*
-        * Huge page support HWCAP_S390_HPAGE is bit 7.
-        */
-       if (MACHINE_HAS_EDAT1)
-               elf_hwcap |= HWCAP_S390_HPAGE;
-
-       /*
-        * 64-bit register support for 31-bit processes
-        * HWCAP_S390_HIGH_GPRS is bit 9.
-        */
-       elf_hwcap |= HWCAP_S390_HIGH_GPRS;
-
-       /*
-        * Transactional execution support HWCAP_S390_TE is bit 10.
-        */
-       if (MACHINE_HAS_TE)
-               elf_hwcap |= HWCAP_S390_TE;
-
-       /*
-        * Vector extension HWCAP_S390_VXRS is bit 11. The Vector extension
-        * can be disabled with the "novx" parameter. Use MACHINE_HAS_VX
-        * instead of facility bit 129.
-        */
-       if (MACHINE_HAS_VX) {
-               elf_hwcap |= HWCAP_S390_VXRS;
-               if (test_facility(134))
-                       elf_hwcap |= HWCAP_S390_VXRS_BCD;
-               if (test_facility(135))
-                       elf_hwcap |= HWCAP_S390_VXRS_EXT;
-               if (test_facility(148))
-                       elf_hwcap |= HWCAP_S390_VXRS_EXT2;
-               if (test_facility(152))
-                       elf_hwcap |= HWCAP_S390_VXRS_PDE;
-       }
-       if (test_facility(150))
-               elf_hwcap |= HWCAP_S390_SORT;
-       if (test_facility(151))
-               elf_hwcap |= HWCAP_S390_DFLT;
-
-       /*
-        * Guarded storage support HWCAP_S390_GS is bit 12.
-        */
-       if (MACHINE_HAS_GS)
-               elf_hwcap |= HWCAP_S390_GS;
-
-       get_cpu_id(&cpu_id);
-       add_device_randomness(&cpu_id, sizeof(cpu_id));
-       switch (cpu_id.machine) {
-       case 0x2064:
-       case 0x2066:
-       default:        /* Use "z900" as default for 64 bit kernels. */
-               strcpy(elf_platform, "z900");
-               break;
-       case 0x2084:
-       case 0x2086:
-               strcpy(elf_platform, "z990");
-               break;
-       case 0x2094:
-       case 0x2096:
-               strcpy(elf_platform, "z9-109");
-               break;
-       case 0x2097:
-       case 0x2098:
-               strcpy(elf_platform, "z10");
-               break;
-       case 0x2817:
-       case 0x2818:
-               strcpy(elf_platform, "z196");
-               break;
-       case 0x2827:
-       case 0x2828:
-               strcpy(elf_platform, "zEC12");
-               break;
-       case 0x2964:
-       case 0x2965:
-               strcpy(elf_platform, "z13");
-               break;
-       case 0x3906:
-       case 0x3907:
-               strcpy(elf_platform, "z14");
-               break;
-       case 0x8561:
-       case 0x8562:
-               strcpy(elf_platform, "z15");
-               break;
-       }
-
-       /*
-        * Virtualization support HWCAP_INT_SIE is bit 0.
-        */
-       if (sclp.has_sief2)
-               int_hwcap |= HWCAP_INT_SIE;
+       unsigned long amode31_addr, amode31_size;
+       long amode31_offset;
+       long *ptr;
+
+       /* Allocate a new AMODE31 capable memory region */
+       amode31_size = __eamode31 - __samode31;
+       pr_info("Relocating AMODE31 section of size 0x%08lx\n", amode31_size);
+       amode31_addr = (unsigned long)memblock_alloc_low(amode31_size, PAGE_SIZE);
+       if (!amode31_addr)
+               panic("Failed to allocate memory for AMODE31 section\n");
+       amode31_offset = amode31_addr - __samode31;
+
+       /* Move original AMODE31 section to the new one */
+       memmove((void *)amode31_addr, (void *)__samode31, amode31_size);
+       /* Zero out the old AMODE31 section to catch invalid accesses within it */
+       memset((void *)__samode31, 0, amode31_size);
+
+       /* Update all AMODE31 region references */
+       for (ptr = _start_amode31_refs; ptr != _end_amode31_refs; ptr++)
+               *ptr += amode31_offset;
+}
 
-       return 0;
+/* This must be called after AMODE31 relocation */
+static void __init setup_cr(void)
+{
+       union ctlreg2 cr2;
+       union ctlreg5 cr5;
+       union ctlreg15 cr15;
+
+       __ctl_duct[1] = (unsigned long)__ctl_aste;
+       __ctl_duct[2] = (unsigned long)__ctl_aste;
+       __ctl_duct[4] = (unsigned long)__ctl_duald;
+
+       /* Update control registers CR2, CR5 and CR15 */
+       __ctl_store(cr2.val, 2, 2);
+       __ctl_store(cr5.val, 5, 5);
+       __ctl_store(cr15.val, 15, 15);
+       cr2.ducto = (unsigned long)__ctl_duct >> 6;
+       cr5.pasteo = (unsigned long)__ctl_duct >> 6;
+       cr15.lsea = (unsigned long)__ctl_linkage_stack >> 3;
+       __ctl_load(cr2.val, 2, 2);
+       __ctl_load(cr5.val, 5, 5);
+       __ctl_load(cr15.val, 15, 15);
 }
-arch_initcall(setup_hwcaps);
 
 /*
  * Add system information as device randomness
@@ -1059,6 +1010,9 @@ void __init setup_arch(char **cmdline_p)
 
        free_mem_detect_info();
 
+       relocate_amode31_section();
+       setup_cr();
+
        setup_uv();
        setup_memory_end();
        setup_memory();
index 78ef53b..307f5d9 100644 (file)
@@ -533,9 +533,3 @@ void arch_do_signal_or_restart(struct pt_regs *regs, bool has_signal)
         */
        restore_saved_sigmask();
 }
-
-void do_notify_resume(struct pt_regs *regs)
-{
-       tracehook_notify_resume(regs);
-       rseq_handle_notify_resume(NULL, regs);
-}
index 8984711..2a991e4 100644 (file)
@@ -252,6 +252,7 @@ static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
        cpumask_set_cpu(cpu, &init_mm.context.cpu_attach_mask);
        cpumask_set_cpu(cpu, mm_cpumask(&init_mm));
        lc->cpu_nr = cpu;
+       lc->restart_flags = RESTART_FLAG_CTLREGS;
        lc->spinlock_lockval = arch_spin_lockval(cpu);
        lc->spinlock_index = 0;
        lc->percpu_offset = __per_cpu_offset[cpu];
@@ -294,10 +295,10 @@ static void pcpu_start_fn(struct pcpu *pcpu, void (*func)(void *), void *data)
 
        cpu = pcpu - pcpu_devices;
        lc = lowcore_ptr[cpu];
-       lc->restart_stack = lc->nodat_stack;
+       lc->restart_stack = lc->kernel_stack;
        lc->restart_fn = (unsigned long) func;
        lc->restart_data = (unsigned long) data;
-       lc->restart_source = -1UL;
+       lc->restart_source = -1U;
        pcpu_sigp_retry(pcpu, SIGP_RESTART, 0);
 }
 
@@ -311,12 +312,12 @@ static void __pcpu_delegate(pcpu_delegate_fn *func, void *data)
        func(data);     /* should not return */
 }
 
-static void __no_sanitize_address pcpu_delegate(struct pcpu *pcpu,
-                                               pcpu_delegate_fn *func,
-                                               void *data, unsigned long stack)
+static void pcpu_delegate(struct pcpu *pcpu,
+                         pcpu_delegate_fn *func,
+                         void *data, unsigned long stack)
 {
        struct lowcore *lc = lowcore_ptr[pcpu - pcpu_devices];
-       unsigned long source_cpu = stap();
+       unsigned int source_cpu = stap();
 
        __load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
        if (pcpu->address == source_cpu) {
@@ -569,6 +570,9 @@ static void smp_ctl_bit_callback(void *info)
        __ctl_load(cregs, 0, 15);
 }
 
+static DEFINE_SPINLOCK(ctl_lock);
+static unsigned long ctlreg;
+
 /*
  * Set a bit in a control register of all cpus
  */
@@ -576,6 +580,11 @@ void smp_ctl_set_bit(int cr, int bit)
 {
        struct ec_creg_mask_parms parms = { 1UL << bit, -1UL, cr };
 
+       spin_lock(&ctl_lock);
+       memcpy_absolute(&ctlreg, &S390_lowcore.cregs_save_area[cr], sizeof(ctlreg));
+       __set_bit(bit, &ctlreg);
+       memcpy_absolute(&S390_lowcore.cregs_save_area[cr], &ctlreg, sizeof(ctlreg));
+       spin_unlock(&ctl_lock);
        on_each_cpu(smp_ctl_bit_callback, &parms, 1);
 }
 EXPORT_SYMBOL(smp_ctl_set_bit);
@@ -587,6 +596,11 @@ void smp_ctl_clear_bit(int cr, int bit)
 {
        struct ec_creg_mask_parms parms = { 0, ~(1UL << bit), cr };
 
+       spin_lock(&ctl_lock);
+       memcpy_absolute(&ctlreg, &S390_lowcore.cregs_save_area[cr], sizeof(ctlreg));
+       __clear_bit(bit, &ctlreg);
+       memcpy_absolute(&S390_lowcore.cregs_save_area[cr], &ctlreg, sizeof(ctlreg));
+       spin_unlock(&ctl_lock);
        on_each_cpu(smp_ctl_bit_callback, &parms, 1);
 }
 EXPORT_SYMBOL(smp_ctl_clear_bit);
@@ -673,7 +687,7 @@ void __init smp_save_dump_cpus(void)
        unsigned long page;
        bool is_boot_cpu;
 
-       if (!(OLDMEM_BASE || is_ipl_type_dump()))
+       if (!(oldmem_data.start || is_ipl_type_dump()))
                /* No previous system present, normal boot. */
                return;
        /* Allocate a page as dumping area for the store status sigps */
@@ -704,12 +718,12 @@ void __init smp_save_dump_cpus(void)
                 * these registers an SCLP request is required which is
                 * done by drivers/s390/char/zcore.c:init_cpu_info()
                 */
-               if (!is_boot_cpu || OLDMEM_BASE)
+               if (!is_boot_cpu || oldmem_data.start)
                        /* Get the CPU registers */
                        smp_save_cpu_regs(sa, addr, is_boot_cpu, page);
        }
        memblock_free(page, PAGE_SIZE);
-       diag_dma_ops.diag308_reset();
+       diag_amode31_ops.diag308_reset();
        pcpu_set_smt(0);
 }
 #endif /* CONFIG_CRASH_DUMP */
@@ -793,7 +807,7 @@ static int __smp_rescan_cpus(struct sclp_core_info *info, bool early)
        u16 core_id;
        int nr, i;
 
-       get_online_cpus();
+       cpus_read_lock();
        mutex_lock(&smp_cpu_state_mutex);
        nr = 0;
        cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
@@ -816,7 +830,7 @@ static int __smp_rescan_cpus(struct sclp_core_info *info, bool early)
                nr += smp_add_core(&info->core[i], &avail, configured, early);
        }
        mutex_unlock(&smp_cpu_state_mutex);
-       put_online_cpus();
+       cpus_read_unlock();
        return nr;
 }
 
@@ -868,11 +882,19 @@ void __init smp_detect_cpus(void)
        memblock_free_early((unsigned long)info, sizeof(*info));
 }
 
-static void smp_init_secondary(void)
+/*
+ *     Activate a secondary processor.
+ */
+static void smp_start_secondary(void *cpuvoid)
 {
        int cpu = raw_smp_processor_id();
 
        S390_lowcore.last_update_clock = get_tod_clock();
+       S390_lowcore.restart_stack = (unsigned long)restart_stack;
+       S390_lowcore.restart_fn = (unsigned long)do_restart;
+       S390_lowcore.restart_data = 0;
+       S390_lowcore.restart_source = -1U;
+       S390_lowcore.restart_flags = 0;
        restore_access_regs(S390_lowcore.access_regs_save_area);
        cpu_init();
        rcu_cpu_starting(cpu);
@@ -892,20 +914,6 @@ static void smp_init_secondary(void)
        cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
 }
 
-/*
- *     Activate a secondary processor.
- */
-static void __no_sanitize_address smp_start_secondary(void *cpuvoid)
-{
-       S390_lowcore.restart_stack = (unsigned long) restart_stack;
-       S390_lowcore.restart_fn = (unsigned long) do_restart;
-       S390_lowcore.restart_data = 0;
-       S390_lowcore.restart_source = -1UL;
-       __ctl_load(S390_lowcore.cregs_save_area, 0, 15);
-       __load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
-       call_on_stack_noreturn(smp_init_secondary, S390_lowcore.kernel_stack);
-}
-
 /* Upping and downing of CPUs */
 int __cpu_up(unsigned int cpu, struct task_struct *tidle)
 {
@@ -1055,7 +1063,7 @@ static ssize_t cpu_configure_store(struct device *dev,
                return -EINVAL;
        if (val != 0 && val != 1)
                return -EINVAL;
-       get_online_cpus();
+       cpus_read_lock();
        mutex_lock(&smp_cpu_state_mutex);
        rc = -EBUSY;
        /* disallow configuration changes of online cpus and cpu 0 */
@@ -1104,7 +1112,7 @@ static ssize_t cpu_configure_store(struct device *dev,
        }
 out:
        mutex_unlock(&smp_cpu_state_mutex);
-       put_online_cpus();
+       cpus_read_unlock();
        return rc ? rc : count;
 }
 static DEVICE_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);
diff --git a/arch/s390/kernel/text_amode31.S b/arch/s390/kernel/text_amode31.S
new file mode 100644 (file)
index 0000000..868e4a6
--- /dev/null
@@ -0,0 +1,158 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Code that needs to run below 2 GB.
+ *
+ * Copyright IBM Corp. 2019
+ */
+
+#include <linux/linkage.h>
+#include <asm/errno.h>
+#include <asm/sigp.h>
+
+       .section .amode31.text,"ax"
+/*
+ * Simplified version of expoline thunk. The normal thunks can not be used here,
+ * because they might be more than 2 GB away, and not reachable by the relative
+ * branch. No comdat, exrl, etc. optimizations used here, because it only
+ * affects a few functions that are not performance-relevant.
+ */
+       .macro BR_EX_AMODE31_r14
+       larl    %r1,0f
+       ex      0,0(%r1)
+       j       .
+0:     br      %r14
+       .endm
+
+/*
+ * int _diag14_amode31(unsigned long rx, unsigned long ry1, unsigned long subcode)
+ */
+ENTRY(_diag14_amode31)
+       lgr     %r1,%r2
+       lgr     %r2,%r3
+       lgr     %r3,%r4
+       lhi     %r5,-EIO
+       sam31
+       diag    %r1,%r2,0x14
+.Ldiag14_ex:
+       ipm     %r5
+       srl     %r5,28
+.Ldiag14_fault:
+       sam64
+       lgfr    %r2,%r5
+       BR_EX_AMODE31_r14
+       EX_TABLE_AMODE31(.Ldiag14_ex, .Ldiag14_fault)
+ENDPROC(_diag14_amode31)
+
+/*
+ * int _diag210_amode31(struct diag210 *addr)
+ */
+ENTRY(_diag210_amode31)
+       lgr     %r1,%r2
+       lhi     %r2,-1
+       sam31
+       diag    %r1,%r0,0x210
+.Ldiag210_ex:
+       ipm     %r2
+       srl     %r2,28
+.Ldiag210_fault:
+       sam64
+       lgfr    %r2,%r2
+       BR_EX_AMODE31_r14
+       EX_TABLE_AMODE31(.Ldiag210_ex, .Ldiag210_fault)
+ENDPROC(_diag210_amode31)
+
+/*
+ * int _diag26c_amode31(void *req, void *resp, enum diag26c_sc subcode)
+ */
+ENTRY(_diag26c_amode31)
+       lghi    %r5,-EOPNOTSUPP
+       sam31
+       diag    %r2,%r4,0x26c
+.Ldiag26c_ex:
+       sam64
+       lgfr    %r2,%r5
+       BR_EX_AMODE31_r14
+       EX_TABLE_AMODE31(.Ldiag26c_ex, .Ldiag26c_ex)
+ENDPROC(_diag26c_amode31)
+
+/*
+ * void _diag0c_amode31(struct hypfs_diag0c_entry *entry)
+ */
+ENTRY(_diag0c_amode31)
+       sam31
+       diag    %r2,%r2,0x0c
+       sam64
+       BR_EX_AMODE31_r14
+ENDPROC(_diag0c_amode31)
+
+/*
+ * void _diag308_reset_amode31(void)
+ *
+ * Calls diag 308 subcode 1 and continues execution
+ */
+ENTRY(_diag308_reset_amode31)
+       larl    %r4,.Lctlregs           # Save control registers
+       stctg   %c0,%c15,0(%r4)
+       lg      %r2,0(%r4)              # Disable lowcore protection
+       nilh    %r2,0xefff
+       larl    %r4,.Lctlreg0
+       stg     %r2,0(%r4)
+       lctlg   %c0,%c0,0(%r4)
+       larl    %r4,.Lfpctl             # Floating point control register
+       stfpc   0(%r4)
+       larl    %r4,.Lprefix            # Save prefix register
+       stpx    0(%r4)
+       larl    %r4,.Lprefix_zero       # Set prefix register to 0
+       spx     0(%r4)
+       larl    %r4,.Lcontinue_psw      # Save PSW flags
+       epsw    %r2,%r3
+       stm     %r2,%r3,0(%r4)
+       larl    %r4,.Lrestart_part2     # Setup restart PSW at absolute 0
+       larl    %r3,.Lrestart_diag308_psw
+       og      %r4,0(%r3)              # Save PSW
+       lghi    %r3,0
+       sturg   %r4,%r3                 # Use sturg, because of large pages
+       lghi    %r1,1
+       lghi    %r0,0
+       diag    %r0,%r1,0x308
+.Lrestart_part2:
+       lhi     %r0,0                   # Load r0 with zero
+       lhi     %r1,2                   # Use mode 2 = ESAME (dump)
+       sigp    %r1,%r0,SIGP_SET_ARCHITECTURE   # Switch to ESAME mode
+       sam64                           # Switch to 64 bit addressing mode
+       larl    %r4,.Lctlregs           # Restore control registers
+       lctlg   %c0,%c15,0(%r4)
+       larl    %r4,.Lfpctl             # Restore floating point ctl register
+       lfpc    0(%r4)
+       larl    %r4,.Lprefix            # Restore prefix register
+       spx     0(%r4)
+       larl    %r4,.Lcontinue_psw      # Restore PSW flags
+       larl    %r2,.Lcontinue
+       stg     %r2,8(%r4)
+       lpswe   0(%r4)
+.Lcontinue:
+       BR_EX_AMODE31_r14
+ENDPROC(_diag308_reset_amode31)
+
+       .section .amode31.data,"aw",@progbits
+.align 8
+.Lrestart_diag308_psw:
+       .long   0x00080000,0x80000000
+
+.align 8
+.Lcontinue_psw:
+       .quad   0,0
+
+.align 8
+.Lctlreg0:
+       .quad   0
+.Lctlregs:
+       .rept   16
+       .quad   0
+       .endr
+.Lfpctl:
+       .long   0
+.Lprefix:
+       .long   0
+.Lprefix_zero:
+       .long   0
index 26aa261..d2458a2 100644 (file)
@@ -406,7 +406,7 @@ static ssize_t dispatching_store(struct device *dev,
        if (val != 0 && val != 1)
                return -EINVAL;
        rc = 0;
-       get_online_cpus();
+       cpus_read_lock();
        mutex_lock(&smp_cpu_state_mutex);
        if (cpu_management == val)
                goto out;
@@ -417,7 +417,7 @@ static ssize_t dispatching_store(struct device *dev,
        topology_expect_change();
 out:
        mutex_unlock(&smp_cpu_state_mutex);
-       put_online_cpus();
+       cpus_read_unlock();
        return rc ? rc : count;
 }
 static DEVICE_ATTR_RW(dispatching);
index 7694727..bcefc21 100644 (file)
@@ -291,7 +291,7 @@ static void __init test_monitor_call(void)
 
 void __init trap_init(void)
 {
-       sort_extable(__start_dma_ex_table, __stop_dma_ex_table);
+       sort_extable(__start_amode31_ex_table, __stop_amode31_ex_table);
        local_mcck_enable();
        test_monitor_call();
 }
index aeb0a15..5a656c7 100644 (file)
@@ -51,24 +51,9 @@ void __init setup_uv(void)
 {
        unsigned long uv_stor_base;
 
-       /*
-        * keep these conditions in line with has_uv_sec_stor_limit()
-        */
        if (!is_prot_virt_host())
                return;
 
-       if (is_prot_virt_guest()) {
-               prot_virt_host = 0;
-               pr_warn("Protected virtualization not available in protected guests.");
-               return;
-       }
-
-       if (!test_facility(158)) {
-               prot_virt_host = 0;
-               pr_warn("Protected virtualization not supported by the hardware.");
-               return;
-       }
-
        uv_stor_base = (unsigned long)memblock_alloc_try_nid(
                uv_info.uv_base_stor_len, SZ_1M, SZ_2G,
                MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
index 3457dcf..e3e6ac5 100644 (file)
@@ -36,6 +36,7 @@ CPPFLAGS_vdso32.lds += -P -C -U$(ARCH)
 GCOV_PROFILE := n
 UBSAN_SANITIZE := n
 KASAN_SANITIZE := n
+KCSAN_SANITIZE := n
 
 # Force dependency (incbin is bad)
 $(obj)/vdso32_wrapper.o : $(obj)/vdso32.so
index bff50b6..edf5ff1 100644 (file)
@@ -51,6 +51,7 @@ SECTIONS
 
        .rela.dyn ALIGN(8) : { *(.rela.dyn) }
        .got ALIGN(8)   : { *(.got .toc) }
+       .got.plt ALIGN(8) : { *(.got.plt) }
 
        _end = .;
        PROVIDE(end = .);
index 2a2092c..6568de2 100644 (file)
@@ -39,6 +39,7 @@ CPPFLAGS_vdso64.lds += -P -C -U$(ARCH)
 GCOV_PROFILE := n
 UBSAN_SANITIZE := n
 KASAN_SANITIZE := n
+KCSAN_SANITIZE := n
 
 # Force dependency (incbin is bad)
 $(obj)/vdso64_wrapper.o : $(obj)/vdso64.so
index d4fb336..4461ea1 100644 (file)
@@ -51,6 +51,7 @@ SECTIONS
 
        .rela.dyn ALIGN(8) : { *(.rela.dyn) }
        .got ALIGN(8)   : { *(.got .toc) }
+       .got.plt ALIGN(8) : { *(.got.plt) }
 
        _end = .;
        PROVIDE(end = .);
index 4c0e191..63bdb9e 100644 (file)
@@ -5,6 +5,7 @@
 
 #include <asm/thread_info.h>
 #include <asm/page.h>
+#include <asm/ftrace.lds.h>
 
 /*
  * Put .bss..swapper_pg_dir as the first thing in .bss. This will
@@ -46,6 +47,7 @@ SECTIONS
                KPROBES_TEXT
                IRQENTRY_TEXT
                SOFTIRQENTRY_TEXT
+               FTRACE_HOTPATCH_TRAMPOLINES_TEXT
                *(.text.*_indirect_*)
                *(.fixup)
                *(.gnu.warning)
@@ -71,6 +73,13 @@ SECTIONS
        RW_DATA(0x100, PAGE_SIZE, THREAD_SIZE)
        BOOT_DATA_PRESERVED
 
+       . = ALIGN(8);
+       .amode31.refs : {
+               _start_amode31_refs = .;
+               *(.amode31.refs)
+               _end_amode31_refs = .;
+       }
+
        _edata = .;             /* End of data section */
 
        /* will be freed after init */
@@ -136,6 +145,32 @@ SECTIONS
 
        BOOT_DATA
 
+       /*
+        * .amode31 section for code, data, ex_table that need to stay
+        * below 2 GB, even when the kernel is relocated above 2 GB.
+        */
+       . = ALIGN(PAGE_SIZE);
+       _samode31 = .;
+       .amode31.text : {
+               _stext_amode31 = .;
+               *(.amode31.text)
+               *(.amode31.text.*_indirect_*)
+               . = ALIGN(PAGE_SIZE);
+               _etext_amode31 = .;
+       }
+       . = ALIGN(16);
+       .amode31.ex_table : {
+               _start_amode31_ex_table = .;
+               KEEP(*(.amode31.ex_table))
+               _stop_amode31_ex_table = .;
+       }
+       . = ALIGN(PAGE_SIZE);
+       .amode31.data : {
+               *(.amode31.data)
+       }
+       . = ALIGN(PAGE_SIZE);
+       _eamode31 = .;
+
        /* early.c uses stsi, which requires page aligned data. */
        . = ALIGN(PAGE_SIZE);
        INIT_DATA_SECTION(0x100)
index 02c146f..807fa9d 100644 (file)
@@ -24,7 +24,7 @@ static int diag_release_pages(struct kvm_vcpu *vcpu)
 
        start = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4];
        end = vcpu->run->s.regs.gprs[vcpu->arch.sie_block->ipa & 0xf] + PAGE_SIZE;
-       vcpu->stat.diagnose_10++;
+       vcpu->stat.instruction_diagnose_10++;
 
        if (start & ~PAGE_MASK || end & ~PAGE_MASK || start >= end
            || start < 2 * PAGE_SIZE)
@@ -74,7 +74,7 @@ static int __diag_page_ref_service(struct kvm_vcpu *vcpu)
 
        VCPU_EVENT(vcpu, 3, "diag page reference parameter block at 0x%llx",
                   vcpu->run->s.regs.gprs[rx]);
-       vcpu->stat.diagnose_258++;
+       vcpu->stat.instruction_diagnose_258++;
        if (vcpu->run->s.regs.gprs[rx] & 7)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
        rc = read_guest(vcpu, vcpu->run->s.regs.gprs[rx], rx, &parm, sizeof(parm));
@@ -145,7 +145,7 @@ static int __diag_page_ref_service(struct kvm_vcpu *vcpu)
 static int __diag_time_slice_end(struct kvm_vcpu *vcpu)
 {
        VCPU_EVENT(vcpu, 5, "%s", "diag time slice end");
-       vcpu->stat.diagnose_44++;
+       vcpu->stat.instruction_diagnose_44++;
        kvm_vcpu_on_spin(vcpu, true);
        return 0;
 }
@@ -169,7 +169,7 @@ static int __diag_time_slice_end_directed(struct kvm_vcpu *vcpu)
        int tid;
 
        tid = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4];
-       vcpu->stat.diagnose_9c++;
+       vcpu->stat.instruction_diagnose_9c++;
 
        /* yield to self */
        if (tid == vcpu->vcpu_id)
@@ -192,7 +192,7 @@ static int __diag_time_slice_end_directed(struct kvm_vcpu *vcpu)
                VCPU_EVENT(vcpu, 5,
                           "diag time slice end directed to %d: yield forwarded",
                           tid);
-               vcpu->stat.diagnose_9c_forward++;
+               vcpu->stat.diag_9c_forward++;
                return 0;
        }
 
@@ -203,7 +203,7 @@ static int __diag_time_slice_end_directed(struct kvm_vcpu *vcpu)
        return 0;
 no_yield:
        VCPU_EVENT(vcpu, 5, "diag time slice end directed to %d: ignored", tid);
-       vcpu->stat.diagnose_9c_ignored++;
+       vcpu->stat.diag_9c_ignored++;
        return 0;
 }
 
@@ -213,7 +213,7 @@ static int __diag_ipl_functions(struct kvm_vcpu *vcpu)
        unsigned long subcode = vcpu->run->s.regs.gprs[reg] & 0xffff;
 
        VCPU_EVENT(vcpu, 3, "diag ipl functions, subcode %lx", subcode);
-       vcpu->stat.diagnose_308++;
+       vcpu->stat.instruction_diagnose_308++;
        switch (subcode) {
        case 3:
                vcpu->run->s390_reset_flags = KVM_S390_RESET_CLEAR;
@@ -245,7 +245,7 @@ static int __diag_virtio_hypercall(struct kvm_vcpu *vcpu)
 {
        int ret;
 
-       vcpu->stat.diagnose_500++;
+       vcpu->stat.instruction_diagnose_500++;
        /* No virtio-ccw notification? Get out quickly. */
        if (!vcpu->kvm->arch.css_support ||
            (vcpu->run->s.regs.gprs[1] != KVM_S390_VIRTIO_CCW_NOTIFY))
@@ -299,7 +299,7 @@ int kvm_s390_handle_diag(struct kvm_vcpu *vcpu)
        case 0x500:
                return __diag_virtio_hypercall(vcpu);
        default:
-               vcpu->stat.diagnose_other++;
+               vcpu->stat.instruction_diagnose_other++;
                return -EOPNOTSUPP;
        }
 }
index b655a7d..4527ac7 100644 (file)
@@ -163,15 +163,15 @@ const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
        STATS_DESC_COUNTER(VCPU, instruction_sigp_init_cpu_reset),
        STATS_DESC_COUNTER(VCPU, instruction_sigp_cpu_reset),
        STATS_DESC_COUNTER(VCPU, instruction_sigp_unknown),
-       STATS_DESC_COUNTER(VCPU, diagnose_10),
-       STATS_DESC_COUNTER(VCPU, diagnose_44),
-       STATS_DESC_COUNTER(VCPU, diagnose_9c),
-       STATS_DESC_COUNTER(VCPU, diagnose_9c_ignored),
-       STATS_DESC_COUNTER(VCPU, diagnose_9c_forward),
-       STATS_DESC_COUNTER(VCPU, diagnose_258),
-       STATS_DESC_COUNTER(VCPU, diagnose_308),
-       STATS_DESC_COUNTER(VCPU, diagnose_500),
-       STATS_DESC_COUNTER(VCPU, diagnose_other),
+       STATS_DESC_COUNTER(VCPU, instruction_diagnose_10),
+       STATS_DESC_COUNTER(VCPU, instruction_diagnose_44),
+       STATS_DESC_COUNTER(VCPU, instruction_diagnose_9c),
+       STATS_DESC_COUNTER(VCPU, diag_9c_ignored),
+       STATS_DESC_COUNTER(VCPU, diag_9c_forward),
+       STATS_DESC_COUNTER(VCPU, instruction_diagnose_258),
+       STATS_DESC_COUNTER(VCPU, instruction_diagnose_308),
+       STATS_DESC_COUNTER(VCPU, instruction_diagnose_500),
+       STATS_DESC_COUNTER(VCPU, instruction_diagnose_other),
        STATS_DESC_COUNTER(VCPU, pfault_sync)
 };
 static_assert(ARRAY_SIZE(kvm_vcpu_stats_desc) ==
index f289afe..bccbf39 100644 (file)
@@ -7,17 +7,10 @@
  *              Heiko Carstens <heiko.carstens@de.ibm.com>,
  */
 
-#include <linux/sched.h>
+#include <linux/processor.h>
 #include <linux/delay.h>
-#include <linux/timex.h>
-#include <linux/export.h>
-#include <linux/irqflags.h>
-#include <linux/interrupt.h>
-#include <linux/jump_label.h>
-#include <linux/irq.h>
-#include <asm/vtimer.h>
 #include <asm/div64.h>
-#include <asm/idle.h>
+#include <asm/timex.h>
 
 void __delay(unsigned long loops)
 {
index e40a306..0b0c8c2 100644 (file)
@@ -4,6 +4,7 @@
 #include <linux/seq_file.h>
 #include <linux/debugfs.h>
 #include <linux/mm.h>
+#include <linux/kfence.h>
 #include <linux/kasan.h>
 #include <asm/ptdump.h>
 #include <asm/kasan.h>
@@ -21,6 +22,10 @@ enum address_markers_idx {
        IDENTITY_BEFORE_END_NR,
        KERNEL_START_NR,
        KERNEL_END_NR,
+#ifdef CONFIG_KFENCE
+       KFENCE_START_NR,
+       KFENCE_END_NR,
+#endif
        IDENTITY_AFTER_NR,
        IDENTITY_AFTER_END_NR,
 #ifdef CONFIG_KASAN
@@ -40,6 +45,10 @@ static struct addr_marker address_markers[] = {
        [IDENTITY_BEFORE_END_NR] = {(unsigned long)_stext, "Identity Mapping End"},
        [KERNEL_START_NR]       = {(unsigned long)_stext, "Kernel Image Start"},
        [KERNEL_END_NR]         = {(unsigned long)_end, "Kernel Image End"},
+#ifdef CONFIG_KFENCE
+       [KFENCE_START_NR]       = {0, "KFence Pool Start"},
+       [KFENCE_END_NR]         = {0, "KFence Pool End"},
+#endif
        [IDENTITY_AFTER_NR]     = {(unsigned long)_end, "Identity Mapping Start"},
        [IDENTITY_AFTER_END_NR] = {0, "Identity Mapping End"},
 #ifdef CONFIG_KASAN
@@ -248,6 +257,9 @@ static void sort_address_markers(void)
 
 static int pt_dump_init(void)
 {
+#ifdef CONFIG_KFENCE
+       unsigned long kfence_start = (unsigned long)__kfence_pool;
+#endif
        /*
         * Figure out the maximum virtual address being accessible with the
         * kernel ASCE. We need this to keep the page table walker functions
@@ -262,6 +274,10 @@ static int pt_dump_init(void)
        address_markers[VMEMMAP_END_NR].start_address = (unsigned long)vmemmap + vmemmap_size;
        address_markers[VMALLOC_NR].start_address = VMALLOC_START;
        address_markers[VMALLOC_END_NR].start_address = VMALLOC_END;
+#ifdef CONFIG_KFENCE
+       address_markers[KFENCE_START_NR].start_address = kfence_start;
+       address_markers[KFENCE_END_NR].start_address = kfence_start + KFENCE_POOL_SIZE;
+#endif
        sort_address_markers();
 #ifdef CONFIG_PTDUMP_DEBUGFS
        debugfs_create_file("kernel_page_tables", 0400, NULL, NULL, &ptdump_fops);
index e33c43b..212632d 100644 (file)
@@ -31,6 +31,7 @@
 #include <linux/kprobes.h>
 #include <linux/uaccess.h>
 #include <linux/hugetlb.h>
+#include <linux/kfence.h>
 #include <asm/asm-offsets.h>
 #include <asm/diag.h>
 #include <asm/gmap.h>
@@ -230,8 +231,8 @@ const struct exception_table_entry *s390_search_extables(unsigned long addr)
 {
        const struct exception_table_entry *fixup;
 
-       fixup = search_extable(__start_dma_ex_table,
-                              __stop_dma_ex_table - __start_dma_ex_table,
+       fixup = search_extable(__start_amode31_ex_table,
+                              __stop_amode31_ex_table - __start_amode31_ex_table,
                               addr);
        if (!fixup)
                fixup = search_exception_tables(addr);
@@ -356,6 +357,7 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
        unsigned long address;
        unsigned int flags;
        vm_fault_t fault;
+       bool is_write;
 
        tsk = current;
        /*
@@ -369,6 +371,8 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
 
        mm = tsk->mm;
        trans_exc_code = regs->int_parm_long;
+       address = trans_exc_code & __FAIL_ADDR_MASK;
+       is_write = (trans_exc_code & store_indication) == 0x400;
 
        /*
         * Verify that the fault happened in user space, that
@@ -379,6 +383,8 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
        type = get_fault_type(regs);
        switch (type) {
        case KERNEL_FAULT:
+               if (kfence_handle_page_fault(address, is_write, regs))
+                       return 0;
                goto out;
        case USER_FAULT:
        case GMAP_FAULT:
@@ -387,12 +393,11 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
                break;
        }
 
-       address = trans_exc_code & __FAIL_ADDR_MASK;
        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
        flags = FAULT_FLAG_DEFAULT;
        if (user_mode(regs))
                flags |= FAULT_FLAG_USER;
-       if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
+       if (access == VM_WRITE || is_write)
                flags |= FAULT_FLAG_WRITE;
        mmap_read_lock(mm);
 
index 8ac710d..f3db3ca 100644 (file)
@@ -34,6 +34,7 @@
 #include <asm/processor.h>
 #include <linux/uaccess.h>
 #include <asm/pgalloc.h>
+#include <asm/kfence.h>
 #include <asm/ptdump.h>
 #include <asm/dma.h>
 #include <asm/lowcore.h>
@@ -200,7 +201,7 @@ void __init mem_init(void)
         high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
 
        pv_init();
-
+       kfence_split_mapping();
        /* Setup guest page hinting */
        cmma_init();
 
index a0fdc6d..3e47351 100644 (file)
@@ -107,6 +107,9 @@ static void __init kasan_early_pgtable_populate(unsigned long address,
                sgt_prot &= ~_SEGMENT_ENTRY_NOEXEC;
        }
 
+       /*
+        * The first 1MB of 1:1 mapping is mapped with 4KB pages
+        */
        while (address < end) {
                pg_dir = pgd_offset_k(address);
                if (pgd_none(*pg_dir)) {
@@ -157,30 +160,26 @@ static void __init kasan_early_pgtable_populate(unsigned long address,
 
                pm_dir = pmd_offset(pu_dir, address);
                if (pmd_none(*pm_dir)) {
-                       if (mode == POPULATE_ZERO_SHADOW &&
-                           IS_ALIGNED(address, PMD_SIZE) &&
+                       if (IS_ALIGNED(address, PMD_SIZE) &&
                            end - address >= PMD_SIZE) {
-                               pmd_populate(&init_mm, pm_dir,
-                                               kasan_early_shadow_pte);
-                               address = (address + PMD_SIZE) & PMD_MASK;
-                               continue;
-                       }
-                       /* the first megabyte of 1:1 is mapped with 4k pages */
-                       if (has_edat && address && end - address >= PMD_SIZE &&
-                           mode != POPULATE_ZERO_SHADOW) {
-                               void *page;
-
-                               if (mode == POPULATE_ONE2ONE) {
-                                       page = (void *)address;
-                               } else {
-                                       page = kasan_early_alloc_segment();
-                                       memset(page, 0, _SEGMENT_SIZE);
+                               if (mode == POPULATE_ZERO_SHADOW) {
+                                       pmd_populate(&init_mm, pm_dir, kasan_early_shadow_pte);
+                                       address = (address + PMD_SIZE) & PMD_MASK;
+                                       continue;
+                               } else if (has_edat && address) {
+                                       void *page;
+
+                                       if (mode == POPULATE_ONE2ONE) {
+                                               page = (void *)address;
+                                       } else {
+                                               page = kasan_early_alloc_segment();
+                                               memset(page, 0, _SEGMENT_SIZE);
+                                       }
+                                       pmd_val(*pm_dir) = __pa(page) | sgt_prot;
+                                       address = (address + PMD_SIZE) & PMD_MASK;
+                                       continue;
                                }
-                               pmd_val(*pm_dir) = __pa(page) | sgt_prot;
-                               address = (address + PMD_SIZE) & PMD_MASK;
-                               continue;
                        }
-
                        pt_dir = kasan_early_pte_alloc();
                        pmd_populate(&init_mm, pm_dir, pt_dir);
                } else if (pmd_large(*pm_dir)) {
@@ -300,7 +299,7 @@ void __init kasan_early_init(void)
        pgalloc_low = round_up((unsigned long)_end, _SEGMENT_SIZE);
        if (IS_ENABLED(CONFIG_BLK_DEV_INITRD)) {
                initrd_end =
-                   round_up(INITRD_START + INITRD_SIZE, _SEGMENT_SIZE);
+                   round_up(initrd_data.start + initrd_data.size, _SEGMENT_SIZE);
                pgalloc_low = max(pgalloc_low, initrd_end);
        }
 
index a0f54bd..9663ce3 100644 (file)
@@ -228,7 +228,7 @@ void *xlate_dev_mem_ptr(phys_addr_t addr)
        void *bounce = (void *) addr;
        unsigned long size;
 
-       get_online_cpus();
+       cpus_read_lock();
        preempt_disable();
        if (is_swapped(addr)) {
                size = PAGE_SIZE - (addr & ~PAGE_MASK);
@@ -237,7 +237,7 @@ void *xlate_dev_mem_ptr(phys_addr_t addr)
                        memcpy_absolute(bounce, (void *) addr, size);
        }
        preempt_enable();
-       put_online_cpus();
+       cpus_read_unlock();
        return bounce;
 }
 
index 68b1530..18a6381 100644 (file)
@@ -228,46 +228,3 @@ void arch_set_page_dat(struct page *page, int order)
                return;
        set_page_stable_dat(page, order);
 }
-
-void arch_set_page_nodat(struct page *page, int order)
-{
-       if (cmma_flag < 2)
-               return;
-       set_page_stable_nodat(page, order);
-}
-
-int arch_test_page_nodat(struct page *page)
-{
-       unsigned char state;
-
-       if (cmma_flag < 2)
-               return 0;
-       state = get_page_state(page);
-       return !!(state & 0x20);
-}
-
-void arch_set_page_states(int make_stable)
-{
-       unsigned long flags, order, t;
-       struct list_head *l;
-       struct page *page;
-       struct zone *zone;
-
-       if (!cmma_flag)
-               return;
-       if (make_stable)
-               drain_local_pages(NULL);
-       for_each_populated_zone(zone) {
-               spin_lock_irqsave(&zone->lock, flags);
-               for_each_migratetype_order(order, t) {
-                       list_for_each(l, &zone->free_area[order].free_list[t]) {
-                               page = list_entry(l, struct page, lru);
-                               if (make_stable)
-                                       set_page_stable_dat(page, order);
-                               else
-                                       set_page_unused(page, order);
-                       }
-               }
-               spin_unlock_irqrestore(&zone->lock, flags);
-       }
-}
index ed8e5b3..fdc86c0 100644 (file)
@@ -8,6 +8,7 @@
 #include <asm/cacheflush.h>
 #include <asm/facility.h>
 #include <asm/pgalloc.h>
+#include <asm/kfence.h>
 #include <asm/page.h>
 #include <asm/set_memory.h>
 
@@ -85,6 +86,8 @@ static int walk_pte_level(pmd_t *pmdp, unsigned long addr, unsigned long end,
 {
        pte_t *ptep, new;
 
+       if (flags == SET_MEMORY_4K)
+               return 0;
        ptep = pte_offset_kernel(pmdp, addr);
        do {
                new = *ptep;
@@ -155,6 +158,7 @@ static int walk_pmd_level(pud_t *pudp, unsigned long addr, unsigned long end,
                          unsigned long flags)
 {
        unsigned long next;
+       int need_split;
        pmd_t *pmdp;
        int rc = 0;
 
@@ -164,7 +168,10 @@ static int walk_pmd_level(pud_t *pudp, unsigned long addr, unsigned long end,
                        return -EINVAL;
                next = pmd_addr_end(addr, end);
                if (pmd_large(*pmdp)) {
-                       if (addr & ~PMD_MASK || addr + PMD_SIZE > next) {
+                       need_split  = !!(flags & SET_MEMORY_4K);
+                       need_split |= !!(addr & ~PMD_MASK);
+                       need_split |= !!(addr + PMD_SIZE > next);
+                       if (need_split) {
                                rc = split_pmd_page(pmdp, addr);
                                if (rc)
                                        return rc;
@@ -232,6 +239,7 @@ static int walk_pud_level(p4d_t *p4d, unsigned long addr, unsigned long end,
                          unsigned long flags)
 {
        unsigned long next;
+       int need_split;
        pud_t *pudp;
        int rc = 0;
 
@@ -241,7 +249,10 @@ static int walk_pud_level(p4d_t *p4d, unsigned long addr, unsigned long end,
                        return -EINVAL;
                next = pud_addr_end(addr, end);
                if (pud_large(*pudp)) {
-                       if (addr & ~PUD_MASK || addr + PUD_SIZE > next) {
+                       need_split  = !!(flags & SET_MEMORY_4K);
+                       need_split |= !!(addr & ~PUD_MASK);
+                       need_split |= !!(addr + PUD_SIZE > next);
+                       if (need_split) {
                                rc = split_pud_page(pudp, addr);
                                if (rc)
                                        break;
@@ -316,7 +327,7 @@ int __set_memory(unsigned long addr, int numpages, unsigned long flags)
        return change_page_attr(addr, addr + numpages * PAGE_SIZE, flags);
 }
 
-#ifdef CONFIG_DEBUG_PAGEALLOC
+#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
 
 static void ipte_range(pte_t *pte, unsigned long address, int nr)
 {
@@ -340,7 +351,7 @@ void __kernel_map_pages(struct page *page, int numpages, int enable)
        pte_t *pte;
 
        for (i = 0; i < numpages;) {
-               address = page_to_phys(page + i);
+               address = (unsigned long)page_to_virt(page + i);
                pte = virt_to_kpte(address);
                nr = (unsigned long)pte >> ilog2(sizeof(long));
                nr = PTRS_PER_PTE - (nr & (PTRS_PER_PTE - 1));
index 96897fa..2b1c6d9 100644 (file)
@@ -581,7 +581,7 @@ void __init vmem_map_init(void)
        __set_memory((unsigned long)_sinittext,
                     (unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT,
                     SET_MEMORY_RO | SET_MEMORY_X);
-       __set_memory(__stext_dma, (__etext_dma - __stext_dma) >> PAGE_SHIFT,
+       __set_memory(__stext_amode31, (__etext_amode31 - __stext_amode31) >> PAGE_SHIFT,
                     SET_MEMORY_RO | SET_MEMORY_X);
 
        /* we need lowcore executable for our LPSWE instructions */
index 2ae419f..8841926 100644 (file)
@@ -1153,6 +1153,11 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
                        break;
                }
                break;
+       /*
+        * BPF_NOSPEC (speculation barrier)
+        */
+       case BPF_ST | BPF_NOSPEC:
+               break;
        /*
         * BPF_ST(X)
         */
index b0993e0..e7e6788 100644 (file)
@@ -113,13 +113,16 @@ int zpci_register_ioat(struct zpci_dev *zdev, u8 dmaas,
 {
        u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, ZPCI_MOD_FC_REG_IOAT);
        struct zpci_fib fib = {0};
-       u8 status;
+       u8 cc, status;
 
        WARN_ON_ONCE(iota & 0x3fff);
        fib.pba = base;
        fib.pal = limit;
        fib.iota = iota | ZPCI_IOTA_RTTO_FLAG;
-       return zpci_mod_fc(req, &fib, &status) ? -EIO : 0;
+       cc = zpci_mod_fc(req, &fib, &status);
+       if (cc)
+               zpci_dbg(3, "reg ioat fid:%x, cc:%d, status:%d\n", zdev->fid, cc, status);
+       return cc;
 }
 
 /* Modify PCI: Unregister I/O address translation parameters */
@@ -130,9 +133,9 @@ int zpci_unregister_ioat(struct zpci_dev *zdev, u8 dmaas)
        u8 cc, status;
 
        cc = zpci_mod_fc(req, &fib, &status);
-       if (cc == 3) /* Function already gone. */
-               cc = 0;
-       return cc ? -EIO : 0;
+       if (cc)
+               zpci_dbg(3, "unreg ioat fid:%x, cc:%d, status:%d\n", zdev->fid, cc, status);
+       return cc;
 }
 
 /* Modify PCI: Set PCI function measurement parameters */
@@ -560,9 +563,12 @@ static void zpci_cleanup_bus_resources(struct zpci_dev *zdev)
 
 int pcibios_add_device(struct pci_dev *pdev)
 {
+       struct zpci_dev *zdev = to_zpci(pdev);
        struct resource *res;
        int i;
 
+       /* The pdev has a reference to the zdev via its bus */
+       zpci_zdev_get(zdev);
        if (pdev->is_physfn)
                pdev->no_vf_scan = 1;
 
@@ -582,7 +588,10 @@ int pcibios_add_device(struct pci_dev *pdev)
 
 void pcibios_release_device(struct pci_dev *pdev)
 {
+       struct zpci_dev *zdev = to_zpci(pdev);
+
        zpci_unmap_resources(pdev);
+       zpci_zdev_put(zdev);
 }
 
 int pcibios_enable_device(struct pci_dev *pdev, int mask)
@@ -653,32 +662,37 @@ void zpci_free_domain(int domain)
 
 int zpci_enable_device(struct zpci_dev *zdev)
 {
-       int rc;
-
-       rc = clp_enable_fh(zdev, ZPCI_NR_DMA_SPACES);
-       if (rc)
-               goto out;
-
-       rc = zpci_dma_init_device(zdev);
-       if (rc)
-               goto out_dma;
+       u32 fh = zdev->fh;
+       int rc = 0;
 
-       return 0;
-
-out_dma:
-       clp_disable_fh(zdev);
-out:
+       if (clp_enable_fh(zdev, &fh, ZPCI_NR_DMA_SPACES))
+               rc = -EIO;
+       else
+               zdev->fh = fh;
        return rc;
 }
 
 int zpci_disable_device(struct zpci_dev *zdev)
 {
-       zpci_dma_exit_device(zdev);
-       /*
-        * The zPCI function may already be disabled by the platform, this is
-        * detected in clp_disable_fh() which becomes a no-op.
-        */
-       return clp_disable_fh(zdev);
+       u32 fh = zdev->fh;
+       int cc, rc = 0;
+
+       cc = clp_disable_fh(zdev, &fh);
+       if (!cc) {
+               zdev->fh = fh;
+       } else if (cc == CLP_RC_SETPCIFN_ALRDY) {
+               pr_info("Disabling PCI function %08x had no effect as it was already disabled\n",
+                       zdev->fid);
+               /* Function is already disabled - update handle */
+               rc = clp_refresh_fh(zdev->fid, &fh);
+               if (!rc) {
+                       zdev->fh = fh;
+                       rc = -EINVAL;
+               }
+       } else {
+               rc = -EIO;
+       }
+       return rc;
 }
 
 /**
@@ -788,6 +802,11 @@ int zpci_deconfigure_device(struct zpci_dev *zdev)
        if (zdev->zbus->bus)
                zpci_bus_remove_device(zdev, false);
 
+       if (zdev->dma_table) {
+               rc = zpci_dma_exit_device(zdev);
+               if (rc)
+                       return rc;
+       }
        if (zdev_enabled(zdev)) {
                rc = zpci_disable_device(zdev);
                if (rc)
@@ -811,6 +830,8 @@ void zpci_release_device(struct kref *kref)
        if (zdev->zbus->bus)
                zpci_bus_remove_device(zdev, false);
 
+       if (zdev->dma_table)
+               zpci_dma_exit_device(zdev);
        if (zdev_enabled(zdev))
                zpci_disable_device(zdev);
 
@@ -822,7 +843,8 @@ void zpci_release_device(struct kref *kref)
        case ZPCI_FN_STATE_STANDBY:
                if (zdev->has_hp_slot)
                        zpci_exit_slot(zdev);
-               zpci_cleanup_bus_resources(zdev);
+               if (zdev->has_resources)
+                       zpci_cleanup_bus_resources(zdev);
                zpci_bus_device_unregister(zdev);
                zpci_destroy_iommu(zdev);
                fallthrough;
@@ -886,7 +908,6 @@ static void zpci_mem_exit(void)
 }
 
 static unsigned int s390_pci_probe __initdata = 1;
-static unsigned int s390_pci_no_mio __initdata;
 unsigned int s390_pci_force_floating __initdata;
 static unsigned int s390_pci_initialized;
 
@@ -897,7 +918,7 @@ char * __init pcibios_setup(char *str)
                return NULL;
        }
        if (!strcmp(str, "nomio")) {
-               s390_pci_no_mio = 1;
+               S390_lowcore.machine_flags &= ~MACHINE_FLAG_PCI_MIO;
                return NULL;
        }
        if (!strcmp(str, "force_floating")) {
@@ -928,7 +949,7 @@ static int __init pci_base_init(void)
                return 0;
        }
 
-       if (test_facility(153) && !s390_pci_no_mio) {
+       if (MACHINE_HAS_PCI_MIO) {
                static_branch_enable(&have_mio);
                ctl_set_bit(2, 5);
        }
index 9629f97..5d77acb 100644 (file)
@@ -49,6 +49,11 @@ static int zpci_bus_prepare_device(struct zpci_dev *zdev)
                rc = zpci_enable_device(zdev);
                if (rc)
                        return rc;
+               rc = zpci_dma_init_device(zdev);
+               if (rc) {
+                       zpci_disable_device(zdev);
+                       return rc;
+               }
        }
 
        if (!zdev->has_resources) {
@@ -343,11 +348,11 @@ static int zpci_bus_add_device(struct zpci_bus *zbus, struct zpci_dev *zdev)
 {
        int rc = -EINVAL;
 
-       zdev->zbus = zbus;
        if (zbus->function[zdev->devfn]) {
                pr_err("devfn %04x is already assigned\n", zdev->devfn);
                return rc;
        }
+       zdev->zbus = zbus;
        zbus->function[zdev->devfn] = zdev;
        zpci_nb_devices++;
 
@@ -367,6 +372,7 @@ static int zpci_bus_add_device(struct zpci_bus *zbus, struct zpci_dev *zdev)
 
 error:
        zbus->function[zdev->devfn] = NULL;
+       zdev->zbus = NULL;
        zpci_nb_devices--;
        return rc;
 }
index b877a97..e359d26 100644 (file)
@@ -22,6 +22,11 @@ static inline void zpci_zdev_put(struct zpci_dev *zdev)
        kref_put(&zdev->kref, zpci_release_device);
 }
 
+static inline void zpci_zdev_get(struct zpci_dev *zdev)
+{
+       kref_get(&zdev->kref);
+}
+
 int zpci_alloc_domain(int domain);
 void zpci_free_domain(int domain);
 int zpci_setup_bus_resources(struct zpci_dev *zdev,
index d333159..51dc221 100644 (file)
@@ -212,17 +212,22 @@ out:
        return rc;
 }
 
-static int clp_refresh_fh(u32 fid);
-/*
- * Enable/Disable a given PCI function and update its function handle if
- * necessary
+/**
+ * clp_set_pci_fn() - Execute a command on a PCI function
+ * @zdev: Function that will be affected
+ * @fh: Out parameter for updated function handle
+ * @nr_dma_as: DMA address space number
+ * @command: The command code to execute
+ *
+ * Returns: 0 on success, < 0 for Linux errors (e.g. -ENOMEM), and
+ * > 0 for non-success platform responses
  */
-static int clp_set_pci_fn(struct zpci_dev *zdev, u8 nr_dma_as, u8 command)
+static int clp_set_pci_fn(struct zpci_dev *zdev, u32 *fh, u8 nr_dma_as, u8 command)
 {
        struct clp_req_rsp_set_pci *rrb;
        int rc, retries = 100;
-       u32 fid = zdev->fid;
 
+       *fh = 0;
        rrb = clp_alloc_block(GFP_KERNEL);
        if (!rrb)
                return -ENOMEM;
@@ -245,17 +250,13 @@ static int clp_set_pci_fn(struct zpci_dev *zdev, u8 nr_dma_as, u8 command)
                }
        } while (rrb->response.hdr.rsp == CLP_RC_SETPCIFN_BUSY);
 
-       if (rc || rrb->response.hdr.rsp != CLP_RC_OK) {
+       if (!rc && rrb->response.hdr.rsp == CLP_RC_OK) {
+               *fh = rrb->response.fh;
+       } else {
                zpci_err("Set PCI FN:\n");
                zpci_err_clp(rrb->response.hdr.rsp, rc);
-       }
-
-       if (!rc && rrb->response.hdr.rsp == CLP_RC_OK) {
-               zdev->fh = rrb->response.fh;
-       } else if (!rc && rrb->response.hdr.rsp == CLP_RC_SETPCIFN_ALRDY &&
-                       rrb->response.fh == 0) {
-               /* Function is already in desired state - update handle */
-               rc = clp_refresh_fh(fid);
+               if (!rc)
+                       rc = rrb->response.hdr.rsp;
        }
        clp_free_block(rrb);
        return rc;
@@ -295,35 +296,62 @@ int clp_setup_writeback_mio(void)
        return rc;
 }
 
-int clp_enable_fh(struct zpci_dev *zdev, u8 nr_dma_as)
+int clp_enable_fh(struct zpci_dev *zdev, u32 *fh, u8 nr_dma_as)
 {
        int rc;
 
-       rc = clp_set_pci_fn(zdev, nr_dma_as, CLP_SET_ENABLE_PCI_FN);
-       zpci_dbg(3, "ena fid:%x, fh:%x, rc:%d\n", zdev->fid, zdev->fh, rc);
-       if (rc)
-               goto out;
-
-       if (zpci_use_mio(zdev)) {
-               rc = clp_set_pci_fn(zdev, nr_dma_as, CLP_SET_ENABLE_MIO);
+       rc = clp_set_pci_fn(zdev, fh, nr_dma_as, CLP_SET_ENABLE_PCI_FN);
+       zpci_dbg(3, "ena fid:%x, fh:%x, rc:%d\n", zdev->fid, *fh, rc);
+       if (!rc && zpci_use_mio(zdev)) {
+               rc = clp_set_pci_fn(zdev, fh, nr_dma_as, CLP_SET_ENABLE_MIO);
                zpci_dbg(3, "ena mio fid:%x, fh:%x, rc:%d\n",
-                               zdev->fid, zdev->fh, rc);
+                               zdev->fid, *fh, rc);
                if (rc)
-                       clp_disable_fh(zdev);
+                       clp_disable_fh(zdev, fh);
        }
-out:
        return rc;
 }
 
-int clp_disable_fh(struct zpci_dev *zdev)
+int clp_disable_fh(struct zpci_dev *zdev, u32 *fh)
 {
        int rc;
 
        if (!zdev_enabled(zdev))
                return 0;
 
-       rc = clp_set_pci_fn(zdev, 0, CLP_SET_DISABLE_PCI_FN);
-       zpci_dbg(3, "dis fid:%x, fh:%x, rc:%d\n", zdev->fid, zdev->fh, rc);
+       rc = clp_set_pci_fn(zdev, fh, 0, CLP_SET_DISABLE_PCI_FN);
+       zpci_dbg(3, "dis fid:%x, fh:%x, rc:%d\n", zdev->fid, *fh, rc);
+       return rc;
+}
+
+static int clp_list_pci_req(struct clp_req_rsp_list_pci *rrb,
+                           u64 *resume_token, int *nentries)
+{
+       int rc;
+
+       memset(rrb, 0, sizeof(*rrb));
+       rrb->request.hdr.len = sizeof(rrb->request);
+       rrb->request.hdr.cmd = CLP_LIST_PCI;
+       /* store as many entries as possible */
+       rrb->response.hdr.len = CLP_BLK_SIZE - LIST_PCI_HDR_LEN;
+       rrb->request.resume_token = *resume_token;
+
+       /* Get PCI function handle list */
+       rc = clp_req(rrb, CLP_LPS_PCI);
+       if (rc || rrb->response.hdr.rsp != CLP_RC_OK) {
+               zpci_err("List PCI FN:\n");
+               zpci_err_clp(rrb->response.hdr.rsp, rc);
+               return -EIO;
+       }
+
+       update_uid_checking(rrb->response.uid_checking);
+       WARN_ON_ONCE(rrb->response.entry_size !=
+               sizeof(struct clp_fh_list_entry));
+
+       *nentries = (rrb->response.hdr.len - LIST_PCI_HDR_LEN) /
+               rrb->response.entry_size;
+       *resume_token = rrb->response.resume_token;
+
        return rc;
 }
 
@@ -331,38 +359,40 @@ static int clp_list_pci(struct clp_req_rsp_list_pci *rrb, void *data,
                        void (*cb)(struct clp_fh_list_entry *, void *))
 {
        u64 resume_token = 0;
-       int entries, i, rc;
+       int nentries, i, rc;
 
        do {
-               memset(rrb, 0, sizeof(*rrb));
-               rrb->request.hdr.len = sizeof(rrb->request);
-               rrb->request.hdr.cmd = CLP_LIST_PCI;
-               /* store as many entries as possible */
-               rrb->response.hdr.len = CLP_BLK_SIZE - LIST_PCI_HDR_LEN;
-               rrb->request.resume_token = resume_token;
-
-               /* Get PCI function handle list */
-               rc = clp_req(rrb, CLP_LPS_PCI);
-               if (rc || rrb->response.hdr.rsp != CLP_RC_OK) {
-                       zpci_err("List PCI FN:\n");
-                       zpci_err_clp(rrb->response.hdr.rsp, rc);
-                       rc = -EIO;
-                       goto out;
-               }
+               rc = clp_list_pci_req(rrb, &resume_token, &nentries);
+               if (rc)
+                       return rc;
+               for (i = 0; i < nentries; i++)
+                       cb(&rrb->response.fh_list[i], data);
+       } while (resume_token);
 
-               update_uid_checking(rrb->response.uid_checking);
-               WARN_ON_ONCE(rrb->response.entry_size !=
-                       sizeof(struct clp_fh_list_entry));
+       return rc;
+}
 
-               entries = (rrb->response.hdr.len - LIST_PCI_HDR_LEN) /
-                       rrb->response.entry_size;
+static int clp_find_pci(struct clp_req_rsp_list_pci *rrb, u32 fid,
+                       struct clp_fh_list_entry *entry)
+{
+       struct clp_fh_list_entry *fh_list;
+       u64 resume_token = 0;
+       int nentries, i, rc;
 
-               resume_token = rrb->response.resume_token;
-               for (i = 0; i < entries; i++)
-                       cb(&rrb->response.fh_list[i], data);
+       do {
+               rc = clp_list_pci_req(rrb, &resume_token, &nentries);
+               if (rc)
+                       return rc;
+               for (i = 0; i < nentries; i++) {
+                       fh_list = rrb->response.fh_list;
+                       if (fh_list[i].fid == fid) {
+                               *entry = fh_list[i];
+                               return 0;
+                       }
+               }
        } while (resume_token);
-out:
-       return rc;
+
+       return -ENODEV;
 }
 
 static void __clp_add(struct clp_fh_list_entry *entry, void *data)
@@ -392,67 +422,41 @@ int clp_scan_pci_devices(void)
        return rc;
 }
 
-static void __clp_refresh_fh(struct clp_fh_list_entry *entry, void *data)
-{
-       struct zpci_dev *zdev;
-       u32 fid = *((u32 *)data);
-
-       if (!entry->vendor_id || fid != entry->fid)
-               return;
-
-       zdev = get_zdev_by_fid(fid);
-       if (!zdev)
-               return;
-
-       zdev->fh = entry->fh;
-}
-
 /*
- * Refresh the function handle of the function matching @fid
+ * Get the current function handle of the function matching @fid
  */
-static int clp_refresh_fh(u32 fid)
+int clp_refresh_fh(u32 fid, u32 *fh)
 {
        struct clp_req_rsp_list_pci *rrb;
+       struct clp_fh_list_entry entry;
        int rc;
 
        rrb = clp_alloc_block(GFP_NOWAIT);
        if (!rrb)
                return -ENOMEM;
 
-       rc = clp_list_pci(rrb, &fid, __clp_refresh_fh);
+       rc = clp_find_pci(rrb, fid, &entry);
+       if (!rc)
+               *fh = entry.fh;
 
        clp_free_block(rrb);
        return rc;
 }
 
-struct clp_state_data {
-       u32 fid;
-       enum zpci_state state;
-};
-
-static void __clp_get_state(struct clp_fh_list_entry *entry, void *data)
-{
-       struct clp_state_data *sd = data;
-
-       if (entry->fid != sd->fid)
-               return;
-
-       sd->state = entry->config_state;
-}
-
 int clp_get_state(u32 fid, enum zpci_state *state)
 {
        struct clp_req_rsp_list_pci *rrb;
-       struct clp_state_data sd = {fid, ZPCI_FN_STATE_RESERVED};
+       struct clp_fh_list_entry entry;
        int rc;
 
+       *state = ZPCI_FN_STATE_RESERVED;
        rrb = clp_alloc_block(GFP_ATOMIC);
        if (!rrb)
                return -ENOMEM;
 
-       rc = clp_list_pci(rrb, &sd, __clp_get_state);
+       rc = clp_find_pci(rrb, fid, &entry);
        if (!rc)
-               *state = sd.state;
+               *state = entry.config_state;
 
        clp_free_block(rrb);
        return rc;
index ebc9a49..58f2f7a 100644 (file)
@@ -590,10 +590,11 @@ int zpci_dma_init_device(struct zpci_dev *zdev)
                }
 
        }
-       rc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
-                               (u64) zdev->dma_table);
-       if (rc)
+       if (zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
+                              (u64)zdev->dma_table)) {
+               rc = -EIO;
                goto free_bitmap;
+       }
 
        return 0;
 free_bitmap:
@@ -608,17 +609,25 @@ out:
        return rc;
 }
 
-void zpci_dma_exit_device(struct zpci_dev *zdev)
+int zpci_dma_exit_device(struct zpci_dev *zdev)
 {
+       int cc = 0;
+
        /*
         * At this point, if the device is part of an IOMMU domain, this would
         * be a strong hint towards a bug in the IOMMU API (common) code and/or
         * simultaneous access via IOMMU and DMA API. So let's issue a warning.
         */
        WARN_ON(zdev->s390_domain);
-
-       if (zpci_unregister_ioat(zdev, 0))
-               return;
+       if (zdev_enabled(zdev))
+               cc = zpci_unregister_ioat(zdev, 0);
+       /*
+        * cc == 3 indicates the function is gone already. This can happen
+        * if the function was deconfigured/disabled suddenly and we have not
+        * received a new handle yet.
+        */
+       if (cc && cc != 3)
+               return -EIO;
 
        dma_cleanup_tables(zdev->dma_table);
        zdev->dma_table = NULL;
@@ -626,8 +635,8 @@ void zpci_dma_exit_device(struct zpci_dev *zdev)
        zdev->iommu_bitmap = NULL;
        vfree(zdev->lazy_bitmap);
        zdev->lazy_bitmap = NULL;
-
        zdev->next_bit = 0;
+       return 0;
 }
 
 static int __init dma_alloc_cpu_table_caches(void)
index cd447b9..c856f80 100644 (file)
@@ -84,7 +84,10 @@ static void zpci_event_hard_deconfigured(struct zpci_dev *zdev, u32 fh)
        /* Even though the device is already gone we still
         * need to free zPCI resources as part of the disable.
         */
-       zpci_disable_device(zdev);
+       if (zdev->dma_table)
+               zpci_dma_exit_device(zdev);
+       if (zdev_enabled(zdev))
+               zpci_disable_device(zdev);
        zdev->state = ZPCI_FN_STATE_STANDBY;
 }
 
index 9c7de90..3823e15 100644 (file)
@@ -365,10 +365,6 @@ void arch_teardown_msi_irqs(struct pci_dev *pdev)
        for_each_pci_msi_entry(msi, pdev) {
                if (!msi->irq)
                        continue;
-               if (msi->msi_attrib.is_msix)
-                       __pci_msix_desc_mask_irq(msi, 1);
-               else
-                       __pci_msi_desc_mask_irq(msi, 1, 1);
                irq_set_msi_desc(msi->irq, NULL);
                irq_free_desc(msi->irq);
                msi->msg.address_lo = 0;
index 6e2450c..335c281 100644 (file)
@@ -82,13 +82,26 @@ static ssize_t recover_store(struct device *dev, struct device_attribute *attr,
        pci_lock_rescan_remove();
        if (pci_dev_is_added(pdev)) {
                pci_stop_and_remove_bus_device(pdev);
-               ret = zpci_disable_device(zdev);
-               if (ret)
-                       goto out;
+               if (zdev->dma_table) {
+                       ret = zpci_dma_exit_device(zdev);
+                       if (ret)
+                               goto out;
+               }
+
+               if (zdev_enabled(zdev)) {
+                       ret = zpci_disable_device(zdev);
+                       if (ret)
+                               goto out;
+               }
 
                ret = zpci_enable_device(zdev);
                if (ret)
                        goto out;
+               ret = zpci_dma_init_device(zdev);
+               if (ret) {
+                       zpci_disable_device(zdev);
+                       goto out;
+               }
                pci_rescan_bus(zdev->zbus->bus);
        }
 out:
index 21c4ebe..360ada8 100644 (file)
@@ -19,6 +19,7 @@ KCOV_INSTRUMENT := n
 GCOV_PROFILE := n
 UBSAN_SANITIZE := n
 KASAN_SANITIZE := n
+KCSAN_SANITIZE := n
 
 KBUILD_CFLAGS := -fno-strict-aliasing -Wall -Wstrict-prototypes
 KBUILD_CFLAGS += -Wno-pointer-sign -Wno-sign-compare
index 0e207c4..6db9820 100644 (file)
@@ -189,6 +189,8 @@ ad  stosm   SI_URD
 ae     sigp    RS_RRRD
 af     mc      SI_URD
 b1     lra     RX_RRRD
+b200   lbear   S_RD
+b201   stbear  S_RD
 b202   stidp   S_RD
 b204   sck     S_RD
 b205   stck    S_RD
@@ -523,6 +525,7 @@ b931        clgfr   RRE_RR
 b938   sortl   RRE_RR
 b939   dfltcc  RRF_R0RR2
 b93a   kdsa    RRE_RR
+b93b   nnpa    RRE_00
 b93c   ppno    RRE_RR
 b93e   kimd    RRE_RR
 b93f   klmd    RRE_RR
@@ -562,6 +565,7 @@ b987        dlgr    RRE_RR
 b988   alcgr   RRE_RR
 b989   slbgr   RRE_RR
 b98a   cspg    RRE_RR
+b98b   rdp     RRF_RURR2
 b98d   epsw    RRE_RR
 b98e   idte    RRF_RURR2
 b98f   crdte   RRF_RURR2
@@ -876,19 +880,32 @@ e63d      vstrl   VSI_URDV
 e63f   vstrlr  VRS_RRDV
 e649   vlip    VRI_V0UU2
 e650   vcvb    VRR_RV0UU
+e651   vclzdp  VRR_VV0U2
 e652   vcvbg   VRR_RV0UU
+e654   vupkzh  VRR_VV0U2
+e655   vcnf    VRR_VV0UU2
+e656   vclfnh  VRR_VV0UU2
 e658   vcvd    VRI_VR0UU
 e659   vsrp    VRI_VVUUU2
 e65a   vcvdg   VRI_VR0UU
 e65b   vpsop   VRI_VVUUU2
+e65c   vupkzl  VRR_VV0U2
+e65d   vcfn    VRR_VV0UU2
+e65e   vclfnl  VRR_VV0UU2
 e65f   vtp     VRR_0V
+e670   vpkzr   VRI_VVV0UU2
 e671   vap     VRI_VVV0UU2
+e672   vsrpr   VRI_VVV0UU2
 e673   vsp     VRI_VVV0UU2
+e674   vschp   VRR_VVV0U0U
+e675   vcrnf   VRR_VVV0UU
 e677   vcp     VRR_0VV0U
 e678   vmp     VRI_VVV0UU2
 e679   vmsp    VRI_VVV0UU2
 e67a   vdp     VRI_VVV0UU2
 e67b   vrp     VRI_VVV0UU2
+e67c   vscshp  VRR_VVV
+e67d   vcsph   VRR_VVV0U0
 e67e   vsdp    VRI_VVV0UU2
 e700   vleb    VRX_VRRDU
 e701   vleh    VRX_VRRDU
@@ -1081,6 +1098,7 @@ eb61      stric   RSY_RDRU
 eb62   mric    RSY_RDRU
 eb6a   asi     SIY_IRD
 eb6e   alsi    SIY_IRD
+eb71   lpswey  SIY_URD
 eb7a   agsi    SIY_IRD
 eb7e   algsi   SIY_IRD
 eb80   icmh    RSY_RURD
index 45a0549..b683b69 100644 (file)
@@ -39,7 +39,6 @@ config SUPERH
        select HAVE_FUTEX_CMPXCHG if FUTEX
        select HAVE_FTRACE_MCOUNT_RECORD
        select HAVE_HW_BREAKPOINT
-       select HAVE_IDE if HAS_IOPORT_MAP
        select HAVE_IOREMAP_PROT if MMU && !X2TLB
        select HAVE_KERNEL_BZIP2
        select HAVE_KERNEL_GZIP
index 1aedbfe..f9f3b14 100644 (file)
@@ -38,7 +38,7 @@ static void se7343_irq_demux(struct irq_desc *desc)
        mask = ioread16(se7343_irq_regs + PA_CPLD_ST_REG);
 
        for_each_set_bit(bit, &mask, SE7343_FPGA_IRQ_NR)
-               generic_handle_irq(irq_linear_revmap(se7343_irq_domain, bit));
+               generic_handle_domain_irq(se7343_irq_domain, bit);
 
        chip->irq_unmask(data);
 }
index 6d34592..efa96ed 100644 (file)
@@ -37,7 +37,7 @@ static void se7722_irq_demux(struct irq_desc *desc)
        mask = ioread16(se7722_irq_regs + IRQ01_STS_REG);
 
        for_each_set_bit(bit, &mask, SE7722_FPGA_IRQ_NR)
-               generic_handle_irq(irq_linear_revmap(se7722_irq_domain, bit));
+               generic_handle_domain_irq(se7722_irq_domain, bit);
 
        chip->irq_unmask(data);
 }
index efc992f..f82d3a6 100644 (file)
@@ -68,7 +68,7 @@ static void x3proto_gpio_irq_handler(struct irq_desc *desc)
 
        mask = __raw_readw(KEYDETR);
        for_each_set_bit(pin, &mask, NR_BASEBOARD_GPIOS)
-               generic_handle_irq(irq_linear_revmap(x3proto_irq_domain, pin));
+               generic_handle_domain_irq(x3proto_irq_domain, pin);
 
        chip->irq_unmask(data);
 }
index c5fa793..f0c0f95 100644 (file)
@@ -19,7 +19,6 @@ config SPARC
        select OF
        select OF_PROMTREE
        select HAVE_ASM_MODVERSIONS
-       select HAVE_IDE
        select HAVE_ARCH_KGDB if !SMP || SPARC64
        select HAVE_ARCH_TRACEHOOK
        select HAVE_ARCH_SECCOMP if SPARC64
index 4b8d3c6..9a2f20c 100644 (file)
@@ -1287,6 +1287,9 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
                        return 1;
                break;
        }
+       /* speculation barrier */
+       case BPF_ST | BPF_NOSPEC:
+               break;
        /* ST: *(size *)(dst + off) = imm */
        case BPF_ST | BPF_MEM | BPF_W:
        case BPF_ST | BPF_MEM | BPF_H:
index e497185..cd9dc05 100644 (file)
@@ -1268,8 +1268,7 @@ static void ubd_map_req(struct ubd *dev, struct io_thread_req *io_req,
                rq_for_each_segment(bvec, req, iter) {
                        BUG_ON(i >= io_req->desc_cnt);
 
-                       io_req->io_desc[i].buffer =
-                               page_address(bvec.bv_page) + bvec.bv_offset;
+                       io_req->io_desc[i].buffer = bvec_virt(&bvec);
                        io_req->io_desc[i].length = bvec.bv_len;
                        i++;
                }
index 4927065..421fa9e 100644 (file)
@@ -119,6 +119,7 @@ config X86
        select ARCH_WANT_HUGE_PMD_SHARE
        select ARCH_WANT_LD_ORPHAN_WARN
        select ARCH_WANTS_THP_SWAP              if X86_64
+       select ARCH_HAS_PARANOID_L1D_FLUSH
        select BUILDTIME_TABLE_SORT
        select CLKEVT_I8253
        select CLOCKSOURCE_VALIDATE_LAST_CYCLE
@@ -202,7 +203,6 @@ config X86
        select HAVE_FUNCTION_TRACER
        select HAVE_GCC_PLUGINS
        select HAVE_HW_BREAKPOINT
-       select HAVE_IDE
        select HAVE_IOREMAP_PROT
        select HAVE_IRQ_EXIT_ON_IRQ_STACK       if X86_64
        select HAVE_IRQ_TIME_ACCOUNTING
index 307fd00..d82d014 100644 (file)
@@ -31,8 +31,8 @@ REALMODE_CFLAGS       := -m16 -g -Os -DDISABLE_BRANCH_PROFILING \
 
 REALMODE_CFLAGS += -ffreestanding
 REALMODE_CFLAGS += -fno-stack-protector
-REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -Wno-address-of-packed-member)
-REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), $(cc_stack_align4))
+REALMODE_CFLAGS += -Wno-address-of-packed-member
+REALMODE_CFLAGS += $(cc_stack_align4)
 REALMODE_CFLAGS += $(CLANG_FLAGS)
 export REALMODE_CFLAGS
 
@@ -48,8 +48,7 @@ export BITS
 #
 #    https://gcc.gnu.org/bugzilla/show_bug.cgi?id=53383
 #
-KBUILD_CFLAGS += -mno-sse -mno-mmx -mno-sse2 -mno-3dnow
-KBUILD_CFLAGS += $(call cc-option,-mno-avx,)
+KBUILD_CFLAGS += -mno-sse -mno-mmx -mno-sse2 -mno-3dnow -mno-avx
 
 # Intel CET isn't enabled in the kernel
 KBUILD_CFLAGS += $(call cc-option,-fcf-protection=none)
@@ -59,9 +58,8 @@ ifeq ($(CONFIG_X86_32),y)
         UTS_MACHINE := i386
         CHECKFLAGS += -D__i386__
 
-        biarch := $(call cc-option,-m32)
-        KBUILD_AFLAGS += $(biarch)
-        KBUILD_CFLAGS += $(biarch)
+        KBUILD_AFLAGS += -m32
+        KBUILD_CFLAGS += -m32
 
         KBUILD_CFLAGS += -msoft-float -mregparm=3 -freg-struct-return
 
@@ -72,7 +70,7 @@ ifeq ($(CONFIG_X86_32),y)
         # Align the stack to the register width instead of using the default
         # alignment of 16 bytes. This reduces stack usage and the number of
         # alignment instructions.
-        KBUILD_CFLAGS += $(call cc-option,$(cc_stack_align4))
+        KBUILD_CFLAGS += $(cc_stack_align4)
 
         # CPU-specific tuning. Anything which can be shared with UML should go here.
         include arch/x86/Makefile_32.cpu
@@ -93,7 +91,6 @@ else
         UTS_MACHINE := x86_64
         CHECKFLAGS += -D__x86_64__
 
-        biarch := -m64
         KBUILD_AFLAGS += -m64
         KBUILD_CFLAGS += -m64
 
@@ -104,7 +101,7 @@ else
         KBUILD_CFLAGS += $(call cc-option,-falign-loops=1)
 
         # Don't autogenerate traditional x87 instructions
-        KBUILD_CFLAGS += $(call cc-option,-mno-80387)
+        KBUILD_CFLAGS += -mno-80387
         KBUILD_CFLAGS += $(call cc-option,-mno-fp-ret-in-387)
 
         # By default gcc and clang use a stack alignment of 16 bytes for x86.
@@ -114,20 +111,17 @@ else
         # default alignment which keep the stack *mis*aligned.
         # Furthermore an alignment to the register width reduces stack usage
         # and the number of alignment instructions.
-        KBUILD_CFLAGS += $(call cc-option,$(cc_stack_align8))
+        KBUILD_CFLAGS += $(cc_stack_align8)
 
        # Use -mskip-rax-setup if supported.
        KBUILD_CFLAGS += $(call cc-option,-mskip-rax-setup)
 
         # FIXME - should be integrated in Makefile.cpu (Makefile_32.cpu)
-        cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8)
-        cflags-$(CONFIG_MPSC) += $(call cc-option,-march=nocona)
-
-        cflags-$(CONFIG_MCORE2) += \
-                $(call cc-option,-march=core2,$(call cc-option,-mtune=generic))
-       cflags-$(CONFIG_MATOM) += $(call cc-option,-march=atom) \
-               $(call cc-option,-mtune=atom,$(call cc-option,-mtune=generic))
-        cflags-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=generic)
+        cflags-$(CONFIG_MK8)           += -march=k8
+        cflags-$(CONFIG_MPSC)          += -march=nocona
+        cflags-$(CONFIG_MCORE2)                += -march=core2
+        cflags-$(CONFIG_MATOM)         += -march=atom
+        cflags-$(CONFIG_GENERIC_CPU)   += -mtune=generic
         KBUILD_CFLAGS += $(cflags-y)
 
         KBUILD_CFLAGS += -mno-red-zone
@@ -158,18 +152,6 @@ export CONFIG_X86_X32_ABI
 ifdef CONFIG_FUNCTION_GRAPH_TRACER
   ifndef CONFIG_HAVE_FENTRY
        ACCUMULATE_OUTGOING_ARGS := 1
-  else
-    ifeq ($(call cc-option-yn, -mfentry), n)
-       ACCUMULATE_OUTGOING_ARGS := 1
-
-       # GCC ignores '-maccumulate-outgoing-args' when used with '-Os'.
-       # If '-Os' is enabled, disable it and print a warning.
-        ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
-          undefine CONFIG_CC_OPTIMIZE_FOR_SIZE
-          $(warning Disabling CONFIG_CC_OPTIMIZE_FOR_SIZE.  Your compiler does not have -mfentry so you cannot optimize for size with CONFIG_FUNCTION_GRAPH_TRACER.)
-        endif
-
-    endif
   endif
 endif
 
@@ -193,7 +175,7 @@ ifdef CONFIG_RETPOLINE
   # only been fixed starting from gcc stable version 8.4.0 and
   # onwards, but not for older ones. See gcc bug #86952.
   ifndef CONFIG_CC_IS_CLANG
-    KBUILD_CFLAGS += $(call cc-option,-fno-jump-tables)
+    KBUILD_CFLAGS += -fno-jump-tables
   endif
 endif
 
@@ -275,9 +257,10 @@ endif
 $(BOOT_TARGETS): vmlinux
        $(Q)$(MAKE) $(build)=$(boot) $@
 
-PHONY += install bzlilo
-install bzlilo:
-       $(Q)$(MAKE) $(build)=$(boot) $@
+PHONY += install
+install:
+       $(CONFIG_SHELL) $(srctree)/$(boot)/install.sh $(KERNELRELEASE) \
+               $(KBUILD_IMAGE) System.map "$(INSTALL_PATH)"
 
 PHONY += vdso_install
 vdso_install:
index dfbc26a..b5aecb5 100644 (file)
@@ -133,7 +133,7 @@ quiet_cmd_genimage = GENIMAGE $3
 cmd_genimage = $(BASH) $(srctree)/$(src)/genimage.sh $2 $3 $(obj)/bzImage \
                $(obj)/mtools.conf '$(FDARGS)' $(FDINITRD)
 
-PHONY += bzdisk fdimage fdimage144 fdimage288 hdimage isoimage install
+PHONY += bzdisk fdimage fdimage144 fdimage288 hdimage isoimage
 
 # This requires write access to /dev/fd0
 # All images require syslinux to be installed; hdimage also requires
@@ -156,8 +156,3 @@ hdimage: $(imgdeps)
 isoimage: $(imgdeps)
        $(call cmd,genimage,isoimage,$(obj)/image.iso)
        @$(kecho) 'Kernel: $(obj)/image.iso is ready'
-
-install:
-       $(CONFIG_SHELL) $(srctree)/$(src)/install.sh \
-               $(KERNELRELEASE) $(obj)/bzImage \
-               System.map "$(INSTALL_PATH)"
index 95a223b..8bb92e9 100644 (file)
@@ -5,9 +5,8 @@
  * Early support for invoking 32-bit EFI services from a 64-bit kernel.
  *
  * Because this thunking occurs before ExitBootServices() we have to
- * restore the firmware's 32-bit GDT before we make EFI service calls,
- * since the firmware's 32-bit IDT is still currently installed and it
- * needs to be able to service interrupts.
+ * restore the firmware's 32-bit GDT and IDT before we make EFI service
+ * calls.
  *
  * On the plus side, we don't have to worry about mangling 64-bit
  * addresses into 32-bits because we're executing with an identity
@@ -39,7 +38,7 @@ SYM_FUNC_START(__efi64_thunk)
        /*
         * Convert x86-64 ABI params to i386 ABI
         */
-       subq    $32, %rsp
+       subq    $64, %rsp
        movl    %esi, 0x0(%rsp)
        movl    %edx, 0x4(%rsp)
        movl    %ecx, 0x8(%rsp)
@@ -49,14 +48,19 @@ SYM_FUNC_START(__efi64_thunk)
        leaq    0x14(%rsp), %rbx
        sgdt    (%rbx)
 
+       addq    $16, %rbx
+       sidt    (%rbx)
+
        /*
-        * Switch to gdt with 32-bit segments. This is the firmware GDT
-        * that was installed when the kernel started executing. This
-        * pointer was saved at the EFI stub entry point in head_64.S.
+        * Switch to IDT and GDT with 32-bit segments. This is the firmware GDT
+        * and IDT that was installed when the kernel started executing. The
+        * pointers were saved at the EFI stub entry point in head_64.S.
         *
         * Pass the saved DS selector to the 32-bit code, and use far return to
         * restore the saved CS selector.
         */
+       leaq    efi32_boot_idt(%rip), %rax
+       lidt    (%rax)
        leaq    efi32_boot_gdt(%rip), %rax
        lgdt    (%rax)
 
@@ -67,7 +71,7 @@ SYM_FUNC_START(__efi64_thunk)
        pushq   %rax
        lretq
 
-1:     addq    $32, %rsp
+1:     addq    $64, %rsp
        movq    %rdi, %rax
 
        pop     %rbx
@@ -128,10 +132,13 @@ SYM_FUNC_START_LOCAL(efi_enter32)
 
        /*
         * Some firmware will return with interrupts enabled. Be sure to
-        * disable them before we switch GDTs.
+        * disable them before we switch GDTs and IDTs.
         */
        cli
 
+       lidtl   (%ebx)
+       subl    $16, %ebx
+
        lgdtl   (%ebx)
 
        movl    %cr4, %eax
@@ -166,6 +173,11 @@ SYM_DATA_START(efi32_boot_gdt)
        .quad   0
 SYM_DATA_END(efi32_boot_gdt)
 
+SYM_DATA_START(efi32_boot_idt)
+       .word   0
+       .quad   0
+SYM_DATA_END(efi32_boot_idt)
+
 SYM_DATA_START(efi32_boot_cs)
        .word   0
 SYM_DATA_END(efi32_boot_cs)
index a2347de..572c535 100644 (file)
@@ -319,6 +319,9 @@ SYM_INNER_LABEL(efi32_pe_stub_entry, SYM_L_LOCAL)
        movw    %cs, rva(efi32_boot_cs)(%ebp)
        movw    %ds, rva(efi32_boot_ds)(%ebp)
 
+       /* Store firmware IDT descriptor */
+       sidtl   rva(efi32_boot_idt)(%ebp)
+
        /* Disable paging */
        movl    %cr0, %eax
        btrl    $X86_CR0_PG_BIT, %eax
index e366907..67c3208 100644 (file)
@@ -668,7 +668,7 @@ static bool process_mem_region(struct mem_vector *region,
 
                if (slot_area_index == MAX_SLOT_AREA) {
                        debug_putstr("Aborted e820/efi memmap scan when walking immovable regions(slot_areas full)!\n");
-                       return 1;
+                       return true;
                }
        }
 #endif
index d0959e7..f307c93 100644 (file)
@@ -88,6 +88,12 @@ nhpoly1305-avx2-y := nh-avx2-x86_64.o nhpoly1305-avx2-glue.o
 
 obj-$(CONFIG_CRYPTO_CURVE25519_X86) += curve25519-x86_64.o
 
+obj-$(CONFIG_CRYPTO_SM4_AESNI_AVX_X86_64) += sm4-aesni-avx-x86_64.o
+sm4-aesni-avx-x86_64-y := sm4-aesni-avx-asm_64.o sm4_aesni_avx_glue.o
+
+obj-$(CONFIG_CRYPTO_SM4_AESNI_AVX2_X86_64) += sm4-aesni-avx2-x86_64.o
+sm4-aesni-avx2-x86_64-y := sm4-aesni-avx2-asm_64.o sm4_aesni_avx2_glue.o
+
 quiet_cmd_perlasm = PERLASM $@
       cmd_perlasm = $(PERL) $< > $@
 $(obj)/%.S: $(src)/%.pl FORCE
index 2144e54..0fc961b 100644 (file)
@@ -849,6 +849,8 @@ static int xts_crypt(struct skcipher_request *req, bool encrypt)
                return -EINVAL;
 
        err = skcipher_walk_virt(&walk, req, false);
+       if (!walk.nbytes)
+               return err;
 
        if (unlikely(tail > 0 && walk.nbytes < walk.total)) {
                int blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2;
@@ -862,7 +864,10 @@ static int xts_crypt(struct skcipher_request *req, bool encrypt)
                skcipher_request_set_crypt(&subreq, req->src, req->dst,
                                           blocks * AES_BLOCK_SIZE, req->iv);
                req = &subreq;
+
                err = skcipher_walk_virt(&walk, req, false);
+               if (err)
+                       return err;
        } else {
                tail = 0;
        }
diff --git a/arch/x86/crypto/sm4-aesni-avx-asm_64.S b/arch/x86/crypto/sm4-aesni-avx-asm_64.S
new file mode 100644 (file)
index 0000000..fa2c3f5
--- /dev/null
@@ -0,0 +1,589 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * SM4 Cipher Algorithm, AES-NI/AVX optimized.
+ * as specified in
+ * https://tools.ietf.org/id/draft-ribose-cfrg-sm4-10.html
+ *
+ * Copyright (C) 2018 Markku-Juhani O. Saarinen <mjos@iki.fi>
+ * Copyright (C) 2020 Jussi Kivilinna <jussi.kivilinna@iki.fi>
+ * Copyright (c) 2021 Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
+ */
+
+/* Based on SM4 AES-NI work by libgcrypt and Markku-Juhani O. Saarinen at:
+ *  https://github.com/mjosaarinen/sm4ni
+ */
+
+#include <linux/linkage.h>
+#include <asm/frame.h>
+
+#define rRIP         (%rip)
+
+#define RX0          %xmm0
+#define RX1          %xmm1
+#define MASK_4BIT    %xmm2
+#define RTMP0        %xmm3
+#define RTMP1        %xmm4
+#define RTMP2        %xmm5
+#define RTMP3        %xmm6
+#define RTMP4        %xmm7
+
+#define RA0          %xmm8
+#define RA1          %xmm9
+#define RA2          %xmm10
+#define RA3          %xmm11
+
+#define RB0          %xmm12
+#define RB1          %xmm13
+#define RB2          %xmm14
+#define RB3          %xmm15
+
+#define RNOT         %xmm0
+#define RBSWAP       %xmm1
+
+
+/* Transpose four 32-bit words between 128-bit vectors. */
+#define transpose_4x4(x0, x1, x2, x3, t1, t2) \
+       vpunpckhdq x1, x0, t2;                \
+       vpunpckldq x1, x0, x0;                \
+                                             \
+       vpunpckldq x3, x2, t1;                \
+       vpunpckhdq x3, x2, x2;                \
+                                             \
+       vpunpckhqdq t1, x0, x1;               \
+       vpunpcklqdq t1, x0, x0;               \
+                                             \
+       vpunpckhqdq x2, t2, x3;               \
+       vpunpcklqdq x2, t2, x2;
+
+/* pre-SubByte transform. */
+#define transform_pre(x, lo_t, hi_t, mask4bit, tmp0) \
+       vpand x, mask4bit, tmp0;                     \
+       vpandn x, mask4bit, x;                       \
+       vpsrld $4, x, x;                             \
+                                                    \
+       vpshufb tmp0, lo_t, tmp0;                    \
+       vpshufb x, hi_t, x;                          \
+       vpxor tmp0, x, x;
+
+/* post-SubByte transform. Note: x has been XOR'ed with mask4bit by
+ * 'vaeslastenc' instruction.
+ */
+#define transform_post(x, lo_t, hi_t, mask4bit, tmp0) \
+       vpandn mask4bit, x, tmp0;                     \
+       vpsrld $4, x, x;                              \
+       vpand x, mask4bit, x;                         \
+                                                     \
+       vpshufb tmp0, lo_t, tmp0;                     \
+       vpshufb x, hi_t, x;                           \
+       vpxor tmp0, x, x;
+
+
+.section       .rodata.cst164, "aM", @progbits, 164
+.align 16
+
+/*
+ * Following four affine transform look-up tables are from work by
+ * Markku-Juhani O. Saarinen, at https://github.com/mjosaarinen/sm4ni
+ *
+ * These allow exposing SM4 S-Box from AES SubByte.
+ */
+
+/* pre-SubByte affine transform, from SM4 field to AES field. */
+.Lpre_tf_lo_s:
+       .quad 0x9197E2E474720701, 0xC7C1B4B222245157
+.Lpre_tf_hi_s:
+       .quad 0xE240AB09EB49A200, 0xF052B91BF95BB012
+
+/* post-SubByte affine transform, from AES field to SM4 field. */
+.Lpost_tf_lo_s:
+       .quad 0x5B67F2CEA19D0834, 0xEDD14478172BBE82
+.Lpost_tf_hi_s:
+       .quad 0xAE7201DD73AFDC00, 0x11CDBE62CC1063BF
+
+/* For isolating SubBytes from AESENCLAST, inverse shift row */
+.Linv_shift_row:
+       .byte 0x00, 0x0d, 0x0a, 0x07, 0x04, 0x01, 0x0e, 0x0b
+       .byte 0x08, 0x05, 0x02, 0x0f, 0x0c, 0x09, 0x06, 0x03
+
+/* Inverse shift row + Rotate left by 8 bits on 32-bit words with vpshufb */
+.Linv_shift_row_rol_8:
+       .byte 0x07, 0x00, 0x0d, 0x0a, 0x0b, 0x04, 0x01, 0x0e
+       .byte 0x0f, 0x08, 0x05, 0x02, 0x03, 0x0c, 0x09, 0x06
+
+/* Inverse shift row + Rotate left by 16 bits on 32-bit words with vpshufb */
+.Linv_shift_row_rol_16:
+       .byte 0x0a, 0x07, 0x00, 0x0d, 0x0e, 0x0b, 0x04, 0x01
+       .byte 0x02, 0x0f, 0x08, 0x05, 0x06, 0x03, 0x0c, 0x09
+
+/* Inverse shift row + Rotate left by 24 bits on 32-bit words with vpshufb */
+.Linv_shift_row_rol_24:
+       .byte 0x0d, 0x0a, 0x07, 0x00, 0x01, 0x0e, 0x0b, 0x04
+       .byte 0x05, 0x02, 0x0f, 0x08, 0x09, 0x06, 0x03, 0x0c
+
+/* For CTR-mode IV byteswap */
+.Lbswap128_mask:
+       .byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
+
+/* For input word byte-swap */
+.Lbswap32_mask:
+       .byte 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
+
+.align 4
+/* 4-bit mask */
+.L0f0f0f0f:
+       .long 0x0f0f0f0f
+
+
+.text
+.align 16
+
+/*
+ * void sm4_aesni_avx_crypt4(const u32 *rk, u8 *dst,
+ *                           const u8 *src, int nblocks)
+ */
+.align 8
+SYM_FUNC_START(sm4_aesni_avx_crypt4)
+       /* input:
+        *      %rdi: round key array, CTX
+        *      %rsi: dst (1..4 blocks)
+        *      %rdx: src (1..4 blocks)
+        *      %rcx: num blocks (1..4)
+        */
+       FRAME_BEGIN
+
+       vmovdqu 0*16(%rdx), RA0;
+       vmovdqa RA0, RA1;
+       vmovdqa RA0, RA2;
+       vmovdqa RA0, RA3;
+       cmpq $2, %rcx;
+       jb .Lblk4_load_input_done;
+       vmovdqu 1*16(%rdx), RA1;
+       je .Lblk4_load_input_done;
+       vmovdqu 2*16(%rdx), RA2;
+       cmpq $3, %rcx;
+       je .Lblk4_load_input_done;
+       vmovdqu 3*16(%rdx), RA3;
+
+.Lblk4_load_input_done:
+
+       vmovdqa .Lbswap32_mask rRIP, RTMP2;
+       vpshufb RTMP2, RA0, RA0;
+       vpshufb RTMP2, RA1, RA1;
+       vpshufb RTMP2, RA2, RA2;
+       vpshufb RTMP2, RA3, RA3;
+
+       vbroadcastss .L0f0f0f0f rRIP, MASK_4BIT;
+       vmovdqa .Lpre_tf_lo_s rRIP, RTMP4;
+       vmovdqa .Lpre_tf_hi_s rRIP, RB0;
+       vmovdqa .Lpost_tf_lo_s rRIP, RB1;
+       vmovdqa .Lpost_tf_hi_s rRIP, RB2;
+       vmovdqa .Linv_shift_row rRIP, RB3;
+       vmovdqa .Linv_shift_row_rol_8 rRIP, RTMP2;
+       vmovdqa .Linv_shift_row_rol_16 rRIP, RTMP3;
+       transpose_4x4(RA0, RA1, RA2, RA3, RTMP0, RTMP1);
+
+#define ROUND(round, s0, s1, s2, s3)                                \
+       vbroadcastss (4*(round))(%rdi), RX0;                        \
+       vpxor s1, RX0, RX0;                                         \
+       vpxor s2, RX0, RX0;                                         \
+       vpxor s3, RX0, RX0; /* s1 ^ s2 ^ s3 ^ rk */                 \
+                                                                   \
+       /* sbox, non-linear part */                                 \
+       transform_pre(RX0, RTMP4, RB0, MASK_4BIT, RTMP0);           \
+       vaesenclast MASK_4BIT, RX0, RX0;                            \
+       transform_post(RX0, RB1, RB2, MASK_4BIT, RTMP0);            \
+                                                                   \
+       /* linear part */                                           \
+       vpshufb RB3, RX0, RTMP0;                                    \
+       vpxor RTMP0, s0, s0; /* s0 ^ x */                           \
+       vpshufb RTMP2, RX0, RTMP1;                                  \
+       vpxor RTMP1, RTMP0, RTMP0; /* x ^ rol(x,8) */               \
+       vpshufb RTMP3, RX0, RTMP1;                                  \
+       vpxor RTMP1, RTMP0, RTMP0; /* x ^ rol(x,8) ^ rol(x,16) */   \
+       vpshufb .Linv_shift_row_rol_24 rRIP, RX0, RTMP1;            \
+       vpxor RTMP1, s0, s0; /* s0 ^ x ^ rol(x,24) */               \
+       vpslld $2, RTMP0, RTMP1;                                    \
+       vpsrld $30, RTMP0, RTMP0;                                   \
+       vpxor RTMP0, s0, s0;                                        \
+       /* s0 ^ x ^ rol(x,2) ^ rol(x,10) ^ rol(x,18) ^ rol(x,24) */ \
+       vpxor RTMP1, s0, s0;
+
+       leaq (32*4)(%rdi), %rax;
+.align 16
+.Lroundloop_blk4:
+       ROUND(0, RA0, RA1, RA2, RA3);
+       ROUND(1, RA1, RA2, RA3, RA0);
+       ROUND(2, RA2, RA3, RA0, RA1);
+       ROUND(3, RA3, RA0, RA1, RA2);
+       leaq (4*4)(%rdi), %rdi;
+       cmpq %rax, %rdi;
+       jne .Lroundloop_blk4;
+
+#undef ROUND
+
+       vmovdqa .Lbswap128_mask rRIP, RTMP2;
+
+       transpose_4x4(RA0, RA1, RA2, RA3, RTMP0, RTMP1);
+       vpshufb RTMP2, RA0, RA0;
+       vpshufb RTMP2, RA1, RA1;
+       vpshufb RTMP2, RA2, RA2;
+       vpshufb RTMP2, RA3, RA3;
+
+       vmovdqu RA0, 0*16(%rsi);
+       cmpq $2, %rcx;
+       jb .Lblk4_store_output_done;
+       vmovdqu RA1, 1*16(%rsi);
+       je .Lblk4_store_output_done;
+       vmovdqu RA2, 2*16(%rsi);
+       cmpq $3, %rcx;
+       je .Lblk4_store_output_done;
+       vmovdqu RA3, 3*16(%rsi);
+
+.Lblk4_store_output_done:
+       vzeroall;
+       FRAME_END
+       ret;
+SYM_FUNC_END(sm4_aesni_avx_crypt4)
+
+.align 8
+SYM_FUNC_START_LOCAL(__sm4_crypt_blk8)
+       /* input:
+        *      %rdi: round key array, CTX
+        *      RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3: eight parallel
+        *                                              plaintext blocks
+        * output:
+        *      RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3: eight parallel
+        *                                              ciphertext blocks
+        */
+       FRAME_BEGIN
+
+       vmovdqa .Lbswap32_mask rRIP, RTMP2;
+       vpshufb RTMP2, RA0, RA0;
+       vpshufb RTMP2, RA1, RA1;
+       vpshufb RTMP2, RA2, RA2;
+       vpshufb RTMP2, RA3, RA3;
+       vpshufb RTMP2, RB0, RB0;
+       vpshufb RTMP2, RB1, RB1;
+       vpshufb RTMP2, RB2, RB2;
+       vpshufb RTMP2, RB3, RB3;
+
+       vbroadcastss .L0f0f0f0f rRIP, MASK_4BIT;
+       transpose_4x4(RA0, RA1, RA2, RA3, RTMP0, RTMP1);
+       transpose_4x4(RB0, RB1, RB2, RB3, RTMP0, RTMP1);
+
+#define ROUND(round, s0, s1, s2, s3, r0, r1, r2, r3)                \
+       vbroadcastss (4*(round))(%rdi), RX0;                        \
+       vmovdqa .Lpre_tf_lo_s rRIP, RTMP4;                          \
+       vmovdqa .Lpre_tf_hi_s rRIP, RTMP1;                          \
+       vmovdqa RX0, RX1;                                           \
+       vpxor s1, RX0, RX0;                                         \
+       vpxor s2, RX0, RX0;                                         \
+       vpxor s3, RX0, RX0; /* s1 ^ s2 ^ s3 ^ rk */                 \
+       vmovdqa .Lpost_tf_lo_s rRIP, RTMP2;                         \
+       vmovdqa .Lpost_tf_hi_s rRIP, RTMP3;                         \
+       vpxor r1, RX1, RX1;                                         \
+       vpxor r2, RX1, RX1;                                         \
+       vpxor r3, RX1, RX1; /* r1 ^ r2 ^ r3 ^ rk */                 \
+                                                                    \
+       /* sbox, non-linear part */                                 \
+       transform_pre(RX0, RTMP4, RTMP1, MASK_4BIT, RTMP0);         \
+       transform_pre(RX1, RTMP4, RTMP1, MASK_4BIT, RTMP0);         \
+       vmovdqa .Linv_shift_row rRIP, RTMP4;                        \
+       vaesenclast MASK_4BIT, RX0, RX0;                            \
+       vaesenclast MASK_4BIT, RX1, RX1;                            \
+       transform_post(RX0, RTMP2, RTMP3, MASK_4BIT, RTMP0);        \
+       transform_post(RX1, RTMP2, RTMP3, MASK_4BIT, RTMP0);        \
+                                                                    \
+       /* linear part */                                           \
+       vpshufb RTMP4, RX0, RTMP0;                                  \
+       vpxor RTMP0, s0, s0; /* s0 ^ x */                           \
+       vpshufb RTMP4, RX1, RTMP2;                                  \
+       vmovdqa .Linv_shift_row_rol_8 rRIP, RTMP4;                  \
+       vpxor RTMP2, r0, r0; /* r0 ^ x */                           \
+       vpshufb RTMP4, RX0, RTMP1;                                  \
+       vpxor RTMP1, RTMP0, RTMP0; /* x ^ rol(x,8) */               \
+       vpshufb RTMP4, RX1, RTMP3;                                  \
+       vmovdqa .Linv_shift_row_rol_16 rRIP, RTMP4;                 \
+       vpxor RTMP3, RTMP2, RTMP2; /* x ^ rol(x,8) */               \
+       vpshufb RTMP4, RX0, RTMP1;                                  \
+       vpxor RTMP1, RTMP0, RTMP0; /* x ^ rol(x,8) ^ rol(x,16) */   \
+       vpshufb RTMP4, RX1, RTMP3;                                  \
+       vmovdqa .Linv_shift_row_rol_24 rRIP, RTMP4;                 \
+       vpxor RTMP3, RTMP2, RTMP2; /* x ^ rol(x,8) ^ rol(x,16) */   \
+       vpshufb RTMP4, RX0, RTMP1;                                  \
+       vpxor RTMP1, s0, s0; /* s0 ^ x ^ rol(x,24) */               \
+       /* s0 ^ x ^ rol(x,2) ^ rol(x,10) ^ rol(x,18) ^ rol(x,24) */ \
+       vpslld $2, RTMP0, RTMP1;                                    \
+       vpsrld $30, RTMP0, RTMP0;                                   \
+       vpxor RTMP0, s0, s0;                                        \
+       vpxor RTMP1, s0, s0;                                        \
+       vpshufb RTMP4, RX1, RTMP3;                                  \
+       vpxor RTMP3, r0, r0; /* r0 ^ x ^ rol(x,24) */               \
+       /* r0 ^ x ^ rol(x,2) ^ rol(x,10) ^ rol(x,18) ^ rol(x,24) */ \
+       vpslld $2, RTMP2, RTMP3;                                    \
+       vpsrld $30, RTMP2, RTMP2;                                   \
+       vpxor RTMP2, r0, r0;                                        \
+       vpxor RTMP3, r0, r0;
+
+       leaq (32*4)(%rdi), %rax;
+.align 16
+.Lroundloop_blk8:
+       ROUND(0, RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3);
+       ROUND(1, RA1, RA2, RA3, RA0, RB1, RB2, RB3, RB0);
+       ROUND(2, RA2, RA3, RA0, RA1, RB2, RB3, RB0, RB1);
+       ROUND(3, RA3, RA0, RA1, RA2, RB3, RB0, RB1, RB2);
+       leaq (4*4)(%rdi), %rdi;
+       cmpq %rax, %rdi;
+       jne .Lroundloop_blk8;
+
+#undef ROUND
+
+       vmovdqa .Lbswap128_mask rRIP, RTMP2;
+
+       transpose_4x4(RA0, RA1, RA2, RA3, RTMP0, RTMP1);
+       transpose_4x4(RB0, RB1, RB2, RB3, RTMP0, RTMP1);
+       vpshufb RTMP2, RA0, RA0;
+       vpshufb RTMP2, RA1, RA1;
+       vpshufb RTMP2, RA2, RA2;
+       vpshufb RTMP2, RA3, RA3;
+       vpshufb RTMP2, RB0, RB0;
+       vpshufb RTMP2, RB1, RB1;
+       vpshufb RTMP2, RB2, RB2;
+       vpshufb RTMP2, RB3, RB3;
+
+       FRAME_END
+       ret;
+SYM_FUNC_END(__sm4_crypt_blk8)
+
+/*
+ * void sm4_aesni_avx_crypt8(const u32 *rk, u8 *dst,
+ *                           const u8 *src, int nblocks)
+ */
+.align 8
+SYM_FUNC_START(sm4_aesni_avx_crypt8)
+       /* input:
+        *      %rdi: round key array, CTX
+        *      %rsi: dst (1..8 blocks)
+        *      %rdx: src (1..8 blocks)
+        *      %rcx: num blocks (1..8)
+        */
+       FRAME_BEGIN
+
+       cmpq $5, %rcx;
+       jb sm4_aesni_avx_crypt4;
+       vmovdqu (0 * 16)(%rdx), RA0;
+       vmovdqu (1 * 16)(%rdx), RA1;
+       vmovdqu (2 * 16)(%rdx), RA2;
+       vmovdqu (3 * 16)(%rdx), RA3;
+       vmovdqu (4 * 16)(%rdx), RB0;
+       vmovdqa RB0, RB1;
+       vmovdqa RB0, RB2;
+       vmovdqa RB0, RB3;
+       je .Lblk8_load_input_done;
+       vmovdqu (5 * 16)(%rdx), RB1;
+       cmpq $7, %rcx;
+       jb .Lblk8_load_input_done;
+       vmovdqu (6 * 16)(%rdx), RB2;
+       je .Lblk8_load_input_done;
+       vmovdqu (7 * 16)(%rdx), RB3;
+
+.Lblk8_load_input_done:
+       call __sm4_crypt_blk8;
+
+       cmpq $6, %rcx;
+       vmovdqu RA0, (0 * 16)(%rsi);
+       vmovdqu RA1, (1 * 16)(%rsi);
+       vmovdqu RA2, (2 * 16)(%rsi);
+       vmovdqu RA3, (3 * 16)(%rsi);
+       vmovdqu RB0, (4 * 16)(%rsi);
+       jb .Lblk8_store_output_done;
+       vmovdqu RB1, (5 * 16)(%rsi);
+       je .Lblk8_store_output_done;
+       vmovdqu RB2, (6 * 16)(%rsi);
+       cmpq $7, %rcx;
+       je .Lblk8_store_output_done;
+       vmovdqu RB3, (7 * 16)(%rsi);
+
+.Lblk8_store_output_done:
+       vzeroall;
+       FRAME_END
+       ret;
+SYM_FUNC_END(sm4_aesni_avx_crypt8)
+
+/*
+ * void sm4_aesni_avx_ctr_enc_blk8(const u32 *rk, u8 *dst,
+ *                                 const u8 *src, u8 *iv)
+ */
+.align 8
+SYM_FUNC_START(sm4_aesni_avx_ctr_enc_blk8)
+       /* input:
+        *      %rdi: round key array, CTX
+        *      %rsi: dst (8 blocks)
+        *      %rdx: src (8 blocks)
+        *      %rcx: iv (big endian, 128bit)
+        */
+       FRAME_BEGIN
+
+       /* load IV and byteswap */
+       vmovdqu (%rcx), RA0;
+
+       vmovdqa .Lbswap128_mask rRIP, RBSWAP;
+       vpshufb RBSWAP, RA0, RTMP0; /* be => le */
+
+       vpcmpeqd RNOT, RNOT, RNOT;
+       vpsrldq $8, RNOT, RNOT; /* low: -1, high: 0 */
+
+#define inc_le128(x, minus_one, tmp) \
+       vpcmpeqq minus_one, x, tmp;  \
+       vpsubq minus_one, x, x;      \
+       vpslldq $8, tmp, tmp;        \
+       vpsubq tmp, x, x;
+
+       /* construct IVs */
+       inc_le128(RTMP0, RNOT, RTMP2); /* +1 */
+       vpshufb RBSWAP, RTMP0, RA1;
+       inc_le128(RTMP0, RNOT, RTMP2); /* +2 */
+       vpshufb RBSWAP, RTMP0, RA2;
+       inc_le128(RTMP0, RNOT, RTMP2); /* +3 */
+       vpshufb RBSWAP, RTMP0, RA3;
+       inc_le128(RTMP0, RNOT, RTMP2); /* +4 */
+       vpshufb RBSWAP, RTMP0, RB0;
+       inc_le128(RTMP0, RNOT, RTMP2); /* +5 */
+       vpshufb RBSWAP, RTMP0, RB1;
+       inc_le128(RTMP0, RNOT, RTMP2); /* +6 */
+       vpshufb RBSWAP, RTMP0, RB2;
+       inc_le128(RTMP0, RNOT, RTMP2); /* +7 */
+       vpshufb RBSWAP, RTMP0, RB3;
+       inc_le128(RTMP0, RNOT, RTMP2); /* +8 */
+       vpshufb RBSWAP, RTMP0, RTMP1;
+
+       /* store new IV */
+       vmovdqu RTMP1, (%rcx);
+
+       call __sm4_crypt_blk8;
+
+       vpxor (0 * 16)(%rdx), RA0, RA0;
+       vpxor (1 * 16)(%rdx), RA1, RA1;
+       vpxor (2 * 16)(%rdx), RA2, RA2;
+       vpxor (3 * 16)(%rdx), RA3, RA3;
+       vpxor (4 * 16)(%rdx), RB0, RB0;
+       vpxor (5 * 16)(%rdx), RB1, RB1;
+       vpxor (6 * 16)(%rdx), RB2, RB2;
+       vpxor (7 * 16)(%rdx), RB3, RB3;
+
+       vmovdqu RA0, (0 * 16)(%rsi);
+       vmovdqu RA1, (1 * 16)(%rsi);
+       vmovdqu RA2, (2 * 16)(%rsi);
+       vmovdqu RA3, (3 * 16)(%rsi);
+       vmovdqu RB0, (4 * 16)(%rsi);
+       vmovdqu RB1, (5 * 16)(%rsi);
+       vmovdqu RB2, (6 * 16)(%rsi);
+       vmovdqu RB3, (7 * 16)(%rsi);
+
+       vzeroall;
+       FRAME_END
+       ret;
+SYM_FUNC_END(sm4_aesni_avx_ctr_enc_blk8)
+
+/*
+ * void sm4_aesni_avx_cbc_dec_blk8(const u32 *rk, u8 *dst,
+ *                                 const u8 *src, u8 *iv)
+ */
+.align 8
+SYM_FUNC_START(sm4_aesni_avx_cbc_dec_blk8)
+       /* input:
+        *      %rdi: round key array, CTX
+        *      %rsi: dst (8 blocks)
+        *      %rdx: src (8 blocks)
+        *      %rcx: iv
+        */
+       FRAME_BEGIN
+
+       vmovdqu (0 * 16)(%rdx), RA0;
+       vmovdqu (1 * 16)(%rdx), RA1;
+       vmovdqu (2 * 16)(%rdx), RA2;
+       vmovdqu (3 * 16)(%rdx), RA3;
+       vmovdqu (4 * 16)(%rdx), RB0;
+       vmovdqu (5 * 16)(%rdx), RB1;
+       vmovdqu (6 * 16)(%rdx), RB2;
+       vmovdqu (7 * 16)(%rdx), RB3;
+
+       call __sm4_crypt_blk8;
+
+       vmovdqu (7 * 16)(%rdx), RNOT;
+       vpxor (%rcx), RA0, RA0;
+       vpxor (0 * 16)(%rdx), RA1, RA1;
+       vpxor (1 * 16)(%rdx), RA2, RA2;
+       vpxor (2 * 16)(%rdx), RA3, RA3;
+       vpxor (3 * 16)(%rdx), RB0, RB0;
+       vpxor (4 * 16)(%rdx), RB1, RB1;
+       vpxor (5 * 16)(%rdx), RB2, RB2;
+       vpxor (6 * 16)(%rdx), RB3, RB3;
+       vmovdqu RNOT, (%rcx); /* store new IV */
+
+       vmovdqu RA0, (0 * 16)(%rsi);
+       vmovdqu RA1, (1 * 16)(%rsi);
+       vmovdqu RA2, (2 * 16)(%rsi);
+       vmovdqu RA3, (3 * 16)(%rsi);
+       vmovdqu RB0, (4 * 16)(%rsi);
+       vmovdqu RB1, (5 * 16)(%rsi);
+       vmovdqu RB2, (6 * 16)(%rsi);
+       vmovdqu RB3, (7 * 16)(%rsi);
+
+       vzeroall;
+       FRAME_END
+       ret;
+SYM_FUNC_END(sm4_aesni_avx_cbc_dec_blk8)
+
+/*
+ * void sm4_aesni_avx_cfb_dec_blk8(const u32 *rk, u8 *dst,
+ *                                 const u8 *src, u8 *iv)
+ */
+.align 8
+SYM_FUNC_START(sm4_aesni_avx_cfb_dec_blk8)
+       /* input:
+        *      %rdi: round key array, CTX
+        *      %rsi: dst (8 blocks)
+        *      %rdx: src (8 blocks)
+        *      %rcx: iv
+        */
+       FRAME_BEGIN
+
+       /* Load input */
+       vmovdqu (%rcx), RA0;
+       vmovdqu 0 * 16(%rdx), RA1;
+       vmovdqu 1 * 16(%rdx), RA2;
+       vmovdqu 2 * 16(%rdx), RA3;
+       vmovdqu 3 * 16(%rdx), RB0;
+       vmovdqu 4 * 16(%rdx), RB1;
+       vmovdqu 5 * 16(%rdx), RB2;
+       vmovdqu 6 * 16(%rdx), RB3;
+
+       /* Update IV */
+       vmovdqu 7 * 16(%rdx), RNOT;
+       vmovdqu RNOT, (%rcx);
+
+       call __sm4_crypt_blk8;
+
+       vpxor (0 * 16)(%rdx), RA0, RA0;
+       vpxor (1 * 16)(%rdx), RA1, RA1;
+       vpxor (2 * 16)(%rdx), RA2, RA2;
+       vpxor (3 * 16)(%rdx), RA3, RA3;
+       vpxor (4 * 16)(%rdx), RB0, RB0;
+       vpxor (5 * 16)(%rdx), RB1, RB1;
+       vpxor (6 * 16)(%rdx), RB2, RB2;
+       vpxor (7 * 16)(%rdx), RB3, RB3;
+
+       vmovdqu RA0, (0 * 16)(%rsi);
+       vmovdqu RA1, (1 * 16)(%rsi);
+       vmovdqu RA2, (2 * 16)(%rsi);
+       vmovdqu RA3, (3 * 16)(%rsi);
+       vmovdqu RB0, (4 * 16)(%rsi);
+       vmovdqu RB1, (5 * 16)(%rsi);
+       vmovdqu RB2, (6 * 16)(%rsi);
+       vmovdqu RB3, (7 * 16)(%rsi);
+
+       vzeroall;
+       FRAME_END
+       ret;
+SYM_FUNC_END(sm4_aesni_avx_cfb_dec_blk8)
diff --git a/arch/x86/crypto/sm4-aesni-avx2-asm_64.S b/arch/x86/crypto/sm4-aesni-avx2-asm_64.S
new file mode 100644 (file)
index 0000000..d2ffd7f
--- /dev/null
@@ -0,0 +1,497 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * SM4 Cipher Algorithm, AES-NI/AVX2 optimized.
+ * as specified in
+ * https://tools.ietf.org/id/draft-ribose-cfrg-sm4-10.html
+ *
+ * Copyright (C) 2018 Markku-Juhani O. Saarinen <mjos@iki.fi>
+ * Copyright (C) 2020 Jussi Kivilinna <jussi.kivilinna@iki.fi>
+ * Copyright (c) 2021 Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
+ */
+
+/* Based on SM4 AES-NI work by libgcrypt and Markku-Juhani O. Saarinen at:
+ *  https://github.com/mjosaarinen/sm4ni
+ */
+
+#include <linux/linkage.h>
+#include <asm/frame.h>
+
+#define rRIP         (%rip)
+
+/* vector registers */
+#define RX0          %ymm0
+#define RX1          %ymm1
+#define MASK_4BIT    %ymm2
+#define RTMP0        %ymm3
+#define RTMP1        %ymm4
+#define RTMP2        %ymm5
+#define RTMP3        %ymm6
+#define RTMP4        %ymm7
+
+#define RA0          %ymm8
+#define RA1          %ymm9
+#define RA2          %ymm10
+#define RA3          %ymm11
+
+#define RB0          %ymm12
+#define RB1          %ymm13
+#define RB2          %ymm14
+#define RB3          %ymm15
+
+#define RNOT         %ymm0
+#define RBSWAP       %ymm1
+
+#define RX0x         %xmm0
+#define RX1x         %xmm1
+#define MASK_4BITx   %xmm2
+
+#define RNOTx        %xmm0
+#define RBSWAPx      %xmm1
+
+#define RTMP0x       %xmm3
+#define RTMP1x       %xmm4
+#define RTMP2x       %xmm5
+#define RTMP3x       %xmm6
+#define RTMP4x       %xmm7
+
+
+/* helper macros */
+
+/* Transpose four 32-bit words between 128-bit vector lanes. */
+#define transpose_4x4(x0, x1, x2, x3, t1, t2) \
+       vpunpckhdq x1, x0, t2;                \
+       vpunpckldq x1, x0, x0;                \
+                                             \
+       vpunpckldq x3, x2, t1;                \
+       vpunpckhdq x3, x2, x2;                \
+                                             \
+       vpunpckhqdq t1, x0, x1;               \
+       vpunpcklqdq t1, x0, x0;               \
+                                             \
+       vpunpckhqdq x2, t2, x3;               \
+       vpunpcklqdq x2, t2, x2;
+
+/* post-SubByte transform. */
+#define transform_pre(x, lo_t, hi_t, mask4bit, tmp0) \
+       vpand x, mask4bit, tmp0;                     \
+       vpandn x, mask4bit, x;                       \
+       vpsrld $4, x, x;                             \
+                                                    \
+       vpshufb tmp0, lo_t, tmp0;                    \
+       vpshufb x, hi_t, x;                          \
+       vpxor tmp0, x, x;
+
+/* post-SubByte transform. Note: x has been XOR'ed with mask4bit by
+ * 'vaeslastenc' instruction. */
+#define transform_post(x, lo_t, hi_t, mask4bit, tmp0) \
+       vpandn mask4bit, x, tmp0;                     \
+       vpsrld $4, x, x;                              \
+       vpand x, mask4bit, x;                         \
+                                                     \
+       vpshufb tmp0, lo_t, tmp0;                     \
+       vpshufb x, hi_t, x;                           \
+       vpxor tmp0, x, x;
+
+
+.section       .rodata.cst164, "aM", @progbits, 164
+.align 16
+
+/*
+ * Following four affine transform look-up tables are from work by
+ * Markku-Juhani O. Saarinen, at https://github.com/mjosaarinen/sm4ni
+ *
+ * These allow exposing SM4 S-Box from AES SubByte.
+ */
+
+/* pre-SubByte affine transform, from SM4 field to AES field. */
+.Lpre_tf_lo_s:
+       .quad 0x9197E2E474720701, 0xC7C1B4B222245157
+.Lpre_tf_hi_s:
+       .quad 0xE240AB09EB49A200, 0xF052B91BF95BB012
+
+/* post-SubByte affine transform, from AES field to SM4 field. */
+.Lpost_tf_lo_s:
+       .quad 0x5B67F2CEA19D0834, 0xEDD14478172BBE82
+.Lpost_tf_hi_s:
+       .quad 0xAE7201DD73AFDC00, 0x11CDBE62CC1063BF
+
+/* For isolating SubBytes from AESENCLAST, inverse shift row */
+.Linv_shift_row:
+       .byte 0x00, 0x0d, 0x0a, 0x07, 0x04, 0x01, 0x0e, 0x0b
+       .byte 0x08, 0x05, 0x02, 0x0f, 0x0c, 0x09, 0x06, 0x03
+
+/* Inverse shift row + Rotate left by 8 bits on 32-bit words with vpshufb */
+.Linv_shift_row_rol_8:
+       .byte 0x07, 0x00, 0x0d, 0x0a, 0x0b, 0x04, 0x01, 0x0e
+       .byte 0x0f, 0x08, 0x05, 0x02, 0x03, 0x0c, 0x09, 0x06
+
+/* Inverse shift row + Rotate left by 16 bits on 32-bit words with vpshufb */
+.Linv_shift_row_rol_16:
+       .byte 0x0a, 0x07, 0x00, 0x0d, 0x0e, 0x0b, 0x04, 0x01
+       .byte 0x02, 0x0f, 0x08, 0x05, 0x06, 0x03, 0x0c, 0x09
+
+/* Inverse shift row + Rotate left by 24 bits on 32-bit words with vpshufb */
+.Linv_shift_row_rol_24:
+       .byte 0x0d, 0x0a, 0x07, 0x00, 0x01, 0x0e, 0x0b, 0x04
+       .byte 0x05, 0x02, 0x0f, 0x08, 0x09, 0x06, 0x03, 0x0c
+
+/* For CTR-mode IV byteswap */
+.Lbswap128_mask:
+       .byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
+
+/* For input word byte-swap */
+.Lbswap32_mask:
+       .byte 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
+
+.align 4
+/* 4-bit mask */
+.L0f0f0f0f:
+       .long 0x0f0f0f0f
+
+.text
+.align 16
+
+.align 8
+SYM_FUNC_START_LOCAL(__sm4_crypt_blk16)
+       /* input:
+        *      %rdi: round key array, CTX
+        *      RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3: sixteen parallel
+        *                                              plaintext blocks
+        * output:
+        *      RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3: sixteen parallel
+        *                                              ciphertext blocks
+        */
+       FRAME_BEGIN
+
+       vbroadcasti128 .Lbswap32_mask rRIP, RTMP2;
+       vpshufb RTMP2, RA0, RA0;
+       vpshufb RTMP2, RA1, RA1;
+       vpshufb RTMP2, RA2, RA2;
+       vpshufb RTMP2, RA3, RA3;
+       vpshufb RTMP2, RB0, RB0;
+       vpshufb RTMP2, RB1, RB1;
+       vpshufb RTMP2, RB2, RB2;
+       vpshufb RTMP2, RB3, RB3;
+
+       vpbroadcastd .L0f0f0f0f rRIP, MASK_4BIT;
+       transpose_4x4(RA0, RA1, RA2, RA3, RTMP0, RTMP1);
+       transpose_4x4(RB0, RB1, RB2, RB3, RTMP0, RTMP1);
+
+#define ROUND(round, s0, s1, s2, s3, r0, r1, r2, r3)                \
+       vpbroadcastd (4*(round))(%rdi), RX0;                        \
+       vbroadcasti128 .Lpre_tf_lo_s rRIP, RTMP4;                   \
+       vbroadcasti128 .Lpre_tf_hi_s rRIP, RTMP1;                   \
+       vmovdqa RX0, RX1;                                           \
+       vpxor s1, RX0, RX0;                                         \
+       vpxor s2, RX0, RX0;                                         \
+       vpxor s3, RX0, RX0; /* s1 ^ s2 ^ s3 ^ rk */                 \
+       vbroadcasti128 .Lpost_tf_lo_s rRIP, RTMP2;                  \
+       vbroadcasti128 .Lpost_tf_hi_s rRIP, RTMP3;                  \
+       vpxor r1, RX1, RX1;                                         \
+       vpxor r2, RX1, RX1;                                         \
+       vpxor r3, RX1, RX1; /* r1 ^ r2 ^ r3 ^ rk */                 \
+                                                                   \
+       /* sbox, non-linear part */                                 \
+       transform_pre(RX0, RTMP4, RTMP1, MASK_4BIT, RTMP0);         \
+       transform_pre(RX1, RTMP4, RTMP1, MASK_4BIT, RTMP0);         \
+       vextracti128 $1, RX0, RTMP4x;                               \
+       vextracti128 $1, RX1, RTMP0x;                               \
+       vaesenclast MASK_4BITx, RX0x, RX0x;                         \
+       vaesenclast MASK_4BITx, RTMP4x, RTMP4x;                     \
+       vaesenclast MASK_4BITx, RX1x, RX1x;                         \
+       vaesenclast MASK_4BITx, RTMP0x, RTMP0x;                     \
+       vinserti128 $1, RTMP4x, RX0, RX0;                           \
+       vbroadcasti128 .Linv_shift_row rRIP, RTMP4;                 \
+       vinserti128 $1, RTMP0x, RX1, RX1;                           \
+       transform_post(RX0, RTMP2, RTMP3, MASK_4BIT, RTMP0);        \
+       transform_post(RX1, RTMP2, RTMP3, MASK_4BIT, RTMP0);        \
+                                                                   \
+       /* linear part */                                           \
+       vpshufb RTMP4, RX0, RTMP0;                                  \
+       vpxor RTMP0, s0, s0; /* s0 ^ x */                           \
+       vpshufb RTMP4, RX1, RTMP2;                                  \
+       vbroadcasti128 .Linv_shift_row_rol_8 rRIP, RTMP4;           \
+       vpxor RTMP2, r0, r0; /* r0 ^ x */                           \
+       vpshufb RTMP4, RX0, RTMP1;                                  \
+       vpxor RTMP1, RTMP0, RTMP0; /* x ^ rol(x,8) */               \
+       vpshufb RTMP4, RX1, RTMP3;                                  \
+       vbroadcasti128 .Linv_shift_row_rol_16 rRIP, RTMP4;          \
+       vpxor RTMP3, RTMP2, RTMP2; /* x ^ rol(x,8) */               \
+       vpshufb RTMP4, RX0, RTMP1;                                  \
+       vpxor RTMP1, RTMP0, RTMP0; /* x ^ rol(x,8) ^ rol(x,16) */   \
+       vpshufb RTMP4, RX1, RTMP3;                                  \
+       vbroadcasti128 .Linv_shift_row_rol_24 rRIP, RTMP4;          \
+       vpxor RTMP3, RTMP2, RTMP2; /* x ^ rol(x,8) ^ rol(x,16) */   \
+       vpshufb RTMP4, RX0, RTMP1;                                  \
+       vpxor RTMP1, s0, s0; /* s0 ^ x ^ rol(x,24) */               \
+       vpslld $2, RTMP0, RTMP1;                                    \
+       vpsrld $30, RTMP0, RTMP0;                                   \
+       vpxor RTMP0, s0, s0;                                        \
+       /* s0 ^ x ^ rol(x,2) ^ rol(x,10) ^ rol(x,18) ^ rol(x,24) */ \
+       vpxor RTMP1, s0, s0;                                        \
+       vpshufb RTMP4, RX1, RTMP3;                                  \
+       vpxor RTMP3, r0, r0; /* r0 ^ x ^ rol(x,24) */               \
+       vpslld $2, RTMP2, RTMP3;                                    \
+       vpsrld $30, RTMP2, RTMP2;                                   \
+       vpxor RTMP2, r0, r0;                                        \
+       /* r0 ^ x ^ rol(x,2) ^ rol(x,10) ^ rol(x,18) ^ rol(x,24) */ \
+       vpxor RTMP3, r0, r0;
+
+       leaq (32*4)(%rdi), %rax;
+.align 16
+.Lroundloop_blk8:
+       ROUND(0, RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3);
+       ROUND(1, RA1, RA2, RA3, RA0, RB1, RB2, RB3, RB0);
+       ROUND(2, RA2, RA3, RA0, RA1, RB2, RB3, RB0, RB1);
+       ROUND(3, RA3, RA0, RA1, RA2, RB3, RB0, RB1, RB2);
+       leaq (4*4)(%rdi), %rdi;
+       cmpq %rax, %rdi;
+       jne .Lroundloop_blk8;
+
+#undef ROUND
+
+       vbroadcasti128 .Lbswap128_mask rRIP, RTMP2;
+
+       transpose_4x4(RA0, RA1, RA2, RA3, RTMP0, RTMP1);
+       transpose_4x4(RB0, RB1, RB2, RB3, RTMP0, RTMP1);
+       vpshufb RTMP2, RA0, RA0;
+       vpshufb RTMP2, RA1, RA1;
+       vpshufb RTMP2, RA2, RA2;
+       vpshufb RTMP2, RA3, RA3;
+       vpshufb RTMP2, RB0, RB0;
+       vpshufb RTMP2, RB1, RB1;
+       vpshufb RTMP2, RB2, RB2;
+       vpshufb RTMP2, RB3, RB3;
+
+       FRAME_END
+       ret;
+SYM_FUNC_END(__sm4_crypt_blk16)
+
+/*
+ * inc_le128(x, minus_one, tmp): increment a little-endian 128-bit counter
+ * held in each 128-bit lane of x.  vpsubq of -1 adds 1 to the low qword;
+ * vpcmpeqq detects a low qword that was all-ones (about to wrap),
+ * vpslldq shifts that mask into the high qword position, and the final
+ * vpsubq propagates the carry into the high qword.  tmp is clobbered.
+ */
+#define inc_le128(x, minus_one, tmp) \
+       vpcmpeqq minus_one, x, tmp;  \
+       vpsubq minus_one, x, x;      \
+       vpslldq $8, tmp, tmp;        \
+       vpsubq tmp, x, x;
+
+/*
+ * void sm4_aesni_avx2_ctr_enc_blk16(const u32 *rk, u8 *dst,
+ *                                   const u8 *src, u8 *iv)
+ *
+ * CTR mode, 16 blocks per call: expand the big-endian 128-bit counter in
+ * *iv into 16 successive counter values, encrypt them via
+ * __sm4_crypt_blk16, XOR the resulting keystream with src into dst, and
+ * store the counter advanced by 16 back to *iv.
+ */
+.align 8
+SYM_FUNC_START(sm4_aesni_avx2_ctr_enc_blk16)
+       /* input:
+        *      %rdi: round key array, CTX
+        *      %rsi: dst (16 blocks)
+        *      %rdx: src (16 blocks)
+        *      %rcx: iv (big endian, 128bit)
+        */
+       FRAME_BEGIN
+
+       /* low qword of the counter in host (LE) order, used below to
+        * decide whether +16 can overflow the low 64 bits */
+       movq 8(%rcx), %rax;
+       bswapq %rax;
+
+       vzeroupper;
+
+       vbroadcasti128 .Lbswap128_mask rRIP, RTMP3;
+       vpcmpeqd RNOT, RNOT, RNOT;
+       vpsrldq $8, RNOT, RNOT;   /* ab: -1:0 ; cd: -1:0 */
+       vpaddq RNOT, RNOT, RTMP2; /* ab: -2:0 ; cd: -2:0 */
+
+       /* load IV and byteswap */
+       vmovdqu (%rcx), RTMP4x;
+       vpshufb RTMP3x, RTMP4x, RTMP4x;
+       vmovdqa RTMP4x, RTMP0x;
+       inc_le128(RTMP4x, RNOTx, RTMP1x);
+       vinserti128 $1, RTMP4x, RTMP0, RTMP0;
+       vpshufb RTMP3, RTMP0, RA0; /* +1 ; +0 */
+
+       /* check need for handling 64-bit overflow and carry */
+       cmpq $(0xffffffffffffffff - 16), %rax;
+       ja .Lhandle_ctr_carry;
+
+       /* construct IVs */
+       /* fast path: no carry into the high qword; subtracting the -2
+        * constant adds 2 to both 128-bit lanes at once */
+       vpsubq RTMP2, RTMP0, RTMP0; /* +3 ; +2 */
+       vpshufb RTMP3, RTMP0, RA1;
+       vpsubq RTMP2, RTMP0, RTMP0; /* +5 ; +4 */
+       vpshufb RTMP3, RTMP0, RA2;
+       vpsubq RTMP2, RTMP0, RTMP0; /* +7 ; +6 */
+       vpshufb RTMP3, RTMP0, RA3;
+       vpsubq RTMP2, RTMP0, RTMP0; /* +9 ; +8 */
+       vpshufb RTMP3, RTMP0, RB0;
+       vpsubq RTMP2, RTMP0, RTMP0; /* +11 ; +10 */
+       vpshufb RTMP3, RTMP0, RB1;
+       vpsubq RTMP2, RTMP0, RTMP0; /* +13 ; +12 */
+       vpshufb RTMP3, RTMP0, RB2;
+       vpsubq RTMP2, RTMP0, RTMP0; /* +15 ; +14 */
+       vpshufb RTMP3, RTMP0, RB3;
+       vpsubq RTMP2, RTMP0, RTMP0; /* +16 */
+       vpshufb RTMP3x, RTMP0x, RTMP0x;
+
+       jmp .Lctr_carry_done;
+
+.Lhandle_ctr_carry:
+       /* construct IVs */
+       /* slow path: full 128-bit increments so a low-qword wrap carries
+        * into the high qword correctly */
+       inc_le128(RTMP0, RNOT, RTMP1);
+       inc_le128(RTMP0, RNOT, RTMP1);
+       vpshufb RTMP3, RTMP0, RA1; /* +3 ; +2 */
+       inc_le128(RTMP0, RNOT, RTMP1);
+       inc_le128(RTMP0, RNOT, RTMP1);
+       vpshufb RTMP3, RTMP0, RA2; /* +5 ; +4 */
+       inc_le128(RTMP0, RNOT, RTMP1);
+       inc_le128(RTMP0, RNOT, RTMP1);
+       vpshufb RTMP3, RTMP0, RA3; /* +7 ; +6 */
+       inc_le128(RTMP0, RNOT, RTMP1);
+       inc_le128(RTMP0, RNOT, RTMP1);
+       vpshufb RTMP3, RTMP0, RB0; /* +9 ; +8 */
+       inc_le128(RTMP0, RNOT, RTMP1);
+       inc_le128(RTMP0, RNOT, RTMP1);
+       vpshufb RTMP3, RTMP0, RB1; /* +11 ; +10 */
+       inc_le128(RTMP0, RNOT, RTMP1);
+       inc_le128(RTMP0, RNOT, RTMP1);
+       vpshufb RTMP3, RTMP0, RB2; /* +13 ; +12 */
+       inc_le128(RTMP0, RNOT, RTMP1);
+       inc_le128(RTMP0, RNOT, RTMP1);
+       vpshufb RTMP3, RTMP0, RB3; /* +15 ; +14 */
+       inc_le128(RTMP0, RNOT, RTMP1);
+       vextracti128 $1, RTMP0, RTMP0x;
+       vpshufb RTMP3x, RTMP0x, RTMP0x; /* +16 */
+
+.align 4
+.Lctr_carry_done:
+       /* store new IV */
+       vmovdqu RTMP0x, (%rcx);
+
+       call __sm4_crypt_blk16;
+
+       /* dst = keystream ^ src */
+       vpxor (0 * 32)(%rdx), RA0, RA0;
+       vpxor (1 * 32)(%rdx), RA1, RA1;
+       vpxor (2 * 32)(%rdx), RA2, RA2;
+       vpxor (3 * 32)(%rdx), RA3, RA3;
+       vpxor (4 * 32)(%rdx), RB0, RB0;
+       vpxor (5 * 32)(%rdx), RB1, RB1;
+       vpxor (6 * 32)(%rdx), RB2, RB2;
+       vpxor (7 * 32)(%rdx), RB3, RB3;
+
+       vmovdqu RA0, (0 * 32)(%rsi);
+       vmovdqu RA1, (1 * 32)(%rsi);
+       vmovdqu RA2, (2 * 32)(%rsi);
+       vmovdqu RA3, (3 * 32)(%rsi);
+       vmovdqu RB0, (4 * 32)(%rsi);
+       vmovdqu RB1, (5 * 32)(%rsi);
+       vmovdqu RB2, (6 * 32)(%rsi);
+       vmovdqu RB3, (7 * 32)(%rsi);
+
+       /* clear register state so key-derived data does not leak */
+       vzeroall;
+       FRAME_END
+       ret;
+SYM_FUNC_END(sm4_aesni_avx2_ctr_enc_blk16)
+
+/*
+ * void sm4_aesni_avx2_cbc_dec_blk16(const u32 *rk, u8 *dst,
+ *                                   const u8 *src, u8 *iv)
+ *
+ * CBC decryption, 16 blocks per call: decrypt all 16 ciphertext blocks
+ * in parallel, then XOR each result with the previous ciphertext block
+ * (the IV for block 0), and store the last ciphertext block as the new
+ * IV for the next call.
+ */
+.align 8
+SYM_FUNC_START(sm4_aesni_avx2_cbc_dec_blk16)
+       /* input:
+        *      %rdi: round key array, CTX
+        *      %rsi: dst (16 blocks)
+        *      %rdx: src (16 blocks)
+        *      %rcx: iv
+        */
+       FRAME_BEGIN
+
+       vzeroupper;
+
+       vmovdqu (0 * 32)(%rdx), RA0;
+       vmovdqu (1 * 32)(%rdx), RA1;
+       vmovdqu (2 * 32)(%rdx), RA2;
+       vmovdqu (3 * 32)(%rdx), RA3;
+       vmovdqu (4 * 32)(%rdx), RB0;
+       vmovdqu (5 * 32)(%rdx), RB1;
+       vmovdqu (6 * 32)(%rdx), RB2;
+       vmovdqu (7 * 32)(%rdx), RB3;
+
+       call __sm4_crypt_blk16;
+
+       /* RNOT = (iv | c0): the XOR inputs for plaintext blocks 0 and 1;
+        * the remaining blocks XOR against src shifted back one block */
+       vmovdqu (%rcx), RNOTx;
+       vinserti128 $1, (%rdx), RNOT, RNOT;
+       vpxor RNOT, RA0, RA0;
+       vpxor (0 * 32 + 16)(%rdx), RA1, RA1;
+       vpxor (1 * 32 + 16)(%rdx), RA2, RA2;
+       vpxor (2 * 32 + 16)(%rdx), RA3, RA3;
+       vpxor (3 * 32 + 16)(%rdx), RB0, RB0;
+       vpxor (4 * 32 + 16)(%rdx), RB1, RB1;
+       vpxor (5 * 32 + 16)(%rdx), RB2, RB2;
+       vpxor (6 * 32 + 16)(%rdx), RB3, RB3;
+       vmovdqu (7 * 32 + 16)(%rdx), RNOTx;
+       vmovdqu RNOTx, (%rcx); /* store new IV */
+
+       vmovdqu RA0, (0 * 32)(%rsi);
+       vmovdqu RA1, (1 * 32)(%rsi);
+       vmovdqu RA2, (2 * 32)(%rsi);
+       vmovdqu RA3, (3 * 32)(%rsi);
+       vmovdqu RB0, (4 * 32)(%rsi);
+       vmovdqu RB1, (5 * 32)(%rsi);
+       vmovdqu RB2, (6 * 32)(%rsi);
+       vmovdqu RB3, (7 * 32)(%rsi);
+
+       /* clear register state so key-derived data does not leak */
+       vzeroall;
+       FRAME_END
+       ret;
+SYM_FUNC_END(sm4_aesni_avx2_cbc_dec_blk16)
+
+/*
+ * void sm4_aesni_avx2_cfb_dec_blk16(const u32 *rk, u8 *dst,
+ *                                   const u8 *src, u8 *iv)
+ *
+ * CFB decryption, 16 blocks per call: the keystream for block i is
+ * E(c[i-1]) (E(iv) for block 0), so encrypt (iv | c0..c14) in parallel
+ * and XOR with the ciphertext; the last ciphertext block becomes the
+ * next IV.
+ */
+.align 8
+SYM_FUNC_START(sm4_aesni_avx2_cfb_dec_blk16)
+       /* input:
+        *      %rdi: round key array, CTX
+        *      %rsi: dst (16 blocks)
+        *      %rdx: src (16 blocks)
+        *      %rcx: iv
+        */
+       FRAME_BEGIN
+
+       vzeroupper;
+
+       /* Load input */
+       /* RA0 = (iv | c0); RA1..RB3 = c1..c14, i.e. src shifted back one
+        * block — these are the cipher inputs, not the XOR operands */
+       vmovdqu (%rcx), RNOTx;
+       vinserti128 $1, (%rdx), RNOT, RA0;
+       vmovdqu (0 * 32 + 16)(%rdx), RA1;
+       vmovdqu (1 * 32 + 16)(%rdx), RA2;
+       vmovdqu (2 * 32 + 16)(%rdx), RA3;
+       vmovdqu (3 * 32 + 16)(%rdx), RB0;
+       vmovdqu (4 * 32 + 16)(%rdx), RB1;
+       vmovdqu (5 * 32 + 16)(%rdx), RB2;
+       vmovdqu (6 * 32 + 16)(%rdx), RB3;
+
+       /* Update IV */
+       vmovdqu (7 * 32 + 16)(%rdx), RNOTx;
+       vmovdqu RNOTx, (%rcx);
+
+       call __sm4_crypt_blk16;
+
+       /* dst = keystream ^ ciphertext */
+       vpxor (0 * 32)(%rdx), RA0, RA0;
+       vpxor (1 * 32)(%rdx), RA1, RA1;
+       vpxor (2 * 32)(%rdx), RA2, RA2;
+       vpxor (3 * 32)(%rdx), RA3, RA3;
+       vpxor (4 * 32)(%rdx), RB0, RB0;
+       vpxor (5 * 32)(%rdx), RB1, RB1;
+       vpxor (6 * 32)(%rdx), RB2, RB2;
+       vpxor (7 * 32)(%rdx), RB3, RB3;
+
+       vmovdqu RA0, (0 * 32)(%rsi);
+       vmovdqu RA1, (1 * 32)(%rsi);
+       vmovdqu RA2, (2 * 32)(%rsi);
+       vmovdqu RA3, (3 * 32)(%rsi);
+       vmovdqu RB0, (4 * 32)(%rsi);
+       vmovdqu RB1, (5 * 32)(%rsi);
+       vmovdqu RB2, (6 * 32)(%rsi);
+       vmovdqu RB3, (7 * 32)(%rsi);
+
+       /* clear register state so key-derived data does not leak */
+       vzeroall;
+       FRAME_END
+       ret;
+SYM_FUNC_END(sm4_aesni_avx2_cfb_dec_blk16)
diff --git a/arch/x86/crypto/sm4-avx.h b/arch/x86/crypto/sm4-avx.h
new file mode 100644 (file)
index 0000000..1bceab7
--- /dev/null
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef ASM_X86_SM4_AVX_H
+#define ASM_X86_SM4_AVX_H
+
+#include <linux/types.h>
+#include <crypto/sm4.h>
+
+/*
+ * Signature shared by the AVX/AVX2 bulk assembly helpers: process a
+ * fixed number of blocks from @src into @dst with round keys @rk,
+ * reading and updating the chaining value @iv.
+ */
+typedef void (*sm4_crypt_func)(const u32 *rk, u8 *dst, const u8 *src, u8 *iv);
+
+/* ECB helpers shared by the AVX and AVX2 glue modules. */
+int sm4_avx_ecb_encrypt(struct skcipher_request *req);
+int sm4_avx_ecb_decrypt(struct skcipher_request *req);
+
+/* CBC: encryption is scalar (inherently serial); decryption is SIMD
+ * with @bsize bytes handed to @func per call. */
+int sm4_cbc_encrypt(struct skcipher_request *req);
+int sm4_avx_cbc_decrypt(struct skcipher_request *req,
+                       unsigned int bsize, sm4_crypt_func func);
+
+/* CFB: encryption is scalar (inherently serial); decryption is SIMD. */
+int sm4_cfb_encrypt(struct skcipher_request *req);
+int sm4_avx_cfb_decrypt(struct skcipher_request *req,
+                       unsigned int bsize, sm4_crypt_func func);
+
+/* CTR: fully parallel in both directions. */
+int sm4_avx_ctr_crypt(struct skcipher_request *req,
+                       unsigned int bsize, sm4_crypt_func func);
+
+#endif
diff --git a/arch/x86/crypto/sm4_aesni_avx2_glue.c b/arch/x86/crypto/sm4_aesni_avx2_glue.c
new file mode 100644 (file)
index 0000000..84bc718
--- /dev/null
@@ -0,0 +1,169 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * SM4 Cipher Algorithm, AES-NI/AVX2 optimized.
+ * as specified in
+ * https://tools.ietf.org/id/draft-ribose-cfrg-sm4-10.html
+ *
+ * Copyright (c) 2021, Alibaba Group.
+ * Copyright (c) 2021 Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
+ */
+
+#include <linux/module.h>
+#include <linux/crypto.h>
+#include <linux/kernel.h>
+#include <asm/simd.h>
+#include <crypto/internal/simd.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/sm4.h>
+#include "sm4-avx.h"
+
+#define SM4_CRYPT16_BLOCK_SIZE (SM4_BLOCK_SIZE * 16)
+
+asmlinkage void sm4_aesni_avx2_ctr_enc_blk16(const u32 *rk, u8 *dst,
+                                       const u8 *src, u8 *iv);
+asmlinkage void sm4_aesni_avx2_cbc_dec_blk16(const u32 *rk, u8 *dst,
+                                       const u8 *src, u8 *iv);
+asmlinkage void sm4_aesni_avx2_cfb_dec_blk16(const u32 *rk, u8 *dst,
+                                       const u8 *src, u8 *iv);
+
+/* skcipher ->setkey: expand @key into encryption/decryption round keys. */
+static int sm4_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
+                       unsigned int key_len)
+{
+       struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+       return sm4_expandkey(ctx, key, key_len);
+}
+
+/* CBC decryption entry: 16 blocks per call of the AVX2 bulk helper. */
+static int cbc_decrypt(struct skcipher_request *req)
+{
+       return sm4_avx_cbc_decrypt(req, SM4_CRYPT16_BLOCK_SIZE,
+                               sm4_aesni_avx2_cbc_dec_blk16);
+}
+
+/* CFB decryption entry: 16 blocks per call of the AVX2 bulk helper. */
+static int cfb_decrypt(struct skcipher_request *req)
+{
+       return sm4_avx_cfb_decrypt(req, SM4_CRYPT16_BLOCK_SIZE,
+                               sm4_aesni_avx2_cfb_dec_blk16);
+}
+
+/* CTR entry (encrypt == decrypt): 16 blocks per AVX2 bulk call. */
+static int ctr_crypt(struct skcipher_request *req)
+{
+       return sm4_avx_ctr_crypt(req, SM4_CRYPT16_BLOCK_SIZE,
+                               sm4_aesni_avx2_ctr_enc_blk16);
+}
+
+/*
+ * Internal ("__"-prefixed, CRYPTO_ALG_INTERNAL) algorithms; user-visible
+ * wrappers are created around these by simd_register_skciphers_compat()
+ * so the FPU-using code only ever runs in a context where SIMD is usable.
+ * Priority 500 ranks these above the AVX (400) variants.
+ */
+static struct skcipher_alg sm4_aesni_avx2_skciphers[] = {
+       {
+               .base = {
+                       .cra_name               = "__ecb(sm4)",
+                       .cra_driver_name        = "__ecb-sm4-aesni-avx2",
+                       .cra_priority           = 500,
+                       .cra_flags              = CRYPTO_ALG_INTERNAL,
+                       .cra_blocksize          = SM4_BLOCK_SIZE,
+                       .cra_ctxsize            = sizeof(struct sm4_ctx),
+                       .cra_module             = THIS_MODULE,
+               },
+               .min_keysize    = SM4_KEY_SIZE,
+               .max_keysize    = SM4_KEY_SIZE,
+               .walksize       = 16 * SM4_BLOCK_SIZE,
+               .setkey         = sm4_skcipher_setkey,
+               .encrypt        = sm4_avx_ecb_encrypt,
+               .decrypt        = sm4_avx_ecb_decrypt,
+       }, {
+               .base = {
+                       .cra_name               = "__cbc(sm4)",
+                       .cra_driver_name        = "__cbc-sm4-aesni-avx2",
+                       .cra_priority           = 500,
+                       .cra_flags              = CRYPTO_ALG_INTERNAL,
+                       .cra_blocksize          = SM4_BLOCK_SIZE,
+                       .cra_ctxsize            = sizeof(struct sm4_ctx),
+                       .cra_module             = THIS_MODULE,
+               },
+               .min_keysize    = SM4_KEY_SIZE,
+               .max_keysize    = SM4_KEY_SIZE,
+               .ivsize         = SM4_BLOCK_SIZE,
+               .walksize       = 16 * SM4_BLOCK_SIZE,
+               .setkey         = sm4_skcipher_setkey,
+               .encrypt        = sm4_cbc_encrypt,
+               .decrypt        = cbc_decrypt,
+       }, {
+               .base = {
+                       .cra_name               = "__cfb(sm4)",
+                       .cra_driver_name        = "__cfb-sm4-aesni-avx2",
+                       .cra_priority           = 500,
+                       .cra_flags              = CRYPTO_ALG_INTERNAL,
+                       /* CFB/CTR are stream modes: blocksize 1 */
+                       .cra_blocksize          = 1,
+                       .cra_ctxsize            = sizeof(struct sm4_ctx),
+                       .cra_module             = THIS_MODULE,
+               },
+               .min_keysize    = SM4_KEY_SIZE,
+               .max_keysize    = SM4_KEY_SIZE,
+               .ivsize         = SM4_BLOCK_SIZE,
+               .chunksize      = SM4_BLOCK_SIZE,
+               .walksize       = 16 * SM4_BLOCK_SIZE,
+               .setkey         = sm4_skcipher_setkey,
+               .encrypt        = sm4_cfb_encrypt,
+               .decrypt        = cfb_decrypt,
+       }, {
+               .base = {
+                       .cra_name               = "__ctr(sm4)",
+                       .cra_driver_name        = "__ctr-sm4-aesni-avx2",
+                       .cra_priority           = 500,
+                       .cra_flags              = CRYPTO_ALG_INTERNAL,
+                       .cra_blocksize          = 1,
+                       .cra_ctxsize            = sizeof(struct sm4_ctx),
+                       .cra_module             = THIS_MODULE,
+               },
+               .min_keysize    = SM4_KEY_SIZE,
+               .max_keysize    = SM4_KEY_SIZE,
+               .ivsize         = SM4_BLOCK_SIZE,
+               .chunksize      = SM4_BLOCK_SIZE,
+               .walksize       = 16 * SM4_BLOCK_SIZE,
+               .setkey         = sm4_skcipher_setkey,
+               .encrypt        = ctr_crypt,
+               .decrypt        = ctr_crypt,
+       }
+};
+
+static struct simd_skcipher_alg *
+simd_sm4_aesni_avx2_skciphers[ARRAY_SIZE(sm4_aesni_avx2_skciphers)];
+
+/*
+ * Module init: verify the CPU supports AVX/AVX2/AES-NI/OSXSAVE and that
+ * the OS saves SSE+YMM xstate, then register the SIMD-wrapped skciphers.
+ * Returns -ENODEV when the hardware cannot run this implementation.
+ */
+static int __init sm4_init(void)
+{
+       const char *feature_name;
+
+       if (!boot_cpu_has(X86_FEATURE_AVX) ||
+           !boot_cpu_has(X86_FEATURE_AVX2) ||
+           !boot_cpu_has(X86_FEATURE_AES) ||
+           !boot_cpu_has(X86_FEATURE_OSXSAVE)) {
+               pr_info("AVX2 or AES-NI instructions are not detected.\n");
+               return -ENODEV;
+       }
+
+       if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM,
+                               &feature_name)) {
+               pr_info("CPU feature '%s' is not supported.\n", feature_name);
+               return -ENODEV;
+       }
+
+       return simd_register_skciphers_compat(sm4_aesni_avx2_skciphers,
+                                       ARRAY_SIZE(sm4_aesni_avx2_skciphers),
+                                       simd_sm4_aesni_avx2_skciphers);
+}
+
+/* Module exit: unregister both the internal and simd wrapper algorithms. */
+static void __exit sm4_exit(void)
+{
+       simd_unregister_skciphers(sm4_aesni_avx2_skciphers,
+                               ARRAY_SIZE(sm4_aesni_avx2_skciphers),
+                               simd_sm4_aesni_avx2_skciphers);
+}
+
+module_init(sm4_init);
+module_exit(sm4_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Tianjia Zhang <tianjia.zhang@linux.alibaba.com>");
+MODULE_DESCRIPTION("SM4 Cipher Algorithm, AES-NI/AVX2 optimized");
+MODULE_ALIAS_CRYPTO("sm4");
+MODULE_ALIAS_CRYPTO("sm4-aesni-avx2");
diff --git a/arch/x86/crypto/sm4_aesni_avx_glue.c b/arch/x86/crypto/sm4_aesni_avx_glue.c
new file mode 100644 (file)
index 0000000..7800f77
--- /dev/null
@@ -0,0 +1,487 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * SM4 Cipher Algorithm, AES-NI/AVX optimized.
+ * as specified in
+ * https://tools.ietf.org/id/draft-ribose-cfrg-sm4-10.html
+ *
+ * Copyright (c) 2021, Alibaba Group.
+ * Copyright (c) 2021 Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
+ */
+
+#include <linux/module.h>
+#include <linux/crypto.h>
+#include <linux/kernel.h>
+#include <asm/simd.h>
+#include <crypto/internal/simd.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/sm4.h>
+#include "sm4-avx.h"
+
+#define SM4_CRYPT8_BLOCK_SIZE  (SM4_BLOCK_SIZE * 8)
+
+asmlinkage void sm4_aesni_avx_crypt4(const u32 *rk, u8 *dst,
+                               const u8 *src, int nblocks);
+asmlinkage void sm4_aesni_avx_crypt8(const u32 *rk, u8 *dst,
+                               const u8 *src, int nblocks);
+asmlinkage void sm4_aesni_avx_ctr_enc_blk8(const u32 *rk, u8 *dst,
+                               const u8 *src, u8 *iv);
+asmlinkage void sm4_aesni_avx_cbc_dec_blk8(const u32 *rk, u8 *dst,
+                               const u8 *src, u8 *iv);
+asmlinkage void sm4_aesni_avx_cfb_dec_blk8(const u32 *rk, u8 *dst,
+                               const u8 *src, u8 *iv);
+
+/* skcipher ->setkey: expand @key into encryption/decryption round keys. */
+static int sm4_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
+                       unsigned int key_len)
+{
+       struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+       return sm4_expandkey(ctx, key, key_len);
+}
+
+/*
+ * ECB walk shared by encrypt and decrypt: @rkey selects the direction.
+ * Bulk path handles 8 blocks per asm call; the remainder is done up to
+ * 4 blocks at a time.  FPU is held only across full walk chunks.
+ */
+static int ecb_do_crypt(struct skcipher_request *req, const u32 *rkey)
+{
+       struct skcipher_walk walk;
+       unsigned int nbytes;
+       int err;
+
+       err = skcipher_walk_virt(&walk, req, false);
+
+       while ((nbytes = walk.nbytes) > 0) {
+               const u8 *src = walk.src.virt.addr;
+               u8 *dst = walk.dst.virt.addr;
+
+               kernel_fpu_begin();
+               /* bulk path: 8 blocks per asm call */
+               while (nbytes >= SM4_CRYPT8_BLOCK_SIZE) {
+                       sm4_aesni_avx_crypt8(rkey, dst, src, 8);
+                       dst += SM4_CRYPT8_BLOCK_SIZE;
+                       src += SM4_CRYPT8_BLOCK_SIZE;
+                       nbytes -= SM4_CRYPT8_BLOCK_SIZE;
+               }
+               /* remainder: 1-4 blocks at a time (nbytes >> 4 == blocks) */
+               while (nbytes >= SM4_BLOCK_SIZE) {
+                       unsigned int nblocks = min(nbytes >> 4, 4u);
+                       sm4_aesni_avx_crypt4(rkey, dst, src, nblocks);
+                       dst += nblocks * SM4_BLOCK_SIZE;
+                       src += nblocks * SM4_BLOCK_SIZE;
+                       nbytes -= nblocks * SM4_BLOCK_SIZE;
+               }
+               kernel_fpu_end();
+
+               err = skcipher_walk_done(&walk, nbytes);
+       }
+
+       return err;
+}
+
+/* ECB encryption: run the shared walk with the encryption round keys. */
+int sm4_avx_ecb_encrypt(struct skcipher_request *req)
+{
+       struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+       struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+       return ecb_do_crypt(req, ctx->rkey_enc);
+}
+EXPORT_SYMBOL_GPL(sm4_avx_ecb_encrypt);
+
+/* ECB decryption: run the shared walk with the decryption round keys. */
+int sm4_avx_ecb_decrypt(struct skcipher_request *req)
+{
+       struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+       struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+       return ecb_do_crypt(req, ctx->rkey_dec);
+}
+EXPORT_SYMBOL_GPL(sm4_avx_ecb_decrypt);
+
+/*
+ * CBC encryption.  Each block depends on the previous ciphertext, so
+ * this is inherently serial and uses the scalar sm4_crypt_block()
+ * rather than the SIMD paths.  The running IV is tracked via a pointer
+ * into dst and copied back to walk.iv once per walk chunk.
+ */
+int sm4_cbc_encrypt(struct skcipher_request *req)
+{
+       struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+       struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
+       struct skcipher_walk walk;
+       unsigned int nbytes;
+       int err;
+
+       err = skcipher_walk_virt(&walk, req, false);
+
+       while ((nbytes = walk.nbytes) > 0) {
+               const u8 *iv = walk.iv;
+               const u8 *src = walk.src.virt.addr;
+               u8 *dst = walk.dst.virt.addr;
+
+               while (nbytes >= SM4_BLOCK_SIZE) {
+                       /* c[i] = E(p[i] ^ c[i-1]); encrypt in place in dst */
+                       crypto_xor_cpy(dst, src, iv, SM4_BLOCK_SIZE);
+                       sm4_crypt_block(ctx->rkey_enc, dst, dst);
+                       iv = dst;
+                       src += SM4_BLOCK_SIZE;
+                       dst += SM4_BLOCK_SIZE;
+                       nbytes -= SM4_BLOCK_SIZE;
+               }
+               if (iv != walk.iv)
+                       memcpy(walk.iv, iv, SM4_BLOCK_SIZE);
+
+               err = skcipher_walk_done(&walk, nbytes);
+       }
+
+       return err;
+}
+EXPORT_SYMBOL_GPL(sm4_cbc_encrypt);
+
+/*
+ * CBC decryption.  Full @bsize chunks go to the caller-supplied SIMD
+ * helper @func (which also updates walk.iv); the remainder is decrypted
+ * 1-8 blocks at a time with the 8-way asm routine, then the CBC XOR
+ * pass is applied backwards so in-place (dst == src) operation reads
+ * each ciphertext block before it is overwritten.
+ */
+int sm4_avx_cbc_decrypt(struct skcipher_request *req,
+                       unsigned int bsize, sm4_crypt_func func)
+{
+       struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+       struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
+       struct skcipher_walk walk;
+       unsigned int nbytes;
+       int err;
+
+       err = skcipher_walk_virt(&walk, req, false);
+
+       while ((nbytes = walk.nbytes) > 0) {
+               const u8 *src = walk.src.virt.addr;
+               u8 *dst = walk.dst.virt.addr;
+
+               kernel_fpu_begin();
+
+               while (nbytes >= bsize) {
+                       func(ctx->rkey_dec, dst, src, walk.iv);
+                       dst += bsize;
+                       src += bsize;
+                       nbytes -= bsize;
+               }
+
+               while (nbytes >= SM4_BLOCK_SIZE) {
+                       u8 keystream[SM4_BLOCK_SIZE * 8];
+                       u8 iv[SM4_BLOCK_SIZE];
+                       unsigned int nblocks = min(nbytes >> 4, 8u);
+                       int i;
+
+                       /* keystream[i] = D(c[i]) for all blocks at once */
+                       sm4_aesni_avx_crypt8(ctx->rkey_dec, keystream,
+                                               src, nblocks);
+
+                       /* point src at c[nblocks-2] and dst at the last
+                        * plaintext slot; the (int) cast keeps the offset
+                        * arithmetic sane when nblocks == 1 */
+                       src += ((int)nblocks - 2) * SM4_BLOCK_SIZE;
+                       dst += (nblocks - 1) * SM4_BLOCK_SIZE;
+                       /* save the last ciphertext block: next chunk's IV */
+                       memcpy(iv, src + SM4_BLOCK_SIZE, SM4_BLOCK_SIZE);
+
+                       /* backward pass: p[i] = D(c[i]) ^ c[i-1] */
+                       for (i = nblocks - 1; i > 0; i--) {
+                               crypto_xor_cpy(dst, src,
+                                       &keystream[i * SM4_BLOCK_SIZE],
+                                       SM4_BLOCK_SIZE);
+                               src -= SM4_BLOCK_SIZE;
+                               dst -= SM4_BLOCK_SIZE;
+                       }
+                       /* first block chains off the incoming IV */
+                       crypto_xor_cpy(dst, walk.iv, keystream, SM4_BLOCK_SIZE);
+                       memcpy(walk.iv, iv, SM4_BLOCK_SIZE);
+                       dst += nblocks * SM4_BLOCK_SIZE;
+                       src += (nblocks + 1) * SM4_BLOCK_SIZE;
+                       nbytes -= nblocks * SM4_BLOCK_SIZE;
+               }
+
+               kernel_fpu_end();
+               err = skcipher_walk_done(&walk, nbytes);
+       }
+
+       return err;
+}
+EXPORT_SYMBOL_GPL(sm4_avx_cbc_decrypt);
+
+/* CBC decryption entry: 8 blocks per call of the AVX bulk helper. */
+static int cbc_decrypt(struct skcipher_request *req)
+{
+       return sm4_avx_cbc_decrypt(req, SM4_CRYPT8_BLOCK_SIZE,
+                               sm4_aesni_avx_cbc_dec_blk8);
+}
+
+/*
+ * CFB encryption.  Each keystream block is E(previous ciphertext), so
+ * this is inherently serial and scalar.  A final partial block, if any,
+ * is handled by XORing only @nbytes bytes of keystream (CFB needs no
+ * padding).
+ */
+int sm4_cfb_encrypt(struct skcipher_request *req)
+{
+       struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+       struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
+       struct skcipher_walk walk;
+       unsigned int nbytes;
+       int err;
+
+       err = skcipher_walk_virt(&walk, req, false);
+
+       while ((nbytes = walk.nbytes) > 0) {
+               u8 keystream[SM4_BLOCK_SIZE];
+               const u8 *iv = walk.iv;
+               const u8 *src = walk.src.virt.addr;
+               u8 *dst = walk.dst.virt.addr;
+
+               while (nbytes >= SM4_BLOCK_SIZE) {
+                       /* c[i] = p[i] ^ E(c[i-1]) */
+                       sm4_crypt_block(ctx->rkey_enc, keystream, iv);
+                       crypto_xor_cpy(dst, src, keystream, SM4_BLOCK_SIZE);
+                       iv = dst;
+                       src += SM4_BLOCK_SIZE;
+                       dst += SM4_BLOCK_SIZE;
+                       nbytes -= SM4_BLOCK_SIZE;
+               }
+               if (iv != walk.iv)
+                       memcpy(walk.iv, iv, SM4_BLOCK_SIZE);
+
+               /* tail */
+               if (walk.nbytes == walk.total && nbytes > 0) {
+                       sm4_crypt_block(ctx->rkey_enc, keystream, walk.iv);
+                       crypto_xor_cpy(dst, src, keystream, nbytes);
+                       nbytes = 0;
+               }
+
+               err = skcipher_walk_done(&walk, nbytes);
+       }
+
+       return err;
+}
+EXPORT_SYMBOL_GPL(sm4_cfb_encrypt);
+
+/*
+ * CFB decryption.  Full @bsize chunks go to the SIMD helper @func; the
+ * remainder builds the cipher-input vector (iv | c[0..nblocks-2]) in a
+ * stack buffer, encrypts it 8-wide, and XORs with the ciphertext.  A
+ * final partial block uses a single scalar keystream block.
+ */
+int sm4_avx_cfb_decrypt(struct skcipher_request *req,
+                       unsigned int bsize, sm4_crypt_func func)
+{
+       struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+       struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
+       struct skcipher_walk walk;
+       unsigned int nbytes;
+       int err;
+
+       err = skcipher_walk_virt(&walk, req, false);
+
+       while ((nbytes = walk.nbytes) > 0) {
+               const u8 *src = walk.src.virt.addr;
+               u8 *dst = walk.dst.virt.addr;
+
+               kernel_fpu_begin();
+
+               while (nbytes >= bsize) {
+                       func(ctx->rkey_enc, dst, src, walk.iv);
+                       dst += bsize;
+                       src += bsize;
+                       nbytes -= bsize;
+               }
+
+               while (nbytes >= SM4_BLOCK_SIZE) {
+                       u8 keystream[SM4_BLOCK_SIZE * 8];
+                       unsigned int nblocks = min(nbytes >> 4, 8u);
+
+                       /* cipher inputs: current IV, then c[0..nblocks-2] */
+                       memcpy(keystream, walk.iv, SM4_BLOCK_SIZE);
+                       if (nblocks > 1)
+                               memcpy(&keystream[SM4_BLOCK_SIZE], src,
+                                       (nblocks - 1) * SM4_BLOCK_SIZE);
+                       /* last ciphertext block becomes the next IV */
+                       memcpy(walk.iv, src + (nblocks - 1) * SM4_BLOCK_SIZE,
+                               SM4_BLOCK_SIZE);
+
+                       sm4_aesni_avx_crypt8(ctx->rkey_enc, keystream,
+                                               keystream, nblocks);
+
+                       crypto_xor_cpy(dst, src, keystream,
+                                       nblocks * SM4_BLOCK_SIZE);
+                       dst += nblocks * SM4_BLOCK_SIZE;
+                       src += nblocks * SM4_BLOCK_SIZE;
+                       nbytes -= nblocks * SM4_BLOCK_SIZE;
+               }
+
+               kernel_fpu_end();
+
+               /* tail */
+               if (walk.nbytes == walk.total && nbytes > 0) {
+                       u8 keystream[SM4_BLOCK_SIZE];
+
+                       sm4_crypt_block(ctx->rkey_enc, keystream, walk.iv);
+                       crypto_xor_cpy(dst, src, keystream, nbytes);
+                       nbytes = 0;
+               }
+
+               err = skcipher_walk_done(&walk, nbytes);
+       }
+
+       return err;
+}
+EXPORT_SYMBOL_GPL(sm4_avx_cfb_decrypt);
+
+/* CFB decryption entry: 8 blocks per call of the AVX bulk helper. */
+static int cfb_decrypt(struct skcipher_request *req)
+{
+       return sm4_avx_cfb_decrypt(req, SM4_CRYPT8_BLOCK_SIZE,
+                               sm4_aesni_avx_cfb_dec_blk8);
+}
+
+/*
+ * CTR en/decryption (same operation both ways).  Full @bsize chunks go
+ * to the SIMD helper @func (which also advances walk.iv); the remainder
+ * materializes up to 8 counter blocks, encrypts them 8-wide, and XORs
+ * with the data.  A final partial block uses one scalar keystream block.
+ */
+int sm4_avx_ctr_crypt(struct skcipher_request *req,
+                       unsigned int bsize, sm4_crypt_func func)
+{
+       struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+       struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
+       struct skcipher_walk walk;
+       unsigned int nbytes;
+       int err;
+
+       err = skcipher_walk_virt(&walk, req, false);
+
+       while ((nbytes = walk.nbytes) > 0) {
+               const u8 *src = walk.src.virt.addr;
+               u8 *dst = walk.dst.virt.addr;
+
+               kernel_fpu_begin();
+
+               while (nbytes >= bsize) {
+                       func(ctx->rkey_enc, dst, src, walk.iv);
+                       dst += bsize;
+                       src += bsize;
+                       nbytes -= bsize;
+               }
+
+               while (nbytes >= SM4_BLOCK_SIZE) {
+                       u8 keystream[SM4_BLOCK_SIZE * 8];
+                       unsigned int nblocks = min(nbytes >> 4, 8u);
+                       int i;
+
+                       /* lay out successive counter values, advancing
+                        * walk.iv as we go (crypto_inc is big-endian) */
+                       for (i = 0; i < nblocks; i++) {
+                               memcpy(&keystream[i * SM4_BLOCK_SIZE],
+                                       walk.iv, SM4_BLOCK_SIZE);
+                               crypto_inc(walk.iv, SM4_BLOCK_SIZE);
+                       }
+                       sm4_aesni_avx_crypt8(ctx->rkey_enc, keystream,
+                                       keystream, nblocks);
+
+                       crypto_xor_cpy(dst, src, keystream,
+                                       nblocks * SM4_BLOCK_SIZE);
+                       dst += nblocks * SM4_BLOCK_SIZE;
+                       src += nblocks * SM4_BLOCK_SIZE;
+                       nbytes -= nblocks * SM4_BLOCK_SIZE;
+               }
+
+               kernel_fpu_end();
+
+               /* tail */
+               if (walk.nbytes == walk.total && nbytes > 0) {
+                       u8 keystream[SM4_BLOCK_SIZE];
+
+                       memcpy(keystream, walk.iv, SM4_BLOCK_SIZE);
+                       crypto_inc(walk.iv, SM4_BLOCK_SIZE);
+
+                       sm4_crypt_block(ctx->rkey_enc, keystream, keystream);
+
+                       crypto_xor_cpy(dst, src, keystream, nbytes);
+                       dst += nbytes;
+                       src += nbytes;
+                       nbytes = 0;
+               }
+
+               err = skcipher_walk_done(&walk, nbytes);
+       }
+
+       return err;
+}
+EXPORT_SYMBOL_GPL(sm4_avx_ctr_crypt);
+
+/* CTR entry (encrypt == decrypt): 8 blocks per AVX bulk call. */
+static int ctr_crypt(struct skcipher_request *req)
+{
+       return sm4_avx_ctr_crypt(req, SM4_CRYPT8_BLOCK_SIZE,
+                               sm4_aesni_avx_ctr_enc_blk8);
+}
+
+/*
+ * Internal ("__"-prefixed, CRYPTO_ALG_INTERNAL) algorithms; user-visible
+ * wrappers are created around these by simd_register_skciphers_compat()
+ * so the FPU-using code only ever runs in a context where SIMD is usable.
+ * Priority 400 ranks these below the AVX2 (500) variants.
+ */
+static struct skcipher_alg sm4_aesni_avx_skciphers[] = {
+       {
+               .base = {
+                       .cra_name               = "__ecb(sm4)",
+                       .cra_driver_name        = "__ecb-sm4-aesni-avx",
+                       .cra_priority           = 400,
+                       .cra_flags              = CRYPTO_ALG_INTERNAL,
+                       .cra_blocksize          = SM4_BLOCK_SIZE,
+                       .cra_ctxsize            = sizeof(struct sm4_ctx),
+                       .cra_module             = THIS_MODULE,
+               },
+               .min_keysize    = SM4_KEY_SIZE,
+               .max_keysize    = SM4_KEY_SIZE,
+               .walksize       = 8 * SM4_BLOCK_SIZE,
+               .setkey         = sm4_skcipher_setkey,
+               .encrypt        = sm4_avx_ecb_encrypt,
+               .decrypt        = sm4_avx_ecb_decrypt,
+       }, {
+               .base = {
+                       .cra_name               = "__cbc(sm4)",
+                       .cra_driver_name        = "__cbc-sm4-aesni-avx",
+                       .cra_priority           = 400,
+                       .cra_flags              = CRYPTO_ALG_INTERNAL,
+                       .cra_blocksize          = SM4_BLOCK_SIZE,
+                       .cra_ctxsize            = sizeof(struct sm4_ctx),
+                       .cra_module             = THIS_MODULE,
+               },
+               .min_keysize    = SM4_KEY_SIZE,
+               .max_keysize    = SM4_KEY_SIZE,
+               .ivsize         = SM4_BLOCK_SIZE,
+               .walksize       = 8 * SM4_BLOCK_SIZE,
+               .setkey         = sm4_skcipher_setkey,
+               .encrypt        = sm4_cbc_encrypt,
+               .decrypt        = cbc_decrypt,
+       }, {
+               .base = {
+                       .cra_name               = "__cfb(sm4)",
+                       .cra_driver_name        = "__cfb-sm4-aesni-avx",
+                       .cra_priority           = 400,
+                       .cra_flags              = CRYPTO_ALG_INTERNAL,
+                       /* CFB/CTR are stream modes: blocksize 1 */
+                       .cra_blocksize          = 1,
+                       .cra_ctxsize            = sizeof(struct sm4_ctx),
+                       .cra_module             = THIS_MODULE,
+               },
+               .min_keysize    = SM4_KEY_SIZE,
+               .max_keysize    = SM4_KEY_SIZE,
+               .ivsize         = SM4_BLOCK_SIZE,
+               .chunksize      = SM4_BLOCK_SIZE,
+               .walksize       = 8 * SM4_BLOCK_SIZE,
+               .setkey         = sm4_skcipher_setkey,
+               .encrypt        = sm4_cfb_encrypt,
+               .decrypt        = cfb_decrypt,
+       }, {
+               .base = {
+                       .cra_name               = "__ctr(sm4)",
+                       .cra_driver_name        = "__ctr-sm4-aesni-avx",
+                       .cra_priority           = 400,
+                       .cra_flags              = CRYPTO_ALG_INTERNAL,
+                       .cra_blocksize          = 1,
+                       .cra_ctxsize            = sizeof(struct sm4_ctx),
+                       .cra_module             = THIS_MODULE,
+               },
+               .min_keysize    = SM4_KEY_SIZE,
+               .max_keysize    = SM4_KEY_SIZE,
+               .ivsize         = SM4_BLOCK_SIZE,
+               .chunksize      = SM4_BLOCK_SIZE,
+               .walksize       = 8 * SM4_BLOCK_SIZE,
+               .setkey         = sm4_skcipher_setkey,
+               .encrypt        = ctr_crypt,
+               .decrypt        = ctr_crypt,
+       }
+};
+
+static struct simd_skcipher_alg *
+simd_sm4_aesni_avx_skciphers[ARRAY_SIZE(sm4_aesni_avx_skciphers)];
+
+/*
+ * Module init: verify the CPU supports AVX/AES-NI/OSXSAVE and that the
+ * OS saves SSE+YMM xstate, then register the SIMD-wrapped skciphers.
+ * Returns -ENODEV when the hardware cannot run this implementation.
+ */
+static int __init sm4_init(void)
+{
+       const char *feature_name;
+
+       if (!boot_cpu_has(X86_FEATURE_AVX) ||
+           !boot_cpu_has(X86_FEATURE_AES) ||
+           !boot_cpu_has(X86_FEATURE_OSXSAVE)) {
+               pr_info("AVX or AES-NI instructions are not detected.\n");
+               return -ENODEV;
+       }
+
+       if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM,
+                               &feature_name)) {
+               pr_info("CPU feature '%s' is not supported.\n", feature_name);
+               return -ENODEV;
+       }
+
+       return simd_register_skciphers_compat(sm4_aesni_avx_skciphers,
+                                       ARRAY_SIZE(sm4_aesni_avx_skciphers),
+                                       simd_sm4_aesni_avx_skciphers);
+}
+
+/* Module exit: unregister both the internal and simd wrapper algorithms. */
+static void __exit sm4_exit(void)
+{
+       simd_unregister_skciphers(sm4_aesni_avx_skciphers,
+                                       ARRAY_SIZE(sm4_aesni_avx_skciphers),
+                                       simd_sm4_aesni_avx_skciphers);
+}
+
+module_init(sm4_init);
+module_exit(sm4_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Tianjia Zhang <tianjia.zhang@linux.alibaba.com>");
+MODULE_DESCRIPTION("SM4 Cipher Algorithm, AES-NI/AVX optimized");
+MODULE_ALIAS_CRYPTO("sm4");
+MODULE_ALIAS_CRYPTO("sm4-aesni-avx");
index 39d9ded..d6cdfe6 100644 (file)
@@ -34,4 +34,14 @@ config PERF_EVENTS_AMD_POWER
          (CPUID Fn8000_0007_EDX[12]) interface to calculate the
          average power consumption on Family 15h processors.
 
+config PERF_EVENTS_AMD_UNCORE
+       tristate "AMD Uncore performance events"
+       depends on PERF_EVENTS && CPU_SUP_AMD
+       default y
+       help
+         Include support for AMD uncore performance events for use with
+         e.g., perf stat -e amd_l3/.../,amd_df/.../.
+
+         To compile this driver as a module, choose M here: the
+         module will be called 'amd-uncore'.
 endmenu
index fe8795a..6cbe38d 100644 (file)
@@ -1,8 +1,9 @@
 # SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_CPU_SUP_AMD)              += core.o uncore.o
+obj-$(CONFIG_CPU_SUP_AMD)              += core.o
 obj-$(CONFIG_PERF_EVENTS_AMD_POWER)    += power.o
 obj-$(CONFIG_X86_LOCAL_APIC)           += ibs.o
+obj-$(CONFIG_PERF_EVENTS_AMD_UNCORE)   += amd-uncore.o
+amd-uncore-objs                                := uncore.o
 ifdef CONFIG_AMD_IOMMU
 obj-$(CONFIG_CPU_SUP_AMD)              += iommu.o
 endif
-
index 40669ea..9739019 100644 (file)
@@ -26,6 +26,7 @@ static u32 ibs_caps;
 #include <linux/hardirq.h>
 
 #include <asm/nmi.h>
+#include <asm/amd-ibs.h>
 
 #define IBS_FETCH_CONFIG_MASK  (IBS_FETCH_RAND_EN | IBS_FETCH_MAX_CNT)
 #define IBS_OP_CONFIG_MASK     IBS_OP_MAX_CNT
@@ -90,6 +91,7 @@ struct perf_ibs {
        unsigned long                   offset_mask[1];
        int                             offset_max;
        unsigned int                    fetch_count_reset_broken : 1;
+       unsigned int                    fetch_ignore_if_zero_rip : 1;
        struct cpu_perf_ibs __percpu    *pcpu;
 
        struct attribute                **format_attrs;
@@ -99,15 +101,6 @@ struct perf_ibs {
        u64                             (*get_count)(u64 config);
 };
 
-struct perf_ibs_data {
-       u32             size;
-       union {
-               u32     data[0];        /* data buffer starts here */
-               u32     caps;
-       };
-       u64             regs[MSR_AMD64_IBS_REG_COUNT_MAX];
-};
-
 static int
 perf_event_set_period(struct hw_perf_event *hwc, u64 min, u64 max, u64 *hw_period)
 {
@@ -328,11 +321,14 @@ static int perf_ibs_set_period(struct perf_ibs *perf_ibs,
 
 static u64 get_ibs_fetch_count(u64 config)
 {
-       return (config & IBS_FETCH_CNT) >> 12;
+       union ibs_fetch_ctl fetch_ctl = (union ibs_fetch_ctl)config;
+
+       return fetch_ctl.fetch_cnt << 4;
 }
 
 static u64 get_ibs_op_count(u64 config)
 {
+       union ibs_op_ctl op_ctl = (union ibs_op_ctl)config;
        u64 count = 0;
 
        /*
@@ -340,12 +336,12 @@ static u64 get_ibs_op_count(u64 config)
         * and the lower 7 bits of CurCnt are randomized.
         * Otherwise CurCnt has the full 27-bit current counter value.
         */
-       if (config & IBS_OP_VAL) {
-               count = (config & IBS_OP_MAX_CNT) << 4;
+       if (op_ctl.op_val) {
+               count = op_ctl.opmaxcnt << 4;
                if (ibs_caps & IBS_CAPS_OPCNTEXT)
-                       count += config & IBS_OP_MAX_CNT_EXT_MASK;
+                       count += op_ctl.opmaxcnt_ext << 20;
        } else if (ibs_caps & IBS_CAPS_RDWROPCNT) {
-               count = (config & IBS_OP_CUR_CNT) >> 32;
+               count = op_ctl.opcurcnt;
        }
 
        return count;
@@ -570,6 +566,7 @@ static struct perf_ibs perf_ibs_op = {
                .start          = perf_ibs_start,
                .stop           = perf_ibs_stop,
                .read           = perf_ibs_read,
+               .capabilities   = PERF_PMU_CAP_NO_EXCLUDE,
        },
        .msr                    = MSR_AMD64_IBSOPCTL,
        .config_mask            = IBS_OP_CONFIG_MASK,
@@ -672,6 +669,10 @@ fail:
        if (check_rip && (ibs_data.regs[2] & IBS_RIP_INVALID)) {
                regs.flags &= ~PERF_EFLAGS_EXACT;
        } else {
+               /* Workaround for erratum #1197 */
+               if (perf_ibs->fetch_ignore_if_zero_rip && !(ibs_data.regs[1]))
+                       goto out;
+
                set_linear_ip(&regs, ibs_data.regs[1]);
                regs.flags |= PERF_EFLAGS_EXACT;
        }
@@ -769,6 +770,9 @@ static __init void perf_event_ibs_init(void)
        if (boot_cpu_data.x86 >= 0x16 && boot_cpu_data.x86 <= 0x18)
                perf_ibs_fetch.fetch_count_reset_broken = 1;
 
+       if (boot_cpu_data.x86 == 0x19 && boot_cpu_data.x86_model < 0x10)
+               perf_ibs_fetch.fetch_ignore_if_zero_rip = 1;
+
        perf_ibs_pmu_init(&perf_ibs_fetch, "ibs_fetch");
 
        if (ibs_caps & IBS_CAPS_OPCNT) {
index 16a2369..37d5b38 100644 (file)
@@ -213,6 +213,7 @@ static struct pmu pmu_class = {
        .stop           = pmu_event_stop,
        .read           = pmu_event_read,
        .capabilities   = PERF_PMU_CAP_NO_EXCLUDE,
+       .module         = THIS_MODULE,
 };
 
 static int power_cpu_exit(unsigned int cpu)
index 582c0ff..0d04414 100644 (file)
 #include <linux/init.h>
 #include <linux/cpu.h>
 #include <linux/cpumask.h>
+#include <linux/cpufeature.h>
+#include <linux/smp.h>
 
-#include <asm/cpufeature.h>
 #include <asm/perf_event.h>
 #include <asm/msr.h>
-#include <asm/smp.h>
 
 #define NUM_COUNTERS_NB                4
 #define NUM_COUNTERS_L2                4
@@ -347,6 +347,7 @@ static struct pmu amd_nb_pmu = {
        .stop           = amd_uncore_stop,
        .read           = amd_uncore_read,
        .capabilities   = PERF_PMU_CAP_NO_EXCLUDE | PERF_PMU_CAP_NO_INTERRUPT,
+       .module         = THIS_MODULE,
 };
 
 static struct pmu amd_llc_pmu = {
@@ -360,6 +361,7 @@ static struct pmu amd_llc_pmu = {
        .stop           = amd_uncore_stop,
        .read           = amd_uncore_read,
        .capabilities   = PERF_PMU_CAP_NO_EXCLUDE | PERF_PMU_CAP_NO_INTERRUPT,
+       .module         = THIS_MODULE,
 };
 
 static struct amd_uncore *amd_uncore_alloc(unsigned int cpu)
@@ -452,7 +454,7 @@ static int amd_uncore_cpu_starting(unsigned int cpu)
 
        if (amd_uncore_llc) {
                uncore = *per_cpu_ptr(amd_uncore_llc, cpu);
-               uncore->id = per_cpu(cpu_llc_id, cpu);
+               uncore->id = get_llc_id(cpu);
 
                uncore = amd_uncore_find_online_sibling(uncore, amd_uncore_llc);
                *per_cpu_ptr(amd_uncore_llc, cpu) = uncore;
@@ -659,12 +661,34 @@ fail_prep:
 fail_llc:
        if (boot_cpu_has(X86_FEATURE_PERFCTR_NB))
                perf_pmu_unregister(&amd_nb_pmu);
-       if (amd_uncore_llc)
-               free_percpu(amd_uncore_llc);
+       free_percpu(amd_uncore_llc);
 fail_nb:
-       if (amd_uncore_nb)
-               free_percpu(amd_uncore_nb);
+       free_percpu(amd_uncore_nb);
 
        return ret;
 }
-device_initcall(amd_uncore_init);
+
+static void __exit amd_uncore_exit(void)
+{
+       cpuhp_remove_state(CPUHP_AP_PERF_X86_AMD_UNCORE_ONLINE);
+       cpuhp_remove_state(CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING);
+       cpuhp_remove_state(CPUHP_PERF_X86_AMD_UNCORE_PREP);
+
+       if (boot_cpu_has(X86_FEATURE_PERFCTR_LLC)) {
+               perf_pmu_unregister(&amd_llc_pmu);
+               free_percpu(amd_uncore_llc);
+               amd_uncore_llc = NULL;
+       }
+
+       if (boot_cpu_has(X86_FEATURE_PERFCTR_NB)) {
+               perf_pmu_unregister(&amd_nb_pmu);
+               free_percpu(amd_uncore_nb);
+               amd_uncore_nb = NULL;
+       }
+}
+
+module_init(amd_uncore_init);
+module_exit(amd_uncore_exit);
+
+MODULE_DESCRIPTION("AMD Uncore Driver");
+MODULE_LICENSE("GPL v2");
index 1eb4513..2a57dbe 100644 (file)
@@ -1087,10 +1087,8 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
         * validate an event group (assign == NULL)
         */
        if (!unsched && assign) {
-               for (i = 0; i < n; i++) {
-                       e = cpuc->event_list[i];
+               for (i = 0; i < n; i++)
                        static_call_cond(x86_pmu_commit_scheduling)(cpuc, i, assign[i]);
-               }
        } else {
                for (i = n0; i < n; i++) {
                        e = cpuc->event_list[i];
@@ -2489,13 +2487,15 @@ void perf_clear_dirty_counters(void)
                return;
 
        for_each_set_bit(i, cpuc->dirty, X86_PMC_IDX_MAX) {
-               /* Metrics and fake events don't have corresponding HW counters. */
-               if (is_metric_idx(i) || (i == INTEL_PMC_IDX_FIXED_VLBR))
-                       continue;
-               else if (i >= INTEL_PMC_IDX_FIXED)
+               if (i >= INTEL_PMC_IDX_FIXED) {
+                       /* Metrics and fake events don't have corresponding HW counters. */
+                       if ((i - INTEL_PMC_IDX_FIXED) >= hybrid(cpuc->pmu, num_counters_fixed))
+                               continue;
+
                        wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + (i - INTEL_PMC_IDX_FIXED), 0);
-               else
+               } else {
                        wrmsrl(x86_pmu_event_addr(i), 0);
+               }
        }
 
        bitmap_zero(cpuc->dirty, X86_PMC_IDX_MAX);
index fca7a6e..7011e87 100644 (file)
@@ -2904,24 +2904,28 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status)
  */
 static int intel_pmu_handle_irq(struct pt_regs *regs)
 {
-       struct cpu_hw_events *cpuc;
+       struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+       bool late_ack = hybrid_bit(cpuc->pmu, late_ack);
+       bool mid_ack = hybrid_bit(cpuc->pmu, mid_ack);
        int loops;
        u64 status;
        int handled;
        int pmu_enabled;
 
-       cpuc = this_cpu_ptr(&cpu_hw_events);
-
        /*
         * Save the PMU state.
         * It needs to be restored when leaving the handler.
         */
        pmu_enabled = cpuc->enabled;
        /*
-        * No known reason to not always do late ACK,
-        * but just in case do it opt-in.
+        * In general, the early ACK is only applied for old platforms.
+        * For the big core starts from Haswell, the late ACK should be
+        * applied.
+        * For the small core after Tremont, we have to do the ACK right
+        * before re-enabling counters, which is in the middle of the
+        * NMI handler.
         */
-       if (!x86_pmu.late_ack)
+       if (!late_ack && !mid_ack)
                apic_write(APIC_LVTPC, APIC_DM_NMI);
        intel_bts_disable_local();
        cpuc->enabled = 0;
@@ -2958,6 +2962,8 @@ again:
                goto again;
 
 done:
+       if (mid_ack)
+               apic_write(APIC_LVTPC, APIC_DM_NMI);
        /* Only restore PMU state when it's active. See x86_pmu_disable(). */
        cpuc->enabled = pmu_enabled;
        if (pmu_enabled)
@@ -2969,7 +2975,7 @@ done:
         * have been reset. This avoids spurious NMIs on
         * Haswell CPUs.
         */
-       if (x86_pmu.late_ack)
+       if (late_ack)
                apic_write(APIC_LVTPC, APIC_DM_NMI);
        return handled;
 }
@@ -5026,9 +5032,9 @@ static ssize_t freeze_on_smi_store(struct device *cdev,
 
        x86_pmu.attr_freeze_on_smi = val;
 
-       get_online_cpus();
+       cpus_read_lock();
        on_each_cpu(flip_smm_bit, &val, 1);
-       put_online_cpus();
+       cpus_read_unlock();
 done:
        mutex_unlock(&freeze_on_smi_mutex);
 
@@ -5071,9 +5077,9 @@ static ssize_t set_sysctl_tfa(struct device *cdev,
 
        allow_tsx_force_abort = val;
 
-       get_online_cpus();
+       cpus_read_lock();
        on_each_cpu(update_tfa_sched, NULL, 1);
-       put_online_cpus();
+       cpus_read_unlock();
 
        return count;
 }
@@ -6129,7 +6135,6 @@ __init int intel_pmu_init(void)
                static_branch_enable(&perf_is_hybrid);
                x86_pmu.num_hybrid_pmus = X86_HYBRID_NUM_PMUS;
 
-               x86_pmu.late_ack = true;
                x86_pmu.pebs_aliases = NULL;
                x86_pmu.pebs_prec_dist = true;
                x86_pmu.pebs_block = true;
@@ -6167,6 +6172,7 @@ __init int intel_pmu_init(void)
                pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX];
                pmu->name = "cpu_core";
                pmu->cpu_type = hybrid_big;
+               pmu->late_ack = true;
                if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU)) {
                        pmu->num_counters = x86_pmu.num_counters + 2;
                        pmu->num_counters_fixed = x86_pmu.num_counters_fixed + 1;
@@ -6192,6 +6198,7 @@ __init int intel_pmu_init(void)
                pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX];
                pmu->name = "cpu_atom";
                pmu->cpu_type = hybrid_small;
+               pmu->mid_ack = true;
                pmu->num_counters = x86_pmu.num_counters;
                pmu->num_counters_fixed = x86_pmu.num_counters_fixed;
                pmu->max_pebs_events = x86_pmu.max_pebs_events;
index 9158476..7f406c1 100644 (file)
@@ -62,7 +62,7 @@ static struct pt_cap_desc {
        PT_CAP(single_range_output,     0, CPUID_ECX, BIT(2)),
        PT_CAP(output_subsys,           0, CPUID_ECX, BIT(3)),
        PT_CAP(payloads_lip,            0, CPUID_ECX, BIT(31)),
-       PT_CAP(num_address_ranges,      1, CPUID_EAX, 0x3),
+       PT_CAP(num_address_ranges,      1, CPUID_EAX, 0x7),
        PT_CAP(mtc_periods,             1, CPUID_EAX, 0xffff0000),
        PT_CAP(cycle_thresholds,        1, CPUID_EBX, 0xffff),
        PT_CAP(psb_periods,             1, CPUID_EBX, 0xffff0000),
@@ -1708,7 +1708,7 @@ static __init int pt_init(void)
        if (!boot_cpu_has(X86_FEATURE_INTEL_PT))
                return -ENODEV;
 
-       get_online_cpus();
+       cpus_read_lock();
        for_each_online_cpu(cpu) {
                u64 ctl;
 
@@ -1716,7 +1716,7 @@ static __init int pt_init(void)
                if (!ret && (ctl & RTIT_CTL_TRACEEN))
                        prior_warn++;
        }
-       put_online_cpus();
+       cpus_read_unlock();
 
        if (prior_warn) {
                x86_add_exclusive(x86_lbr_exclusive_pt);
index 9bf4dbb..c72e368 100644 (file)
@@ -842,6 +842,18 @@ static const struct attribute_group uncore_pmu_attr_group = {
        .attrs = uncore_pmu_attrs,
 };
 
+void uncore_get_alias_name(char *pmu_name, struct intel_uncore_pmu *pmu)
+{
+       struct intel_uncore_type *type = pmu->type;
+
+       if (type->num_boxes == 1)
+               sprintf(pmu_name, "uncore_type_%u", type->type_id);
+       else {
+               sprintf(pmu_name, "uncore_type_%u_%d",
+                       type->type_id, type->box_ids[pmu->pmu_idx]);
+       }
+}
+
 static void uncore_get_pmu_name(struct intel_uncore_pmu *pmu)
 {
        struct intel_uncore_type *type = pmu->type;
@@ -851,12 +863,7 @@ static void uncore_get_pmu_name(struct intel_uncore_pmu *pmu)
         * Use uncore_type_&typeid_&boxid as name.
         */
        if (!type->name) {
-               if (type->num_boxes == 1)
-                       sprintf(pmu->name, "uncore_type_%u", type->type_id);
-               else {
-                       sprintf(pmu->name, "uncore_type_%u_%d",
-                               type->type_id, type->box_ids[pmu->pmu_idx]);
-               }
+               uncore_get_alias_name(pmu->name, pmu);
                return;
        }
 
@@ -865,9 +872,13 @@ static void uncore_get_pmu_name(struct intel_uncore_pmu *pmu)
                        sprintf(pmu->name, "uncore_%s", type->name);
                else
                        sprintf(pmu->name, "uncore");
-       } else
-               sprintf(pmu->name, "uncore_%s_%d", type->name, pmu->pmu_idx);
-
+       } else {
+               /*
+                * Use the box ID from the discovery table if applicable.
+                */
+               sprintf(pmu->name, "uncore_%s_%d", type->name,
+                       type->box_ids ? type->box_ids[pmu->pmu_idx] : pmu->pmu_idx);
+       }
 }
 
 static int uncore_pmu_register(struct intel_uncore_pmu *pmu)
@@ -1663,6 +1674,7 @@ struct intel_uncore_init_fun {
        void    (*cpu_init)(void);
        int     (*pci_init)(void);
        void    (*mmio_init)(void);
+       bool    use_discovery;
 };
 
 static const struct intel_uncore_init_fun nhm_uncore_init __initconst = {
@@ -1765,6 +1777,13 @@ static const struct intel_uncore_init_fun snr_uncore_init __initconst = {
        .mmio_init = snr_uncore_mmio_init,
 };
 
+static const struct intel_uncore_init_fun spr_uncore_init __initconst = {
+       .cpu_init = spr_uncore_cpu_init,
+       .pci_init = spr_uncore_pci_init,
+       .mmio_init = spr_uncore_mmio_init,
+       .use_discovery = true,
+};
+
 static const struct intel_uncore_init_fun generic_uncore_init __initconst = {
        .cpu_init = intel_uncore_generic_uncore_cpu_init,
        .pci_init = intel_uncore_generic_uncore_pci_init,
@@ -1809,6 +1828,7 @@ static const struct x86_cpu_id intel_uncore_match[] __initconst = {
        X86_MATCH_INTEL_FAM6_MODEL(ROCKETLAKE,          &rkl_uncore_init),
        X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE,           &adl_uncore_init),
        X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L,         &adl_uncore_init),
+       X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X,    &spr_uncore_init),
        X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D,      &snr_uncore_init),
        {},
 };
@@ -1832,8 +1852,13 @@ static int __init intel_uncore_init(void)
                        uncore_init = (struct intel_uncore_init_fun *)&generic_uncore_init;
                else
                        return -ENODEV;
-       } else
+       } else {
                uncore_init = (struct intel_uncore_init_fun *)id->driver_data;
+               if (uncore_no_discover && uncore_init->use_discovery)
+                       return -ENODEV;
+               if (uncore_init->use_discovery && !intel_uncore_has_discovery_tables())
+                       return -ENODEV;
+       }
 
        if (uncore_init->pci_init) {
                pret = uncore_init->pci_init();
index 187d728..b968798 100644 (file)
@@ -561,6 +561,7 @@ struct event_constraint *
 uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event);
 void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event);
 u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx);
+void uncore_get_alias_name(char *pmu_name, struct intel_uncore_pmu *pmu);
 
 extern struct intel_uncore_type *empty_uncore[];
 extern struct intel_uncore_type **uncore_msr_uncores;
@@ -608,6 +609,9 @@ void snr_uncore_mmio_init(void);
 int icx_uncore_pci_init(void);
 void icx_uncore_cpu_init(void);
 void icx_uncore_mmio_init(void);
+int spr_uncore_pci_init(void);
+void spr_uncore_cpu_init(void);
+void spr_uncore_mmio_init(void);
 
 /* uncore_nhmex.c */
 void nhmex_uncore_cpu_init(void);
index aba9bff..3049c64 100644 (file)
@@ -337,17 +337,17 @@ static const struct attribute_group generic_uncore_format_group = {
        .attrs = generic_uncore_formats_attr,
 };
 
-static void intel_generic_uncore_msr_init_box(struct intel_uncore_box *box)
+void intel_generic_uncore_msr_init_box(struct intel_uncore_box *box)
 {
        wrmsrl(uncore_msr_box_ctl(box), GENERIC_PMON_BOX_CTL_INT);
 }
 
-static void intel_generic_uncore_msr_disable_box(struct intel_uncore_box *box)
+void intel_generic_uncore_msr_disable_box(struct intel_uncore_box *box)
 {
        wrmsrl(uncore_msr_box_ctl(box), GENERIC_PMON_BOX_CTL_FRZ);
 }
 
-static void intel_generic_uncore_msr_enable_box(struct intel_uncore_box *box)
+void intel_generic_uncore_msr_enable_box(struct intel_uncore_box *box)
 {
        wrmsrl(uncore_msr_box_ctl(box), 0);
 }
@@ -377,7 +377,7 @@ static struct intel_uncore_ops generic_uncore_msr_ops = {
        .read_counter           = uncore_msr_read_counter,
 };
 
-static void intel_generic_uncore_pci_init_box(struct intel_uncore_box *box)
+void intel_generic_uncore_pci_init_box(struct intel_uncore_box *box)
 {
        struct pci_dev *pdev = box->pci_dev;
        int box_ctl = uncore_pci_box_ctl(box);
@@ -386,7 +386,7 @@ static void intel_generic_uncore_pci_init_box(struct intel_uncore_box *box)
        pci_write_config_dword(pdev, box_ctl, GENERIC_PMON_BOX_CTL_INT);
 }
 
-static void intel_generic_uncore_pci_disable_box(struct intel_uncore_box *box)
+void intel_generic_uncore_pci_disable_box(struct intel_uncore_box *box)
 {
        struct pci_dev *pdev = box->pci_dev;
        int box_ctl = uncore_pci_box_ctl(box);
@@ -394,7 +394,7 @@ static void intel_generic_uncore_pci_disable_box(struct intel_uncore_box *box)
        pci_write_config_dword(pdev, box_ctl, GENERIC_PMON_BOX_CTL_FRZ);
 }
 
-static void intel_generic_uncore_pci_enable_box(struct intel_uncore_box *box)
+void intel_generic_uncore_pci_enable_box(struct intel_uncore_box *box)
 {
        struct pci_dev *pdev = box->pci_dev;
        int box_ctl = uncore_pci_box_ctl(box);
@@ -411,8 +411,8 @@ static void intel_generic_uncore_pci_enable_event(struct intel_uncore_box *box,
        pci_write_config_dword(pdev, hwc->config_base, hwc->config);
 }
 
-static void intel_generic_uncore_pci_disable_event(struct intel_uncore_box *box,
-                                            struct perf_event *event)
+void intel_generic_uncore_pci_disable_event(struct intel_uncore_box *box,
+                                           struct perf_event *event)
 {
        struct pci_dev *pdev = box->pci_dev;
        struct hw_perf_event *hwc = &event->hw;
@@ -420,8 +420,8 @@ static void intel_generic_uncore_pci_disable_event(struct intel_uncore_box *box,
        pci_write_config_dword(pdev, hwc->config_base, 0);
 }
 
-static u64 intel_generic_uncore_pci_read_counter(struct intel_uncore_box *box,
-                                          struct perf_event *event)
+u64 intel_generic_uncore_pci_read_counter(struct intel_uncore_box *box,
+                                         struct perf_event *event)
 {
        struct pci_dev *pdev = box->pci_dev;
        struct hw_perf_event *hwc = &event->hw;
@@ -454,7 +454,7 @@ static unsigned int generic_uncore_mmio_box_ctl(struct intel_uncore_box *box)
        return type->box_ctls[box->dieid] + type->mmio_offsets[box->pmu->pmu_idx];
 }
 
-static void intel_generic_uncore_mmio_init_box(struct intel_uncore_box *box)
+void intel_generic_uncore_mmio_init_box(struct intel_uncore_box *box)
 {
        unsigned int box_ctl = generic_uncore_mmio_box_ctl(box);
        struct intel_uncore_type *type = box->pmu->type;
@@ -478,7 +478,7 @@ static void intel_generic_uncore_mmio_init_box(struct intel_uncore_box *box)
        writel(GENERIC_PMON_BOX_CTL_INT, box->io_addr);
 }
 
-static void intel_generic_uncore_mmio_disable_box(struct intel_uncore_box *box)
+void intel_generic_uncore_mmio_disable_box(struct intel_uncore_box *box)
 {
        if (!box->io_addr)
                return;
@@ -486,7 +486,7 @@ static void intel_generic_uncore_mmio_disable_box(struct intel_uncore_box *box)
        writel(GENERIC_PMON_BOX_CTL_FRZ, box->io_addr);
 }
 
-static void intel_generic_uncore_mmio_enable_box(struct intel_uncore_box *box)
+void intel_generic_uncore_mmio_enable_box(struct intel_uncore_box *box)
 {
        if (!box->io_addr)
                return;
@@ -505,8 +505,8 @@ static void intel_generic_uncore_mmio_enable_event(struct intel_uncore_box *box,
        writel(hwc->config, box->io_addr + hwc->config_base);
 }
 
-static void intel_generic_uncore_mmio_disable_event(struct intel_uncore_box *box,
-                                             struct perf_event *event)
+void intel_generic_uncore_mmio_disable_event(struct intel_uncore_box *box,
+                                            struct perf_event *event)
 {
        struct hw_perf_event *hwc = &event->hw;
 
@@ -568,8 +568,8 @@ static bool uncore_update_uncore_type(enum uncore_access_type type_id,
        return true;
 }
 
-static struct intel_uncore_type **
-intel_uncore_generic_init_uncores(enum uncore_access_type type_id)
+struct intel_uncore_type **
+intel_uncore_generic_init_uncores(enum uncore_access_type type_id, int num_extra)
 {
        struct intel_uncore_discovery_type *type;
        struct intel_uncore_type **uncores;
@@ -577,7 +577,7 @@ intel_uncore_generic_init_uncores(enum uncore_access_type type_id)
        struct rb_node *node;
        int i = 0;
 
-       uncores = kcalloc(num_discovered_types[type_id] + 1,
+       uncores = kcalloc(num_discovered_types[type_id] + num_extra + 1,
                          sizeof(struct intel_uncore_type *), GFP_KERNEL);
        if (!uncores)
                return empty_uncore;
@@ -606,17 +606,17 @@ intel_uncore_generic_init_uncores(enum uncore_access_type type_id)
 
 void intel_uncore_generic_uncore_cpu_init(void)
 {
-       uncore_msr_uncores = intel_uncore_generic_init_uncores(UNCORE_ACCESS_MSR);
+       uncore_msr_uncores = intel_uncore_generic_init_uncores(UNCORE_ACCESS_MSR, 0);
 }
 
 int intel_uncore_generic_uncore_pci_init(void)
 {
-       uncore_pci_uncores = intel_uncore_generic_init_uncores(UNCORE_ACCESS_PCI);
+       uncore_pci_uncores = intel_uncore_generic_init_uncores(UNCORE_ACCESS_PCI, 0);
 
        return 0;
 }
 
 void intel_uncore_generic_uncore_mmio_init(void)
 {
-       uncore_mmio_uncores = intel_uncore_generic_init_uncores(UNCORE_ACCESS_MMIO);
+       uncore_mmio_uncores = intel_uncore_generic_init_uncores(UNCORE_ACCESS_MMIO, 0);
 }
index 1d65293..7280c8a 100644 (file)
@@ -129,3 +129,24 @@ void intel_uncore_clear_discovery_tables(void);
 void intel_uncore_generic_uncore_cpu_init(void);
 int intel_uncore_generic_uncore_pci_init(void);
 void intel_uncore_generic_uncore_mmio_init(void);
+
+void intel_generic_uncore_msr_init_box(struct intel_uncore_box *box);
+void intel_generic_uncore_msr_disable_box(struct intel_uncore_box *box);
+void intel_generic_uncore_msr_enable_box(struct intel_uncore_box *box);
+
+void intel_generic_uncore_mmio_init_box(struct intel_uncore_box *box);
+void intel_generic_uncore_mmio_disable_box(struct intel_uncore_box *box);
+void intel_generic_uncore_mmio_enable_box(struct intel_uncore_box *box);
+void intel_generic_uncore_mmio_disable_event(struct intel_uncore_box *box,
+                                            struct perf_event *event);
+
+void intel_generic_uncore_pci_init_box(struct intel_uncore_box *box);
+void intel_generic_uncore_pci_disable_box(struct intel_uncore_box *box);
+void intel_generic_uncore_pci_enable_box(struct intel_uncore_box *box);
+void intel_generic_uncore_pci_disable_event(struct intel_uncore_box *box,
+                                           struct perf_event *event);
+u64 intel_generic_uncore_pci_read_counter(struct intel_uncore_box *box,
+                                         struct perf_event *event);
+
+struct intel_uncore_type **
+intel_uncore_generic_init_uncores(enum uncore_access_type type_id, int num_extra);
index 609c24a..5ddc0f3 100644 (file)
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 /* SandyBridge-EP/IvyTown uncore support */
 #include "uncore.h"
+#include "uncore_discovery.h"
 
 /* SNB-EP pci bus to socket mapping */
 #define SNBEP_CPUNODEID                        0x40
 #define ICX_NUMBER_IMC_CHN                     2
 #define ICX_IMC_MEM_STRIDE                     0x4
 
+/* SPR */
+#define SPR_RAW_EVENT_MASK_EXT                 0xffffff
+
+/* SPR CHA */
+#define SPR_CHA_PMON_CTL_TID_EN                        (1 << 16)
+#define SPR_CHA_PMON_EVENT_MASK                        (SNBEP_PMON_RAW_EVENT_MASK | \
+                                                SPR_CHA_PMON_CTL_TID_EN)
+#define SPR_CHA_PMON_BOX_FILTER_TID            0x3ff
+
+#define SPR_C0_MSR_PMON_BOX_FILTER0            0x200e
+
 DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
 DEFINE_UNCORE_FORMAT_ATTR(event2, event, "config:0-6");
 DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21");
@@ -466,6 +478,7 @@ DEFINE_UNCORE_FORMAT_ATTR(umask_ext4, umask, "config:8-15,32-55");
 DEFINE_UNCORE_FORMAT_ATTR(qor, qor, "config:16");
 DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
 DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
+DEFINE_UNCORE_FORMAT_ATTR(tid_en2, tid_en, "config:16");
 DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
 DEFINE_UNCORE_FORMAT_ATTR(thresh9, thresh, "config:24-35");
 DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
@@ -3838,26 +3851,32 @@ clear_attr_update:
        return ret;
 }
 
-static int skx_iio_set_mapping(struct intel_uncore_type *type)
-{
-       return pmu_iio_set_mapping(type, &skx_iio_mapping_group);
-}
-
-static void skx_iio_cleanup_mapping(struct intel_uncore_type *type)
+static void
+pmu_iio_cleanup_mapping(struct intel_uncore_type *type, struct attribute_group *ag)
 {
-       struct attribute **attr = skx_iio_mapping_group.attrs;
+       struct attribute **attr = ag->attrs;
 
        if (!attr)
                return;
 
        for (; *attr; attr++)
                kfree((*attr)->name);
-       kfree(attr_to_ext_attr(*skx_iio_mapping_group.attrs));
-       kfree(skx_iio_mapping_group.attrs);
-       skx_iio_mapping_group.attrs = NULL;
+       kfree(attr_to_ext_attr(*ag->attrs));
+       kfree(ag->attrs);
+       ag->attrs = NULL;
        kfree(type->topology);
 }
 
+static int skx_iio_set_mapping(struct intel_uncore_type *type)
+{
+       return pmu_iio_set_mapping(type, &skx_iio_mapping_group);
+}
+
+static void skx_iio_cleanup_mapping(struct intel_uncore_type *type)
+{
+       pmu_iio_cleanup_mapping(type, &skx_iio_mapping_group);
+}
+
 static struct intel_uncore_type skx_uncore_iio = {
        .name                   = "iio",
        .num_counters           = 4,
@@ -4501,6 +4520,11 @@ static int snr_iio_set_mapping(struct intel_uncore_type *type)
        return pmu_iio_set_mapping(type, &snr_iio_mapping_group);
 }
 
+static void snr_iio_cleanup_mapping(struct intel_uncore_type *type)
+{
+       pmu_iio_cleanup_mapping(type, &snr_iio_mapping_group);
+}
+
 static struct intel_uncore_type snr_uncore_iio = {
        .name                   = "iio",
        .num_counters           = 4,
@@ -4517,7 +4541,7 @@ static struct intel_uncore_type snr_uncore_iio = {
        .attr_update            = snr_iio_attr_update,
        .get_topology           = snr_iio_get_topology,
        .set_mapping            = snr_iio_set_mapping,
-       .cleanup_mapping        = skx_iio_cleanup_mapping,
+       .cleanup_mapping        = snr_iio_cleanup_mapping,
 };
 
 static struct intel_uncore_type snr_uncore_irp = {
@@ -4783,13 +4807,15 @@ int snr_uncore_pci_init(void)
        return 0;
 }
 
-static struct pci_dev *snr_uncore_get_mc_dev(int id)
+#define SNR_MC_DEVICE_ID       0x3451
+
+static struct pci_dev *snr_uncore_get_mc_dev(unsigned int device, int id)
 {
        struct pci_dev *mc_dev = NULL;
        int pkg;
 
        while (1) {
-               mc_dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3451, mc_dev);
+               mc_dev = pci_get_device(PCI_VENDOR_ID_INTEL, device, mc_dev);
                if (!mc_dev)
                        break;
                pkg = uncore_pcibus_to_dieid(mc_dev->bus);
@@ -4799,19 +4825,20 @@ static struct pci_dev *snr_uncore_get_mc_dev(int id)
        return mc_dev;
 }
 
-static void __snr_uncore_mmio_init_box(struct intel_uncore_box *box,
-                                      unsigned int box_ctl, int mem_offset)
+static int snr_uncore_mmio_map(struct intel_uncore_box *box,
+                              unsigned int box_ctl, int mem_offset,
+                              unsigned int device)
 {
-       struct pci_dev *pdev = snr_uncore_get_mc_dev(box->dieid);
+       struct pci_dev *pdev = snr_uncore_get_mc_dev(device, box->dieid);
        struct intel_uncore_type *type = box->pmu->type;
        resource_size_t addr;
        u32 pci_dword;
 
        if (!pdev)
-               return;
+               return -ENODEV;
 
        pci_read_config_dword(pdev, SNR_IMC_MMIO_BASE_OFFSET, &pci_dword);
-       addr = (pci_dword & SNR_IMC_MMIO_BASE_MASK) << 23;
+       addr = ((resource_size_t)pci_dword & SNR_IMC_MMIO_BASE_MASK) << 23;
 
        pci_read_config_dword(pdev, mem_offset, &pci_dword);
        addr |= (pci_dword & SNR_IMC_MMIO_MEM0_MASK) << 12;
@@ -4821,16 +4848,25 @@ static void __snr_uncore_mmio_init_box(struct intel_uncore_box *box,
        box->io_addr = ioremap(addr, type->mmio_map_size);
        if (!box->io_addr) {
                pr_warn("perf uncore: Failed to ioremap for %s.\n", type->name);
-               return;
+               return -EINVAL;
        }
 
-       writel(IVBEP_PMON_BOX_CTL_INT, box->io_addr);
+       return 0;
+}
+
+static void __snr_uncore_mmio_init_box(struct intel_uncore_box *box,
+                                      unsigned int box_ctl, int mem_offset,
+                                      unsigned int device)
+{
+       if (!snr_uncore_mmio_map(box, box_ctl, mem_offset, device))
+               writel(IVBEP_PMON_BOX_CTL_INT, box->io_addr);
 }
 
 static void snr_uncore_mmio_init_box(struct intel_uncore_box *box)
 {
        __snr_uncore_mmio_init_box(box, uncore_mmio_box_ctl(box),
-                                  SNR_IMC_MMIO_MEM0_OFFSET);
+                                  SNR_IMC_MMIO_MEM0_OFFSET,
+                                  SNR_MC_DEVICE_ID);
 }
 
 static void snr_uncore_mmio_disable_box(struct intel_uncore_box *box)
@@ -5092,6 +5128,11 @@ static int icx_iio_set_mapping(struct intel_uncore_type *type)
        return pmu_iio_set_mapping(type, &icx_iio_mapping_group);
 }
 
+static void icx_iio_cleanup_mapping(struct intel_uncore_type *type)
+{
+       pmu_iio_cleanup_mapping(type, &icx_iio_mapping_group);
+}
+
 static struct intel_uncore_type icx_uncore_iio = {
        .name                   = "iio",
        .num_counters           = 4,
@@ -5109,7 +5150,7 @@ static struct intel_uncore_type icx_uncore_iio = {
        .attr_update            = icx_iio_attr_update,
        .get_topology           = icx_iio_get_topology,
        .set_mapping            = icx_iio_set_mapping,
-       .cleanup_mapping        = skx_iio_cleanup_mapping,
+       .cleanup_mapping        = icx_iio_cleanup_mapping,
 };
 
 static struct intel_uncore_type icx_uncore_irp = {
@@ -5405,7 +5446,8 @@ static void icx_uncore_imc_init_box(struct intel_uncore_box *box)
        int mem_offset = (box->pmu->pmu_idx / ICX_NUMBER_IMC_CHN) * ICX_IMC_MEM_STRIDE +
                         SNR_IMC_MMIO_MEM0_OFFSET;
 
-       __snr_uncore_mmio_init_box(box, box_ctl, mem_offset);
+       __snr_uncore_mmio_init_box(box, box_ctl, mem_offset,
+                                  SNR_MC_DEVICE_ID);
 }
 
 static struct intel_uncore_ops icx_uncore_mmio_ops = {
@@ -5475,7 +5517,8 @@ static void icx_uncore_imc_freerunning_init_box(struct intel_uncore_box *box)
        int mem_offset = box->pmu->pmu_idx * ICX_IMC_MEM_STRIDE +
                         SNR_IMC_MMIO_MEM0_OFFSET;
 
-       __snr_uncore_mmio_init_box(box, uncore_mmio_box_ctl(box), mem_offset);
+       snr_uncore_mmio_map(box, uncore_mmio_box_ctl(box),
+                           mem_offset, SNR_MC_DEVICE_ID);
 }
 
 static struct intel_uncore_ops icx_uncore_imc_freerunning_ops = {
@@ -5509,3 +5552,497 @@ void icx_uncore_mmio_init(void)
 }
 
 /* end of ICX uncore support */
+
+/* SPR uncore support */
+
+static void spr_uncore_msr_enable_event(struct intel_uncore_box *box,
+                                       struct perf_event *event)
+{
+       struct hw_perf_event *hwc = &event->hw;
+       struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
+
+       if (reg1->idx != EXTRA_REG_NONE)
+               wrmsrl(reg1->reg, reg1->config);
+
+       wrmsrl(hwc->config_base, hwc->config);
+}
+
+static void spr_uncore_msr_disable_event(struct intel_uncore_box *box,
+                                        struct perf_event *event)
+{
+       struct hw_perf_event *hwc = &event->hw;
+       struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
+
+       if (reg1->idx != EXTRA_REG_NONE)
+               wrmsrl(reg1->reg, 0);
+
+       wrmsrl(hwc->config_base, 0);
+}
+
+static int spr_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
+{
+       struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
+       bool tie_en = !!(event->hw.config & SPR_CHA_PMON_CTL_TID_EN);
+       struct intel_uncore_type *type = box->pmu->type;
+
+       if (tie_en) {
+               reg1->reg = SPR_C0_MSR_PMON_BOX_FILTER0 +
+                           HSWEP_CBO_MSR_OFFSET * type->box_ids[box->pmu->pmu_idx];
+               reg1->config = event->attr.config1 & SPR_CHA_PMON_BOX_FILTER_TID;
+               reg1->idx = 0;
+       }
+
+       return 0;
+}
+
+static struct intel_uncore_ops spr_uncore_chabox_ops = {
+       .init_box               = intel_generic_uncore_msr_init_box,
+       .disable_box            = intel_generic_uncore_msr_disable_box,
+       .enable_box             = intel_generic_uncore_msr_enable_box,
+       .disable_event          = spr_uncore_msr_disable_event,
+       .enable_event           = spr_uncore_msr_enable_event,
+       .read_counter           = uncore_msr_read_counter,
+       .hw_config              = spr_cha_hw_config,
+       .get_constraint         = uncore_get_constraint,
+       .put_constraint         = uncore_put_constraint,
+};
+
+static struct attribute *spr_uncore_cha_formats_attr[] = {
+       &format_attr_event.attr,
+       &format_attr_umask_ext4.attr,
+       &format_attr_tid_en2.attr,
+       &format_attr_edge.attr,
+       &format_attr_inv.attr,
+       &format_attr_thresh8.attr,
+       &format_attr_filter_tid5.attr,
+       NULL,
+};
+static const struct attribute_group spr_uncore_chabox_format_group = {
+       .name = "format",
+       .attrs = spr_uncore_cha_formats_attr,
+};
+
+static ssize_t alias_show(struct device *dev,
+                         struct device_attribute *attr,
+                         char *buf)
+{
+       struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(dev);
+       char pmu_name[UNCORE_PMU_NAME_LEN];
+
+       uncore_get_alias_name(pmu_name, pmu);
+       return sysfs_emit(buf, "%s\n", pmu_name);
+}
+
+static DEVICE_ATTR_RO(alias);
+
+static struct attribute *uncore_alias_attrs[] = {
+       &dev_attr_alias.attr,
+       NULL
+};
+
+ATTRIBUTE_GROUPS(uncore_alias);
+
+static struct intel_uncore_type spr_uncore_chabox = {
+       .name                   = "cha",
+       .event_mask             = SPR_CHA_PMON_EVENT_MASK,
+       .event_mask_ext         = SPR_RAW_EVENT_MASK_EXT,
+       .num_shared_regs        = 1,
+       .ops                    = &spr_uncore_chabox_ops,
+       .format_group           = &spr_uncore_chabox_format_group,
+       .attr_update            = uncore_alias_groups,
+};
+
+static struct intel_uncore_type spr_uncore_iio = {
+       .name                   = "iio",
+       .event_mask             = SNBEP_PMON_RAW_EVENT_MASK,
+       .event_mask_ext         = SNR_IIO_PMON_RAW_EVENT_MASK_EXT,
+       .format_group           = &snr_uncore_iio_format_group,
+       .attr_update            = uncore_alias_groups,
+};
+
+static struct attribute *spr_uncore_raw_formats_attr[] = {
+       &format_attr_event.attr,
+       &format_attr_umask_ext4.attr,
+       &format_attr_edge.attr,
+       &format_attr_inv.attr,
+       &format_attr_thresh8.attr,
+       NULL,
+};
+
+static const struct attribute_group spr_uncore_raw_format_group = {
+       .name                   = "format",
+       .attrs                  = spr_uncore_raw_formats_attr,
+};
+
+#define SPR_UNCORE_COMMON_FORMAT()                             \
+       .event_mask             = SNBEP_PMON_RAW_EVENT_MASK,    \
+       .event_mask_ext         = SPR_RAW_EVENT_MASK_EXT,       \
+       .format_group           = &spr_uncore_raw_format_group, \
+       .attr_update            = uncore_alias_groups
+
+static struct intel_uncore_type spr_uncore_irp = {
+       SPR_UNCORE_COMMON_FORMAT(),
+       .name                   = "irp",
+
+};
+
+static struct intel_uncore_type spr_uncore_m2pcie = {
+       SPR_UNCORE_COMMON_FORMAT(),
+       .name                   = "m2pcie",
+};
+
+static struct intel_uncore_type spr_uncore_pcu = {
+       .name                   = "pcu",
+       .attr_update            = uncore_alias_groups,
+};
+
+static void spr_uncore_mmio_enable_event(struct intel_uncore_box *box,
+                                        struct perf_event *event)
+{
+       struct hw_perf_event *hwc = &event->hw;
+
+       if (!box->io_addr)
+               return;
+
+       if (uncore_pmc_fixed(hwc->idx))
+               writel(SNBEP_PMON_CTL_EN, box->io_addr + hwc->config_base);
+       else
+               writel(hwc->config, box->io_addr + hwc->config_base);
+}
+
+static struct intel_uncore_ops spr_uncore_mmio_ops = {
+       .init_box               = intel_generic_uncore_mmio_init_box,
+       .exit_box               = uncore_mmio_exit_box,
+       .disable_box            = intel_generic_uncore_mmio_disable_box,
+       .enable_box             = intel_generic_uncore_mmio_enable_box,
+       .disable_event          = intel_generic_uncore_mmio_disable_event,
+       .enable_event           = spr_uncore_mmio_enable_event,
+       .read_counter           = uncore_mmio_read_counter,
+};
+
+static struct intel_uncore_type spr_uncore_imc = {
+       SPR_UNCORE_COMMON_FORMAT(),
+       .name                   = "imc",
+       .fixed_ctr_bits         = 48,
+       .fixed_ctr              = SNR_IMC_MMIO_PMON_FIXED_CTR,
+       .fixed_ctl              = SNR_IMC_MMIO_PMON_FIXED_CTL,
+       .ops                    = &spr_uncore_mmio_ops,
+};
+
+static void spr_uncore_pci_enable_event(struct intel_uncore_box *box,
+                                       struct perf_event *event)
+{
+       struct pci_dev *pdev = box->pci_dev;
+       struct hw_perf_event *hwc = &event->hw;
+
+       pci_write_config_dword(pdev, hwc->config_base + 4, (u32)(hwc->config >> 32));
+       pci_write_config_dword(pdev, hwc->config_base, (u32)hwc->config);
+}
+
+static struct intel_uncore_ops spr_uncore_pci_ops = {
+       .init_box               = intel_generic_uncore_pci_init_box,
+       .disable_box            = intel_generic_uncore_pci_disable_box,
+       .enable_box             = intel_generic_uncore_pci_enable_box,
+       .disable_event          = intel_generic_uncore_pci_disable_event,
+       .enable_event           = spr_uncore_pci_enable_event,
+       .read_counter           = intel_generic_uncore_pci_read_counter,
+};
+
+#define SPR_UNCORE_PCI_COMMON_FORMAT()                 \
+       SPR_UNCORE_COMMON_FORMAT(),                     \
+       .ops                    = &spr_uncore_pci_ops
+
+static struct intel_uncore_type spr_uncore_m2m = {
+       SPR_UNCORE_PCI_COMMON_FORMAT(),
+       .name                   = "m2m",
+};
+
+static struct intel_uncore_type spr_uncore_upi = {
+       SPR_UNCORE_PCI_COMMON_FORMAT(),
+       .name                   = "upi",
+};
+
+static struct intel_uncore_type spr_uncore_m3upi = {
+       SPR_UNCORE_PCI_COMMON_FORMAT(),
+       .name                   = "m3upi",
+};
+
+static struct intel_uncore_type spr_uncore_mdf = {
+       SPR_UNCORE_COMMON_FORMAT(),
+       .name                   = "mdf",
+};
+
+#define UNCORE_SPR_NUM_UNCORE_TYPES            12
+#define UNCORE_SPR_IIO                         1
+#define UNCORE_SPR_IMC                         6
+
+static struct intel_uncore_type *spr_uncores[UNCORE_SPR_NUM_UNCORE_TYPES] = {
+       &spr_uncore_chabox,
+       &spr_uncore_iio,
+       &spr_uncore_irp,
+       &spr_uncore_m2pcie,
+       &spr_uncore_pcu,
+       NULL,
+       &spr_uncore_imc,
+       &spr_uncore_m2m,
+       &spr_uncore_upi,
+       &spr_uncore_m3upi,
+       NULL,
+       &spr_uncore_mdf,
+};
+
+enum perf_uncore_spr_iio_freerunning_type_id {
+       SPR_IIO_MSR_IOCLK,
+       SPR_IIO_MSR_BW_IN,
+       SPR_IIO_MSR_BW_OUT,
+
+       SPR_IIO_FREERUNNING_TYPE_MAX,
+};
+
+static struct freerunning_counters spr_iio_freerunning[] = {
+       [SPR_IIO_MSR_IOCLK]     = { 0x340e, 0x1, 0x10, 1, 48 },
+       [SPR_IIO_MSR_BW_IN]     = { 0x3800, 0x1, 0x10, 8, 48 },
+       [SPR_IIO_MSR_BW_OUT]    = { 0x3808, 0x1, 0x10, 8, 48 },
+};
+
+static struct uncore_event_desc spr_uncore_iio_freerunning_events[] = {
+       /* Free-Running IIO CLOCKS Counter */
+       INTEL_UNCORE_EVENT_DESC(ioclk,                  "event=0xff,umask=0x10"),
+       /* Free-Running IIO BANDWIDTH IN Counters */
+       INTEL_UNCORE_EVENT_DESC(bw_in_port0,            "event=0xff,umask=0x20"),
+       INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale,      "3.814697266e-6"),
+       INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit,       "MiB"),
+       INTEL_UNCORE_EVENT_DESC(bw_in_port1,            "event=0xff,umask=0x21"),
+       INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale,      "3.814697266e-6"),
+       INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit,       "MiB"),
+       INTEL_UNCORE_EVENT_DESC(bw_in_port2,            "event=0xff,umask=0x22"),
+       INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale,      "3.814697266e-6"),
+       INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit,       "MiB"),
+       INTEL_UNCORE_EVENT_DESC(bw_in_port3,            "event=0xff,umask=0x23"),
+       INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale,      "3.814697266e-6"),
+       INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit,       "MiB"),
+       INTEL_UNCORE_EVENT_DESC(bw_in_port4,            "event=0xff,umask=0x24"),
+       INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale,      "3.814697266e-6"),
+       INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit,       "MiB"),
+       INTEL_UNCORE_EVENT_DESC(bw_in_port5,            "event=0xff,umask=0x25"),
+       INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale,      "3.814697266e-6"),
+       INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit,       "MiB"),
+       INTEL_UNCORE_EVENT_DESC(bw_in_port6,            "event=0xff,umask=0x26"),
+       INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale,      "3.814697266e-6"),
+       INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit,       "MiB"),
+       INTEL_UNCORE_EVENT_DESC(bw_in_port7,            "event=0xff,umask=0x27"),
+       INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale,      "3.814697266e-6"),
+       INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit,       "MiB"),
+       /* Free-Running IIO BANDWIDTH OUT Counters */
+       INTEL_UNCORE_EVENT_DESC(bw_out_port0,           "event=0xff,umask=0x30"),
+       INTEL_UNCORE_EVENT_DESC(bw_out_port0.scale,     "3.814697266e-6"),
+       INTEL_UNCORE_EVENT_DESC(bw_out_port0.unit,      "MiB"),
+       INTEL_UNCORE_EVENT_DESC(bw_out_port1,           "event=0xff,umask=0x31"),
+       INTEL_UNCORE_EVENT_DESC(bw_out_port1.scale,     "3.814697266e-6"),
+       INTEL_UNCORE_EVENT_DESC(bw_out_port1.unit,      "MiB"),
+       INTEL_UNCORE_EVENT_DESC(bw_out_port2,           "event=0xff,umask=0x32"),
+       INTEL_UNCORE_EVENT_DESC(bw_out_port2.scale,     "3.814697266e-6"),
+       INTEL_UNCORE_EVENT_DESC(bw_out_port2.unit,      "MiB"),
+       INTEL_UNCORE_EVENT_DESC(bw_out_port3,           "event=0xff,umask=0x33"),
+       INTEL_UNCORE_EVENT_DESC(bw_out_port3.scale,     "3.814697266e-6"),
+       INTEL_UNCORE_EVENT_DESC(bw_out_port3.unit,      "MiB"),
+       INTEL_UNCORE_EVENT_DESC(bw_out_port4,           "event=0xff,umask=0x34"),
+       INTEL_UNCORE_EVENT_DESC(bw_out_port4.scale,     "3.814697266e-6"),
+       INTEL_UNCORE_EVENT_DESC(bw_out_port4.unit,      "MiB"),
+       INTEL_UNCORE_EVENT_DESC(bw_out_port5,           "event=0xff,umask=0x35"),
+       INTEL_UNCORE_EVENT_DESC(bw_out_port5.scale,     "3.814697266e-6"),
+       INTEL_UNCORE_EVENT_DESC(bw_out_port5.unit,      "MiB"),
+       INTEL_UNCORE_EVENT_DESC(bw_out_port6,           "event=0xff,umask=0x36"),
+       INTEL_UNCORE_EVENT_DESC(bw_out_port6.scale,     "3.814697266e-6"),
+       INTEL_UNCORE_EVENT_DESC(bw_out_port6.unit,      "MiB"),
+       INTEL_UNCORE_EVENT_DESC(bw_out_port7,           "event=0xff,umask=0x37"),
+       INTEL_UNCORE_EVENT_DESC(bw_out_port7.scale,     "3.814697266e-6"),
+       INTEL_UNCORE_EVENT_DESC(bw_out_port7.unit,      "MiB"),
+       { /* end: all zeroes */ },
+};
+
+static struct intel_uncore_type spr_uncore_iio_free_running = {
+       .name                   = "iio_free_running",
+       .num_counters           = 17,
+       .num_freerunning_types  = SPR_IIO_FREERUNNING_TYPE_MAX,
+       .freerunning            = spr_iio_freerunning,
+       .ops                    = &skx_uncore_iio_freerunning_ops,
+       .event_descs            = spr_uncore_iio_freerunning_events,
+       .format_group           = &skx_uncore_iio_freerunning_format_group,
+};
+
+enum perf_uncore_spr_imc_freerunning_type_id {
+       SPR_IMC_DCLK,
+       SPR_IMC_PQ_CYCLES,
+
+       SPR_IMC_FREERUNNING_TYPE_MAX,
+};
+
+static struct freerunning_counters spr_imc_freerunning[] = {
+       [SPR_IMC_DCLK]          = { 0x22b0, 0x0, 0, 1, 48 },
+       [SPR_IMC_PQ_CYCLES]     = { 0x2318, 0x8, 0, 2, 48 },
+};
+
+static struct uncore_event_desc spr_uncore_imc_freerunning_events[] = {
+       INTEL_UNCORE_EVENT_DESC(dclk,                   "event=0xff,umask=0x10"),
+
+       INTEL_UNCORE_EVENT_DESC(rpq_cycles,             "event=0xff,umask=0x20"),
+       INTEL_UNCORE_EVENT_DESC(wpq_cycles,             "event=0xff,umask=0x21"),
+       { /* end: all zeroes */ },
+};
+
+#define SPR_MC_DEVICE_ID       0x3251
+
+static void spr_uncore_imc_freerunning_init_box(struct intel_uncore_box *box)
+{
+       int mem_offset = box->pmu->pmu_idx * ICX_IMC_MEM_STRIDE + SNR_IMC_MMIO_MEM0_OFFSET;
+
+       snr_uncore_mmio_map(box, uncore_mmio_box_ctl(box),
+                           mem_offset, SPR_MC_DEVICE_ID);
+}
+
+static struct intel_uncore_ops spr_uncore_imc_freerunning_ops = {
+       .init_box       = spr_uncore_imc_freerunning_init_box,
+       .exit_box       = uncore_mmio_exit_box,
+       .read_counter   = uncore_mmio_read_counter,
+       .hw_config      = uncore_freerunning_hw_config,
+};
+
+static struct intel_uncore_type spr_uncore_imc_free_running = {
+       .name                   = "imc_free_running",
+       .num_counters           = 3,
+       .mmio_map_size          = SNR_IMC_MMIO_SIZE,
+       .num_freerunning_types  = SPR_IMC_FREERUNNING_TYPE_MAX,
+       .freerunning            = spr_imc_freerunning,
+       .ops                    = &spr_uncore_imc_freerunning_ops,
+       .event_descs            = spr_uncore_imc_freerunning_events,
+       .format_group           = &skx_uncore_iio_freerunning_format_group,
+};
+
+#define UNCORE_SPR_MSR_EXTRA_UNCORES           1
+#define UNCORE_SPR_MMIO_EXTRA_UNCORES          1
+
+static struct intel_uncore_type *spr_msr_uncores[UNCORE_SPR_MSR_EXTRA_UNCORES] = {
+       &spr_uncore_iio_free_running,
+};
+
+static struct intel_uncore_type *spr_mmio_uncores[UNCORE_SPR_MMIO_EXTRA_UNCORES] = {
+       &spr_uncore_imc_free_running,
+};
+
+static void uncore_type_customized_copy(struct intel_uncore_type *to_type,
+                                       struct intel_uncore_type *from_type)
+{
+       if (!to_type || !from_type)
+               return;
+
+       if (from_type->name)
+               to_type->name = from_type->name;
+       if (from_type->fixed_ctr_bits)
+               to_type->fixed_ctr_bits = from_type->fixed_ctr_bits;
+       if (from_type->event_mask)
+               to_type->event_mask = from_type->event_mask;
+       if (from_type->event_mask_ext)
+               to_type->event_mask_ext = from_type->event_mask_ext;
+       if (from_type->fixed_ctr)
+               to_type->fixed_ctr = from_type->fixed_ctr;
+       if (from_type->fixed_ctl)
+               to_type->fixed_ctl = from_type->fixed_ctl;
+       if (from_type->fixed_ctr_bits)
+               to_type->fixed_ctr_bits = from_type->fixed_ctr_bits;
+       if (from_type->num_shared_regs)
+               to_type->num_shared_regs = from_type->num_shared_regs;
+       if (from_type->constraints)
+               to_type->constraints = from_type->constraints;
+       if (from_type->ops)
+               to_type->ops = from_type->ops;
+       if (from_type->event_descs)
+               to_type->event_descs = from_type->event_descs;
+       if (from_type->format_group)
+               to_type->format_group = from_type->format_group;
+       if (from_type->attr_update)
+               to_type->attr_update = from_type->attr_update;
+}
+
+static struct intel_uncore_type **
+uncore_get_uncores(enum uncore_access_type type_id, int num_extra,
+                   struct intel_uncore_type **extra)
+{
+       struct intel_uncore_type **types, **start_types;
+       int i;
+
+       start_types = types = intel_uncore_generic_init_uncores(type_id, num_extra);
+
+       /* Only copy the customized features */
+       for (; *types; types++) {
+               if ((*types)->type_id >= UNCORE_SPR_NUM_UNCORE_TYPES)
+                       continue;
+               uncore_type_customized_copy(*types, spr_uncores[(*types)->type_id]);
+       }
+
+       for (i = 0; i < num_extra; i++, types++)
+               *types = extra[i];
+
+       return start_types;
+}
+
+static struct intel_uncore_type *
+uncore_find_type_by_id(struct intel_uncore_type **types, int type_id)
+{
+       for (; *types; types++) {
+               if (type_id == (*types)->type_id)
+                       return *types;
+       }
+
+       return NULL;
+}
+
+static int uncore_type_max_boxes(struct intel_uncore_type **types,
+                                int type_id)
+{
+       struct intel_uncore_type *type;
+       int i, max = 0;
+
+       type = uncore_find_type_by_id(types, type_id);
+       if (!type)
+               return 0;
+
+       for (i = 0; i < type->num_boxes; i++) {
+               if (type->box_ids[i] > max)
+                       max = type->box_ids[i];
+       }
+
+       return max + 1;
+}
+
+void spr_uncore_cpu_init(void)
+{
+       uncore_msr_uncores = uncore_get_uncores(UNCORE_ACCESS_MSR,
+                                               UNCORE_SPR_MSR_EXTRA_UNCORES,
+                                               spr_msr_uncores);
+
+       spr_uncore_iio_free_running.num_boxes = uncore_type_max_boxes(uncore_msr_uncores, UNCORE_SPR_IIO);
+}
+
+int spr_uncore_pci_init(void)
+{
+       uncore_pci_uncores = uncore_get_uncores(UNCORE_ACCESS_PCI, 0, NULL);
+       return 0;
+}
+
+void spr_uncore_mmio_init(void)
+{
+       int ret = snbep_pci2phy_map_init(0x3250, SKX_CPUNODEID, SKX_GIDNIDMAP, true);
+
+       if (ret)
+               uncore_mmio_uncores = uncore_get_uncores(UNCORE_ACCESS_MMIO, 0, NULL);
+       else {
+               uncore_mmio_uncores = uncore_get_uncores(UNCORE_ACCESS_MMIO,
+                                                        UNCORE_SPR_MMIO_EXTRA_UNCORES,
+                                                        spr_mmio_uncores);
+
+               spr_uncore_imc_free_running.num_boxes = uncore_type_max_boxes(uncore_mmio_uncores, UNCORE_SPR_IMC) / 2;
+       }
+}
+
+/* end of SPR uncore support */
index 2bf1c7e..e3ac05c 100644 (file)
@@ -656,6 +656,10 @@ struct x86_hybrid_pmu {
        struct event_constraint         *event_constraints;
        struct event_constraint         *pebs_constraints;
        struct extra_reg                *extra_regs;
+
+       unsigned int                    late_ack        :1,
+                                       mid_ack         :1,
+                                       enabled_ack     :1;
 };
 
 static __always_inline struct x86_hybrid_pmu *hybrid_pmu(struct pmu *pmu)
@@ -686,6 +690,16 @@ extern struct static_key_false perf_is_hybrid;
        __Fp;                                           \
 }))
 
+#define hybrid_bit(_pmu, _field)                       \
+({                                                     \
+       bool __Fp = x86_pmu._field;                     \
+                                                       \
+       if (is_hybrid() && (_pmu))                      \
+               __Fp = hybrid_pmu(_pmu)->_field;        \
+                                                       \
+       __Fp;                                           \
+})
+
 enum hybrid_pmu_type {
        hybrid_big              = 0x40,
        hybrid_small            = 0x20,
@@ -755,6 +769,7 @@ struct x86_pmu {
 
        /* PMI handler bits */
        unsigned int    late_ack                :1,
+                       mid_ack                 :1,
                        enabled_ack             :1;
        /*
         * sysfs attrs
@@ -1115,9 +1130,10 @@ void x86_pmu_stop(struct perf_event *event, int flags);
 
 static inline void x86_pmu_disable_event(struct perf_event *event)
 {
+       u64 disable_mask = __this_cpu_read(cpu_hw_events.perf_ctr_virt_mask);
        struct hw_perf_event *hwc = &event->hw;
 
-       wrmsrl(hwc->config_base, hwc->config);
+       wrmsrl(hwc->config_base, hwc->config & ~disable_mask);
 
        if (is_counter_pair(hwc))
                wrmsrl(x86_pmu_config_addr(hwc->idx + 1), 0);
diff --git a/arch/x86/include/asm/amd-ibs.h b/arch/x86/include/asm/amd-ibs.h
new file mode 100644 (file)
index 0000000..46e1df4
--- /dev/null
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * From PPR Vol 1 for AMD Family 19h Model 01h B1
+ * 55898 Rev 0.35 - Feb 5, 2021
+ */
+
+#include <asm/msr-index.h>
+
+/*
+ * IBS Hardware MSRs
+ */
+
+/* MSR 0xc0011030: IBS Fetch Control */
+union ibs_fetch_ctl {
+       __u64 val;
+       struct {
+               __u64   fetch_maxcnt:16,/* 0-15: instruction fetch max. count */
+                       fetch_cnt:16,   /* 16-31: instruction fetch count */
+                       fetch_lat:16,   /* 32-47: instruction fetch latency */
+                       fetch_en:1,     /* 48: instruction fetch enable */
+                       fetch_val:1,    /* 49: instruction fetch valid */
+                       fetch_comp:1,   /* 50: instruction fetch complete */
+                       ic_miss:1,      /* 51: i-cache miss */
+                       phy_addr_valid:1,/* 52: physical address valid */
+                       l1tlb_pgsz:2,   /* 53-54: i-cache L1TLB page size
+                                        *        (needs IbsPhyAddrValid) */
+                       l1tlb_miss:1,   /* 55: i-cache fetch missed in L1TLB */
+                       l2tlb_miss:1,   /* 56: i-cache fetch missed in L2TLB */
+                       rand_en:1,      /* 57: random tagging enable */
+                       fetch_l2_miss:1,/* 58: L2 miss for sampled fetch
+                                        *      (needs IbsFetchComp) */
+                       reserved:5;     /* 59-63: reserved */
+       };
+};
+
+/* MSR 0xc0011033: IBS Execution Control */
+union ibs_op_ctl {
+       __u64 val;
+       struct {
+               __u64   opmaxcnt:16,    /* 0-15: periodic op max. count */
+                       reserved0:1,    /* 16: reserved */
+                       op_en:1,        /* 17: op sampling enable */
+                       op_val:1,       /* 18: op sample valid */
+                       cnt_ctl:1,      /* 19: periodic op counter control */
+                       opmaxcnt_ext:7, /* 20-26: upper 7 bits of periodic op maximum count */
+                       reserved1:5,    /* 27-31: reserved */
+                       opcurcnt:27,    /* 32-58: periodic op counter current count */
+                       reserved2:5;    /* 59-63: reserved */
+       };
+};
+
+/* MSR 0xc0011035: IBS Op Data */
+union ibs_op_data {
+       __u64 val;
+       struct {
+               __u64   comp_to_ret_ctr:16,     /* 0-15: op completion to retire count */
+                       tag_to_ret_ctr:16,      /* 16-31: op tag to retire count */
+                       reserved1:2,            /* 32-33: reserved */
+                       op_return:1,            /* 34: return op */
+                       op_brn_taken:1,         /* 35: taken branch op */
+                       op_brn_misp:1,          /* 36: mispredicted branch op */
+                       op_brn_ret:1,           /* 37: branch op retired */
+                       op_rip_invalid:1,       /* 38: RIP is invalid */
+                       op_brn_fuse:1,          /* 39: fused branch op */
+                       op_microcode:1,         /* 40: microcode op */
+                       reserved2:23;           /* 41-63: reserved */
+       };
+};
+
+/* MSR 0xc0011036: IBS Op Data 2 */
+union ibs_op_data2 {
+       __u64 val;
+       struct {
+               __u64   data_src:3,     /* 0-2: data source */
+                       reserved0:1,    /* 3: reserved */
+                       rmt_node:1,     /* 4: destination node */
+                       cache_hit_st:1, /* 5: cache hit state */
+                       reserved1:57;   /* 6-63: reserved */
+       };
+};
+
+/* MSR 0xc0011037: IBS Op Data 3 */
+union ibs_op_data3 {
+       __u64 val;
+       struct {
+               __u64   ld_op:1,                        /* 0: load op */
+                       st_op:1,                        /* 1: store op */
+                       dc_l1tlb_miss:1,                /* 2: data cache L1TLB miss */
+                       dc_l2tlb_miss:1,                /* 3: data cache L2TLB miss */
+                       dc_l1tlb_hit_2m:1,              /* 4: data cache L1TLB hit in 2M page */
+                       dc_l1tlb_hit_1g:1,              /* 5: data cache L1TLB hit in 1G page */
+                       dc_l2tlb_hit_2m:1,              /* 6: data cache L2TLB hit in 2M page */
+                       dc_miss:1,                      /* 7: data cache miss */
+                       dc_mis_acc:1,                   /* 8: misaligned access */
+                       reserved:4,                     /* 9-12: reserved */
+                       dc_wc_mem_acc:1,                /* 13: write combining memory access */
+                       dc_uc_mem_acc:1,                /* 14: uncacheable memory access */
+                       dc_locked_op:1,                 /* 15: locked operation */
+                       dc_miss_no_mab_alloc:1,         /* 16: DC miss with no MAB allocated */
+                       dc_lin_addr_valid:1,            /* 17: data cache linear address valid */
+                       dc_phy_addr_valid:1,            /* 18: data cache physical address valid */
+                       dc_l2_tlb_hit_1g:1,             /* 19: data cache L2 hit in 1GB page */
+                       l2_miss:1,                      /* 20: L2 cache miss */
+                       sw_pf:1,                        /* 21: software prefetch */
+                       op_mem_width:4,                 /* 22-25: load/store size in bytes */
+                       op_dc_miss_open_mem_reqs:6,     /* 26-31: outstanding mem reqs on DC fill */
+                       dc_miss_lat:16,                 /* 32-47: data cache miss latency */
+                       tlb_refill_lat:16;              /* 48-63: L1 TLB refill latency */
+       };
+};
+
+/* MSR 0xc001103c: IBS Fetch Control Extended */
+union ic_ibs_extd_ctl {
+       __u64 val;
+       struct {
+               __u64   itlb_refill_lat:16,     /* 0-15: ITLB Refill latency for sampled fetch */
+                       reserved:48;            /* 16-63: reserved */
+       };
+};
+
+/*
+ * IBS driver related
+ */
+
+struct perf_ibs_data {
+       u32             size;
+       union {
+               u32     data[0];        /* data buffer starts here */
+               u32     caps;
+       };
+       u64             regs[MSR_AMD64_IBS_REG_COUNT_MAX];
+};
index 89789e8..637fa1d 100644 (file)
@@ -19,6 +19,8 @@ extern unsigned int cached_irq_mask;
 #define PIC_MASTER_OCW3                PIC_MASTER_ISR
 #define PIC_SLAVE_CMD          0xa0
 #define PIC_SLAVE_IMR          0xa1
+#define PIC_ELCR1              0x4d0
+#define PIC_ELCR2              0x4d1
 
 /* i8259A PIC related value */
 #define PIC_CASCADE_IR         2
index 05b48b3..ff5c713 100644 (file)
@@ -8,6 +8,8 @@
 #ifndef _ASM_X86_KFENCE_H
 #define _ASM_X86_KFENCE_H
 
+#ifndef MODULE
+
 #include <linux/bug.h>
 #include <linux/kfence.h>
 
@@ -66,4 +68,6 @@ static inline bool kfence_protect_page(unsigned long addr, bool protect)
        return true;
 }
 
+#endif /* !MODULE */
+
 #endif /* _ASM_X86_KFENCE_H */
index 974cbfb..af6ce8d 100644 (file)
@@ -1038,6 +1038,13 @@ struct kvm_arch {
        struct list_head lpage_disallowed_mmu_pages;
        struct kvm_page_track_notifier_node mmu_sp_tracker;
        struct kvm_page_track_notifier_head track_notifier_head;
+       /*
+        * Protects marking pages unsync during page faults, as TDP MMU page
+        * faults only take mmu_lock for read.  For simplicity, the unsync
+        * pages lock is always taken when marking pages unsync regardless of
+        * whether mmu_lock is held for read or write.
+        */
+       spinlock_t mmu_unsync_pages_lock;
 
        struct list_head assigned_dev_head;
        struct iommu_domain *iommu_domain;
index 0607ec4..da93215 100644 (file)
@@ -265,6 +265,7 @@ enum mcp_flags {
        MCP_TIMESTAMP   = BIT(0),       /* log time stamp */
        MCP_UC          = BIT(1),       /* log uncorrected errors */
        MCP_DONTLOG     = BIT(2),       /* only clear, don't log */
+       MCP_QUEUE_LOG   = BIT(3),       /* only queue to genpool */
 };
 bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b);
 
index 3ad8c6d..ec2d5c8 100644 (file)
@@ -252,6 +252,8 @@ DECLARE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
 DECLARE_STATIC_KEY_FALSE(mds_user_clear);
 DECLARE_STATIC_KEY_FALSE(mds_idle_clear);
 
+DECLARE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);
+
 #include <asm/segment.h>
 
 /**
diff --git a/arch/x86/include/asm/pc-conf-reg.h b/arch/x86/include/asm/pc-conf-reg.h
new file mode 100644 (file)
index 0000000..56bcece
--- /dev/null
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for the configuration register space at port I/O locations
+ * 0x22 and 0x23 variously used by PC architectures, e.g. the MP Spec,
+ * Cyrix CPUs, numerous chipsets.
+ */
+#ifndef _ASM_X86_PC_CONF_REG_H
+#define _ASM_X86_PC_CONF_REG_H
+
+#include <linux/io.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#define PC_CONF_INDEX          0x22
+#define PC_CONF_DATA           0x23
+
+#define PC_CONF_MPS_IMCR       0x70
+
+extern raw_spinlock_t pc_conf_lock;
+
+static inline u8 pc_conf_get(u8 reg)
+{
+       outb(reg, PC_CONF_INDEX);
+       return inb(PC_CONF_DATA);
+}
+
+static inline void pc_conf_set(u8 reg, u8 data)
+{
+       outb(reg, PC_CONF_INDEX);
+       outb(data, PC_CONF_DATA);
+}
+
+#endif /* _ASM_X86_PC_CONF_REG_H */
index df700a6..efe3e46 100644 (file)
@@ -5,14 +5,14 @@
  * Access order is always 0x22 (=offset), 0x23 (=value)
  */
 
+#include <asm/pc-conf-reg.h>
+
 static inline u8 getCx86(u8 reg)
 {
-       outb(reg, 0x22);
-       return inb(0x23);
+       return pc_conf_get(reg);
 }
 
 static inline void setCx86(u8 reg, u8 data)
 {
-       outb(reg, 0x22);
-       outb(data, 0x23);
+       pc_conf_set(reg, data);
 }
index f3020c5..9ad2aca 100644 (file)
@@ -136,6 +136,8 @@ struct cpuinfo_x86 {
        u16                     logical_die_id;
        /* Index into per_cpu list: */
        u16                     cpu_index;
+       /*  Is SMT active on this core? */
+       bool                    smt_active;
        u32                     microcode;
        /* Address space bits used by the cache internally */
        u8                      x86_cache_bits;
@@ -795,6 +797,8 @@ extern int set_tsc_mode(unsigned int val);
 
 DECLARE_PER_CPU(u64, msr_misc_features_shadow);
 
+extern u16 get_llc_id(unsigned int cpu);
+
 #ifdef CONFIG_CPU_SUP_AMD
 extern u32 amd_get_nodes_per_socket(void);
 extern u32 amd_get_highest_perf(void);
index e322676..b00dbc5 100644 (file)
@@ -184,6 +184,8 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
 #define V_IGN_TPR_SHIFT 20
 #define V_IGN_TPR_MASK (1 << V_IGN_TPR_SHIFT)
 
+#define V_IRQ_INJECTION_BITS_MASK (V_IRQ_MASK | V_INTR_PRIO_MASK | V_IGN_TPR_MASK)
+
 #define V_INTR_MASKING_SHIFT 24
 #define V_INTR_MASKING_MASK (1 << V_INTR_MASKING_SHIFT)
 
index de406d9..cf13266 100644 (file)
@@ -81,7 +81,7 @@ struct thread_info {
 #define TIF_SINGLESTEP         4       /* reenable singlestep on user return*/
 #define TIF_SSBD               5       /* Speculative store bypass disable */
 #define TIF_SPEC_IB            9       /* Indirect branch speculation mitigation */
-#define TIF_SPEC_FORCE_UPDATE  10      /* Force speculation MSR update in context switch */
+#define TIF_SPEC_L1D_FLUSH     10      /* Flush L1D on mm switches (processes) */
 #define TIF_USER_RETURN_NOTIFY 11      /* notify kernel of userspace return */
 #define TIF_UPROBE             12      /* breakpointed or singlestepping */
 #define TIF_PATCH_PENDING      13      /* pending live patching update */
@@ -93,6 +93,7 @@ struct thread_info {
 #define TIF_MEMDIE             20      /* is terminating due to OOM killer */
 #define TIF_POLLING_NRFLAG     21      /* idle is polling for TIF_NEED_RESCHED */
 #define TIF_IO_BITMAP          22      /* uses I/O bitmap */
+#define TIF_SPEC_FORCE_UPDATE  23      /* Force speculation MSR update in context switch */
 #define TIF_FORCED_TF          24      /* true if TF in eflags artificially */
 #define TIF_BLOCKSTEP          25      /* set when we want DEBUGCTLMSR_BTF */
 #define TIF_LAZY_MMU_UPDATES   27      /* task is updating the mmu lazily */
@@ -104,7 +105,7 @@ struct thread_info {
 #define _TIF_SINGLESTEP                (1 << TIF_SINGLESTEP)
 #define _TIF_SSBD              (1 << TIF_SSBD)
 #define _TIF_SPEC_IB           (1 << TIF_SPEC_IB)
-#define _TIF_SPEC_FORCE_UPDATE (1 << TIF_SPEC_FORCE_UPDATE)
+#define _TIF_SPEC_L1D_FLUSH    (1 << TIF_SPEC_L1D_FLUSH)
 #define _TIF_USER_RETURN_NOTIFY        (1 << TIF_USER_RETURN_NOTIFY)
 #define _TIF_UPROBE            (1 << TIF_UPROBE)
 #define _TIF_PATCH_PENDING     (1 << TIF_PATCH_PENDING)
@@ -115,6 +116,7 @@ struct thread_info {
 #define _TIF_SLD               (1 << TIF_SLD)
 #define _TIF_POLLING_NRFLAG    (1 << TIF_POLLING_NRFLAG)
 #define _TIF_IO_BITMAP         (1 << TIF_IO_BITMAP)
+#define _TIF_SPEC_FORCE_UPDATE (1 << TIF_SPEC_FORCE_UPDATE)
 #define _TIF_FORCED_TF         (1 << TIF_FORCED_TF)
 #define _TIF_BLOCKSTEP         (1 << TIF_BLOCKSTEP)
 #define _TIF_LAZY_MMU_UPDATES  (1 << TIF_LAZY_MMU_UPDATES)
index fa952ea..b587a9e 100644 (file)
@@ -83,7 +83,7 @@ struct tlb_state {
        /* Last user mm for optimizing IBPB */
        union {
                struct mm_struct        *last_user_mm;
-               unsigned long           last_user_mm_ibpb;
+               unsigned long           last_user_mm_spec;
        };
 
        u16 loaded_mm_asid;
index e55e0c1..14bcd59 100644 (file)
@@ -558,10 +558,10 @@ acpi_parse_nmi_src(union acpi_subtable_headers * header, const unsigned long end
  * If a PIC-mode SCI is not recognized or gives spurious IRQ7's
  * it may require Edge Trigger -- use "acpi_sci=edge"
  *
- * Port 0x4d0-4d1 are ECLR1 and ECLR2, the Edge/Level Control Registers
+ * Port 0x4d0-4d1 are ELCR1 and ELCR2, the Edge/Level Control Registers
  * for the 8259 PIC.  bit[n] = 1 means irq[n] is Level, otherwise Edge.
- * ECLR1 is IRQs 0-7 (IRQ 0, 1, 2 must be 0)
- * ECLR2 is IRQs 8-15 (IRQ 8, 13 must be 0)
+ * ELCR1 is IRQs 0-7 (IRQ 0, 1, 2 must be 0)
+ * ELCR2 is IRQs 8-15 (IRQ 8, 13 must be 0)
  */
 
 void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger)
@@ -570,7 +570,7 @@ void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger)
        unsigned int old, new;
 
        /* Real old ELCR mask */
-       old = inb(0x4d0) | (inb(0x4d1) << 8);
+       old = inb(PIC_ELCR1) | (inb(PIC_ELCR2) << 8);
 
        /*
         * If we use ACPI to set PCI IRQs, then we should clear ELCR
@@ -596,8 +596,8 @@ void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger)
                return;
 
        pr_warn("setting ELCR to %04x (from %04x)\n", new, old);
-       outb(new, 0x4d0);
-       outb(new >> 8, 0x4d1);
+       outb(new, PIC_ELCR1);
+       outb(new >> 8, PIC_ELCR2);
 }
 
 int acpi_gsi_to_irq(u32 gsi, unsigned int *irqp)
index d262811..b70344b 100644 (file)
@@ -38,6 +38,7 @@
 
 #include <asm/trace/irq_vectors.h>
 #include <asm/irq_remapping.h>
+#include <asm/pc-conf-reg.h>
 #include <asm/perf_event.h>
 #include <asm/x86_init.h>
 #include <linux/atomic.h>
@@ -132,18 +133,14 @@ static int enabled_via_apicbase __ro_after_init;
  */
 static inline void imcr_pic_to_apic(void)
 {
-       /* select IMCR register */
-       outb(0x70, 0x22);
        /* NMI and 8259 INTR go through APIC */
-       outb(0x01, 0x23);
+       pc_conf_set(PC_CONF_MPS_IMCR, 0x01);
 }
 
 static inline void imcr_apic_to_pic(void)
 {
-       /* select IMCR register */
-       outb(0x70, 0x22);
        /* NMI and 8259 INTR go directly to BSP */
-       outb(0x00, 0x23);
+       pc_conf_set(PC_CONF_MPS_IMCR, 0x00);
 }
 #endif
 
index d5c691a..c1bb384 100644 (file)
@@ -764,7 +764,7 @@ static bool irq_active_low(int idx)
 static bool EISA_ELCR(unsigned int irq)
 {
        if (irq < nr_legacy_irqs()) {
-               unsigned int port = 0x4d0 + (irq >> 3);
+               unsigned int port = PIC_ELCR1 + (irq >> 3);
                return (inb(port) >> (irq & 7)) & 1;
        }
        apic_printk(APIC_VERBOSE, KERN_INFO
@@ -1986,7 +1986,8 @@ static struct irq_chip ioapic_chip __read_mostly = {
        .irq_set_affinity       = ioapic_set_affinity,
        .irq_retrigger          = irq_chip_retrigger_hierarchy,
        .irq_get_irqchip_state  = ioapic_irq_get_chip_state,
-       .flags                  = IRQCHIP_SKIP_SET_WAKE,
+       .flags                  = IRQCHIP_SKIP_SET_WAKE |
+                                 IRQCHIP_AFFINITY_PRE_STARTUP,
 };
 
 static struct irq_chip ioapic_ir_chip __read_mostly = {
@@ -1999,7 +2000,8 @@ static struct irq_chip ioapic_ir_chip __read_mostly = {
        .irq_set_affinity       = ioapic_set_affinity,
        .irq_retrigger          = irq_chip_retrigger_hierarchy,
        .irq_get_irqchip_state  = ioapic_irq_get_chip_state,
-       .flags                  = IRQCHIP_SKIP_SET_WAKE,
+       .flags                  = IRQCHIP_SKIP_SET_WAKE |
+                                 IRQCHIP_AFFINITY_PRE_STARTUP,
 };
 
 static inline void init_IO_APIC_traps(void)
index 44ebe25..dbacb9e 100644 (file)
@@ -58,11 +58,13 @@ msi_set_affinity(struct irq_data *irqd, const struct cpumask *mask, bool force)
         *   The quirk bit is not set in this case.
         * - The new vector is the same as the old vector
         * - The old vector is MANAGED_IRQ_SHUTDOWN_VECTOR (interrupt starts up)
+        * - The interrupt is not yet started up
         * - The new destination CPU is the same as the old destination CPU
         */
        if (!irqd_msi_nomask_quirk(irqd) ||
            cfg->vector == old_cfg.vector ||
            old_cfg.vector == MANAGED_IRQ_SHUTDOWN_VECTOR ||
+           !irqd_is_started(irqd) ||
            cfg->dest_apicid == old_cfg.dest_apicid) {
                irq_msi_update_msg(irqd, cfg);
                return ret;
@@ -150,7 +152,8 @@ static struct irq_chip pci_msi_controller = {
        .irq_ack                = irq_chip_ack_parent,
        .irq_retrigger          = irq_chip_retrigger_hierarchy,
        .irq_set_affinity       = msi_set_affinity,
-       .flags                  = IRQCHIP_SKIP_SET_WAKE,
+       .flags                  = IRQCHIP_SKIP_SET_WAKE |
+                                 IRQCHIP_AFFINITY_PRE_STARTUP,
 };
 
 int pci_msi_prepare(struct irq_domain *domain, struct device *dev, int nvec,
@@ -219,7 +222,8 @@ static struct irq_chip pci_msi_ir_controller = {
        .irq_mask               = pci_msi_mask_irq,
        .irq_ack                = irq_chip_ack_parent,
        .irq_retrigger          = irq_chip_retrigger_hierarchy,
-       .flags                  = IRQCHIP_SKIP_SET_WAKE,
+       .flags                  = IRQCHIP_SKIP_SET_WAKE |
+                                 IRQCHIP_AFFINITY_PRE_STARTUP,
 };
 
 static struct msi_domain_info pci_msi_ir_domain_info = {
@@ -273,7 +277,8 @@ static struct irq_chip dmar_msi_controller = {
        .irq_retrigger          = irq_chip_retrigger_hierarchy,
        .irq_compose_msi_msg    = dmar_msi_compose_msg,
        .irq_write_msi_msg      = dmar_msi_write_msg,
-       .flags                  = IRQCHIP_SKIP_SET_WAKE,
+       .flags                  = IRQCHIP_SKIP_SET_WAKE |
+                                 IRQCHIP_AFFINITY_PRE_STARTUP,
 };
 
 static int dmar_msi_init(struct irq_domain *domain,
index fb67ed5..c132daa 100644 (file)
@@ -1299,7 +1299,7 @@ static void __init print_PIC(void)
 
        pr_debug("... PIC  ISR: %04x\n", v);
 
-       v = inb(0x4d1) << 8 | inb(0x4d0);
+       v = inb(PIC_ELCR2) << 8 | inb(PIC_ELCR1);
        pr_debug("... PIC ELCR: %04x\n", v);
 }
 
index b7c0030..2131af9 100644 (file)
@@ -438,7 +438,7 @@ static void srat_detect_node(struct cpuinfo_x86 *c)
 
        node = numa_cpu_node(cpu);
        if (node == NUMA_NO_NODE)
-               node = per_cpu(cpu_llc_id, cpu);
+               node = get_llc_id(cpu);
 
        /*
         * On multi-fabric platform (e.g. Numascale NumaChip) a
index d41b70f..ecfca3b 100644 (file)
@@ -43,6 +43,7 @@ static void __init mds_select_mitigation(void);
 static void __init mds_print_mitigation(void);
 static void __init taa_select_mitigation(void);
 static void __init srbds_select_mitigation(void);
+static void __init l1d_flush_select_mitigation(void);
 
 /* The base value of the SPEC_CTRL MSR that always has to be preserved. */
 u64 x86_spec_ctrl_base;
@@ -76,6 +77,13 @@ EXPORT_SYMBOL_GPL(mds_user_clear);
 DEFINE_STATIC_KEY_FALSE(mds_idle_clear);
 EXPORT_SYMBOL_GPL(mds_idle_clear);
 
+/*
+ * Controls whether l1d flush based mitigations are enabled,
+ * based on hw features and admin setting via boot parameter
+ * defaults to false
+ */
+DEFINE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);
+
 void __init check_bugs(void)
 {
        identify_boot_cpu();
@@ -111,6 +119,7 @@ void __init check_bugs(void)
        mds_select_mitigation();
        taa_select_mitigation();
        srbds_select_mitigation();
+       l1d_flush_select_mitigation();
 
        /*
         * As MDS and TAA mitigations are inter-related, print MDS
@@ -491,6 +500,34 @@ static int __init srbds_parse_cmdline(char *str)
 }
 early_param("srbds", srbds_parse_cmdline);
 
+#undef pr_fmt
+#define pr_fmt(fmt)     "L1D Flush : " fmt
+
+enum l1d_flush_mitigations {
+       L1D_FLUSH_OFF = 0,
+       L1D_FLUSH_ON,
+};
+
+static enum l1d_flush_mitigations l1d_flush_mitigation __initdata = L1D_FLUSH_OFF;
+
+static void __init l1d_flush_select_mitigation(void)
+{
+       if (!l1d_flush_mitigation || !boot_cpu_has(X86_FEATURE_FLUSH_L1D))
+               return;
+
+       static_branch_enable(&switch_mm_cond_l1d_flush);
+       pr_info("Conditional flush on switch_mm() enabled\n");
+}
+
+static int __init l1d_flush_parse_cmdline(char *str)
+{
+       if (!strcmp(str, "on"))
+               l1d_flush_mitigation = L1D_FLUSH_ON;
+
+       return 0;
+}
+early_param("l1d_flush", l1d_flush_parse_cmdline);
+
 #undef pr_fmt
 #define pr_fmt(fmt)     "Spectre V1 : " fmt
 
@@ -1215,6 +1252,24 @@ static void task_update_spec_tif(struct task_struct *tsk)
                speculation_ctrl_update_current();
 }
 
+static int l1d_flush_prctl_set(struct task_struct *task, unsigned long ctrl)
+{
+
+       if (!static_branch_unlikely(&switch_mm_cond_l1d_flush))
+               return -EPERM;
+
+       switch (ctrl) {
+       case PR_SPEC_ENABLE:
+               set_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH);
+               return 0;
+       case PR_SPEC_DISABLE:
+               clear_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH);
+               return 0;
+       default:
+               return -ERANGE;
+       }
+}
+
 static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
 {
        if (ssb_mode != SPEC_STORE_BYPASS_PRCTL &&
@@ -1324,6 +1379,8 @@ int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
                return ssb_prctl_set(task, ctrl);
        case PR_SPEC_INDIRECT_BRANCH:
                return ib_prctl_set(task, ctrl);
+       case PR_SPEC_L1D_FLUSH:
+               return l1d_flush_prctl_set(task, ctrl);
        default:
                return -ENODEV;
        }
@@ -1340,6 +1397,17 @@ void arch_seccomp_spec_mitigate(struct task_struct *task)
 }
 #endif
 
+static int l1d_flush_prctl_get(struct task_struct *task)
+{
+       if (!static_branch_unlikely(&switch_mm_cond_l1d_flush))
+               return PR_SPEC_FORCE_DISABLE;
+
+       if (test_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH))
+               return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
+       else
+               return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
+}
+
 static int ssb_prctl_get(struct task_struct *task)
 {
        switch (ssb_mode) {
@@ -1390,6 +1458,8 @@ int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
                return ssb_prctl_get(task);
        case PR_SPEC_INDIRECT_BRANCH:
                return ib_prctl_get(task);
+       case PR_SPEC_L1D_FLUSH:
+               return l1d_flush_prctl_get(task);
        default:
                return -ENODEV;
        }
index 64b805b..0f88859 100644 (file)
@@ -79,6 +79,12 @@ EXPORT_SYMBOL(smp_num_siblings);
 /* Last level cache ID of each logical CPU */
 DEFINE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id) = BAD_APICID;
 
+u16 get_llc_id(unsigned int cpu)
+{
+       return per_cpu(cpu_llc_id, cpu);
+}
+EXPORT_SYMBOL_GPL(get_llc_id);
+
 /* correctly size the local cpu masks */
 void __init setup_cpu_local_masks(void)
 {
index 22791aa..8cb7816 100644 (file)
@@ -817,7 +817,10 @@ log_it:
                if (mca_cfg.dont_log_ce && !mce_usable_address(&m))
                        goto clear_it;
 
-               mce_log(&m);
+               if (flags & MCP_QUEUE_LOG)
+                       mce_gen_pool_add(&m);
+               else
+                       mce_log(&m);
 
 clear_it:
                /*
@@ -1639,10 +1642,12 @@ static void __mcheck_cpu_init_generic(void)
                m_fl = MCP_DONTLOG;
 
        /*
-        * Log the machine checks left over from the previous reset.
+        * Log the machine checks left over from the previous reset. Log them
+        * only, do not start processing them. That will happen in mcheck_late_init()
+        * when all consumers have been registered on the notifier chain.
         */
        bitmap_fill(all_banks, MAX_NR_BANKS);
-       machine_check_poll(MCP_UC | m_fl, &all_banks);
+       machine_check_poll(MCP_UC | MCP_QUEUE_LOG | m_fl, &all_banks);
 
        cr4_set_bits(X86_CR4_MCE);
 
index 4e86d97..0bfc140 100644 (file)
@@ -235,7 +235,7 @@ static void __maybe_unused raise_mce(struct mce *m)
                unsigned long start;
                int cpu;
 
-               get_online_cpus();
+               cpus_read_lock();
                cpumask_copy(mce_inject_cpumask, cpu_online_mask);
                cpumask_clear_cpu(get_cpu(), mce_inject_cpumask);
                for_each_online_cpu(cpu) {
@@ -269,7 +269,7 @@ static void __maybe_unused raise_mce(struct mce *m)
                }
                raise_local();
                put_cpu();
-               put_online_cpus();
+               cpus_read_unlock();
        } else {
                preempt_disable();
                raise_local();
@@ -529,7 +529,7 @@ static void do_inject(void)
                cpu = get_nbc_for_node(topology_die_id(cpu));
        }
 
-       get_online_cpus();
+       cpus_read_lock();
        if (!cpu_online(cpu))
                goto err;
 
@@ -553,7 +553,7 @@ static void do_inject(void)
        }
 
 err:
-       put_online_cpus();
+       cpus_read_unlock();
 
 }
 
index 6a6318e..efb69be 100644 (file)
@@ -55,7 +55,7 @@ LIST_HEAD(microcode_cache);
  * All non cpu-hotplug-callback call sites use:
  *
  * - microcode_mutex to synchronize with each other;
- * - get/put_online_cpus() to synchronize with
+ * - cpus_read_lock/unlock() to synchronize with
  *   the cpu-hotplug-callback call sites.
  *
  * We guarantee that only a single cpu is being
@@ -431,7 +431,7 @@ static ssize_t microcode_write(struct file *file, const char __user *buf,
                return ret;
        }
 
-       get_online_cpus();
+       cpus_read_lock();
        mutex_lock(&microcode_mutex);
 
        if (do_microcode_update(buf, len) == 0)
@@ -441,7 +441,7 @@ static ssize_t microcode_write(struct file *file, const char __user *buf,
                perf_check_microcode();
 
        mutex_unlock(&microcode_mutex);
-       put_online_cpus();
+       cpus_read_unlock();
 
        return ret;
 }
@@ -629,7 +629,7 @@ static ssize_t reload_store(struct device *dev,
        if (val != 1)
                return size;
 
-       get_online_cpus();
+       cpus_read_lock();
 
        ret = check_online_cpus();
        if (ret)
@@ -644,7 +644,7 @@ static ssize_t reload_store(struct device *dev,
        mutex_unlock(&microcode_mutex);
 
 put:
-       put_online_cpus();
+       cpus_read_unlock();
 
        if (ret == 0)
                ret = size;
@@ -853,14 +853,14 @@ static int __init microcode_init(void)
        if (IS_ERR(microcode_pdev))
                return PTR_ERR(microcode_pdev);
 
-       get_online_cpus();
+       cpus_read_lock();
        mutex_lock(&microcode_mutex);
 
        error = subsys_interface_register(&mc_cpu_interface);
        if (!error)
                perf_check_microcode();
        mutex_unlock(&microcode_mutex);
-       put_online_cpus();
+       cpus_read_unlock();
 
        if (error)
                goto out_pdev;
@@ -892,13 +892,13 @@ static int __init microcode_init(void)
                           &cpu_root_microcode_group);
 
  out_driver:
-       get_online_cpus();
+       cpus_read_lock();
        mutex_lock(&microcode_mutex);
 
        subsys_interface_unregister(&mc_cpu_interface);
 
        mutex_unlock(&microcode_mutex);
-       put_online_cpus();
+       cpus_read_unlock();
 
  out_pdev:
        platform_device_unregister(microcode_pdev);
index a76694b..2746cac 100644 (file)
@@ -336,7 +336,7 @@ int mtrr_add_page(unsigned long base, unsigned long size,
        replace = -1;
 
        /* No CPU hotplug when we change MTRR entries */
-       get_online_cpus();
+       cpus_read_lock();
 
        /* Search for existing MTRR  */
        mutex_lock(&mtrr_mutex);
@@ -398,7 +398,7 @@ int mtrr_add_page(unsigned long base, unsigned long size,
        error = i;
  out:
        mutex_unlock(&mtrr_mutex);
-       put_online_cpus();
+       cpus_read_unlock();
        return error;
 }
 
@@ -485,7 +485,7 @@ int mtrr_del_page(int reg, unsigned long base, unsigned long size)
 
        max = num_var_ranges;
        /* No CPU hotplug when we change MTRR entries */
-       get_online_cpus();
+       cpus_read_lock();
        mutex_lock(&mtrr_mutex);
        if (reg < 0) {
                /*  Search for existing MTRR  */
@@ -520,7 +520,7 @@ int mtrr_del_page(int reg, unsigned long base, unsigned long size)
        error = reg;
  out:
        mutex_unlock(&mtrr_mutex);
-       put_online_cpus();
+       cpus_read_unlock();
        return error;
 }
 
index 23001ae..4b8813b 100644 (file)
@@ -57,128 +57,57 @@ static void
 mba_wrmsr_amd(struct rdt_domain *d, struct msr_param *m,
              struct rdt_resource *r);
 
-#define domain_init(id) LIST_HEAD_INIT(rdt_resources_all[id].domains)
+#define domain_init(id) LIST_HEAD_INIT(rdt_resources_all[id].r_resctrl.domains)
 
-struct rdt_resource rdt_resources_all[] = {
+struct rdt_hw_resource rdt_resources_all[] = {
        [RDT_RESOURCE_L3] =
        {
-               .rid                    = RDT_RESOURCE_L3,
-               .name                   = "L3",
-               .domains                = domain_init(RDT_RESOURCE_L3),
-               .msr_base               = MSR_IA32_L3_CBM_BASE,
-               .msr_update             = cat_wrmsr,
-               .cache_level            = 3,
-               .cache = {
-                       .min_cbm_bits   = 1,
-                       .cbm_idx_mult   = 1,
-                       .cbm_idx_offset = 0,
-               },
-               .parse_ctrlval          = parse_cbm,
-               .format_str             = "%d=%0*x",
-               .fflags                 = RFTYPE_RES_CACHE,
-       },
-       [RDT_RESOURCE_L3DATA] =
-       {
-               .rid                    = RDT_RESOURCE_L3DATA,
-               .name                   = "L3DATA",
-               .domains                = domain_init(RDT_RESOURCE_L3DATA),
-               .msr_base               = MSR_IA32_L3_CBM_BASE,
-               .msr_update             = cat_wrmsr,
-               .cache_level            = 3,
-               .cache = {
-                       .min_cbm_bits   = 1,
-                       .cbm_idx_mult   = 2,
-                       .cbm_idx_offset = 0,
+               .r_resctrl = {
+                       .rid                    = RDT_RESOURCE_L3,
+                       .name                   = "L3",
+                       .cache_level            = 3,
+                       .cache = {
+                               .min_cbm_bits   = 1,
+                       },
+                       .domains                = domain_init(RDT_RESOURCE_L3),
+                       .parse_ctrlval          = parse_cbm,
+                       .format_str             = "%d=%0*x",
+                       .fflags                 = RFTYPE_RES_CACHE,
                },
-               .parse_ctrlval          = parse_cbm,
-               .format_str             = "%d=%0*x",
-               .fflags                 = RFTYPE_RES_CACHE,
-       },
-       [RDT_RESOURCE_L3CODE] =
-       {
-               .rid                    = RDT_RESOURCE_L3CODE,
-               .name                   = "L3CODE",
-               .domains                = domain_init(RDT_RESOURCE_L3CODE),
                .msr_base               = MSR_IA32_L3_CBM_BASE,
                .msr_update             = cat_wrmsr,
-               .cache_level            = 3,
-               .cache = {
-                       .min_cbm_bits   = 1,
-                       .cbm_idx_mult   = 2,
-                       .cbm_idx_offset = 1,
-               },
-               .parse_ctrlval          = parse_cbm,
-               .format_str             = "%d=%0*x",
-               .fflags                 = RFTYPE_RES_CACHE,
        },
        [RDT_RESOURCE_L2] =
        {
-               .rid                    = RDT_RESOURCE_L2,
-               .name                   = "L2",
-               .domains                = domain_init(RDT_RESOURCE_L2),
-               .msr_base               = MSR_IA32_L2_CBM_BASE,
-               .msr_update             = cat_wrmsr,
-               .cache_level            = 2,
-               .cache = {
-                       .min_cbm_bits   = 1,
-                       .cbm_idx_mult   = 1,
-                       .cbm_idx_offset = 0,
+               .r_resctrl = {
+                       .rid                    = RDT_RESOURCE_L2,
+                       .name                   = "L2",
+                       .cache_level            = 2,
+                       .cache = {
+                               .min_cbm_bits   = 1,
+                       },
+                       .domains                = domain_init(RDT_RESOURCE_L2),
+                       .parse_ctrlval          = parse_cbm,
+                       .format_str             = "%d=%0*x",
+                       .fflags                 = RFTYPE_RES_CACHE,
                },
-               .parse_ctrlval          = parse_cbm,
-               .format_str             = "%d=%0*x",
-               .fflags                 = RFTYPE_RES_CACHE,
-       },
-       [RDT_RESOURCE_L2DATA] =
-       {
-               .rid                    = RDT_RESOURCE_L2DATA,
-               .name                   = "L2DATA",
-               .domains                = domain_init(RDT_RESOURCE_L2DATA),
                .msr_base               = MSR_IA32_L2_CBM_BASE,
                .msr_update             = cat_wrmsr,
-               .cache_level            = 2,
-               .cache = {
-                       .min_cbm_bits   = 1,
-                       .cbm_idx_mult   = 2,
-                       .cbm_idx_offset = 0,
-               },
-               .parse_ctrlval          = parse_cbm,
-               .format_str             = "%d=%0*x",
-               .fflags                 = RFTYPE_RES_CACHE,
-       },
-       [RDT_RESOURCE_L2CODE] =
-       {
-               .rid                    = RDT_RESOURCE_L2CODE,
-               .name                   = "L2CODE",
-               .domains                = domain_init(RDT_RESOURCE_L2CODE),
-               .msr_base               = MSR_IA32_L2_CBM_BASE,
-               .msr_update             = cat_wrmsr,
-               .cache_level            = 2,
-               .cache = {
-                       .min_cbm_bits   = 1,
-                       .cbm_idx_mult   = 2,
-                       .cbm_idx_offset = 1,
-               },
-               .parse_ctrlval          = parse_cbm,
-               .format_str             = "%d=%0*x",
-               .fflags                 = RFTYPE_RES_CACHE,
        },
        [RDT_RESOURCE_MBA] =
        {
-               .rid                    = RDT_RESOURCE_MBA,
-               .name                   = "MB",
-               .domains                = domain_init(RDT_RESOURCE_MBA),
-               .cache_level            = 3,
-               .parse_ctrlval          = parse_bw,
-               .format_str             = "%d=%*u",
-               .fflags                 = RFTYPE_RES_MB,
+               .r_resctrl = {
+                       .rid                    = RDT_RESOURCE_MBA,
+                       .name                   = "MB",
+                       .cache_level            = 3,
+                       .domains                = domain_init(RDT_RESOURCE_MBA),
+                       .parse_ctrlval          = parse_bw,
+                       .format_str             = "%d=%*u",
+                       .fflags                 = RFTYPE_RES_MB,
+               },
        },
 };
 
-static unsigned int cbm_idx(struct rdt_resource *r, unsigned int closid)
-{
-       return closid * r->cache.cbm_idx_mult + r->cache.cbm_idx_offset;
-}
-
 /*
  * cache_alloc_hsw_probe() - Have to probe for Intel haswell server CPUs
  * as they do not have CPUID enumeration support for Cache allocation.
@@ -199,7 +128,8 @@ static unsigned int cbm_idx(struct rdt_resource *r, unsigned int closid)
  */
 static inline void cache_alloc_hsw_probe(void)
 {
-       struct rdt_resource *r  = &rdt_resources_all[RDT_RESOURCE_L3];
+       struct rdt_hw_resource *hw_res = &rdt_resources_all[RDT_RESOURCE_L3];
+       struct rdt_resource *r  = &hw_res->r_resctrl;
        u32 l, h, max_cbm = BIT_MASK(20) - 1;
 
        if (wrmsr_safe(MSR_IA32_L3_CBM_BASE, max_cbm, 0))
@@ -211,7 +141,7 @@ static inline void cache_alloc_hsw_probe(void)
        if (l != max_cbm)
                return;
 
-       r->num_closid = 4;
+       hw_res->num_closid = 4;
        r->default_ctrl = max_cbm;
        r->cache.cbm_len = 20;
        r->cache.shareable_bits = 0xc0000;
@@ -225,7 +155,7 @@ static inline void cache_alloc_hsw_probe(void)
 bool is_mba_sc(struct rdt_resource *r)
 {
        if (!r)
-               return rdt_resources_all[RDT_RESOURCE_MBA].membw.mba_sc;
+               return rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl.membw.mba_sc;
 
        return r->membw.mba_sc;
 }
@@ -253,12 +183,13 @@ static inline bool rdt_get_mb_table(struct rdt_resource *r)
 
 static bool __get_mem_config_intel(struct rdt_resource *r)
 {
+       struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
        union cpuid_0x10_3_eax eax;
        union cpuid_0x10_x_edx edx;
        u32 ebx, ecx, max_delay;
 
        cpuid_count(0x00000010, 3, &eax.full, &ebx, &ecx, &edx.full);
-       r->num_closid = edx.split.cos_max + 1;
+       hw_res->num_closid = edx.split.cos_max + 1;
        max_delay = eax.split.max_delay + 1;
        r->default_ctrl = MAX_MBA_BW;
        r->membw.arch_needs_linear = true;
@@ -287,12 +218,13 @@ static bool __get_mem_config_intel(struct rdt_resource *r)
 
 static bool __rdt_get_mem_config_amd(struct rdt_resource *r)
 {
+       struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
        union cpuid_0x10_3_eax eax;
        union cpuid_0x10_x_edx edx;
        u32 ebx, ecx;
 
        cpuid_count(0x80000020, 1, &eax.full, &ebx, &ecx, &edx.full);
-       r->num_closid = edx.split.cos_max + 1;
+       hw_res->num_closid = edx.split.cos_max + 1;
        r->default_ctrl = MAX_MBA_BW_AMD;
 
        /* AMD does not use delay */
@@ -317,12 +249,13 @@ static bool __rdt_get_mem_config_amd(struct rdt_resource *r)
 
 static void rdt_get_cache_alloc_cfg(int idx, struct rdt_resource *r)
 {
+       struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
        union cpuid_0x10_1_eax eax;
        union cpuid_0x10_x_edx edx;
        u32 ebx, ecx;
 
        cpuid_count(0x00000010, idx, &eax.full, &ebx, &ecx, &edx.full);
-       r->num_closid = edx.split.cos_max + 1;
+       hw_res->num_closid = edx.split.cos_max + 1;
        r->cache.cbm_len = eax.split.cbm_len + 1;
        r->default_ctrl = BIT_MASK(eax.split.cbm_len + 1) - 1;
        r->cache.shareable_bits = ebx & r->default_ctrl;
@@ -331,43 +264,35 @@ static void rdt_get_cache_alloc_cfg(int idx, struct rdt_resource *r)
        r->alloc_enabled = true;
 }
 
-static void rdt_get_cdp_config(int level, int type)
+static void rdt_get_cdp_config(int level)
 {
-       struct rdt_resource *r_l = &rdt_resources_all[level];
-       struct rdt_resource *r = &rdt_resources_all[type];
-
-       r->num_closid = r_l->num_closid / 2;
-       r->cache.cbm_len = r_l->cache.cbm_len;
-       r->default_ctrl = r_l->default_ctrl;
-       r->cache.shareable_bits = r_l->cache.shareable_bits;
-       r->data_width = (r->cache.cbm_len + 3) / 4;
-       r->alloc_capable = true;
        /*
         * By default, CDP is disabled. CDP can be enabled by mount parameter
         * "cdp" during resctrl file system mount time.
         */
-       r->alloc_enabled = false;
+       rdt_resources_all[level].cdp_enabled = false;
+       rdt_resources_all[level].r_resctrl.cdp_capable = true;
 }
 
 static void rdt_get_cdp_l3_config(void)
 {
-       rdt_get_cdp_config(RDT_RESOURCE_L3, RDT_RESOURCE_L3DATA);
-       rdt_get_cdp_config(RDT_RESOURCE_L3, RDT_RESOURCE_L3CODE);
+       rdt_get_cdp_config(RDT_RESOURCE_L3);
 }
 
 static void rdt_get_cdp_l2_config(void)
 {
-       rdt_get_cdp_config(RDT_RESOURCE_L2, RDT_RESOURCE_L2DATA);
-       rdt_get_cdp_config(RDT_RESOURCE_L2, RDT_RESOURCE_L2CODE);
+       rdt_get_cdp_config(RDT_RESOURCE_L2);
 }
 
 static void
 mba_wrmsr_amd(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r)
 {
        unsigned int i;
+       struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
+       struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
 
        for (i = m->low; i < m->high; i++)
-               wrmsrl(r->msr_base + i, d->ctrl_val[i]);
+               wrmsrl(hw_res->msr_base + i, hw_dom->ctrl_val[i]);
 }
 
 /*
@@ -389,19 +314,23 @@ mba_wrmsr_intel(struct rdt_domain *d, struct msr_param *m,
                struct rdt_resource *r)
 {
        unsigned int i;
+       struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
+       struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
 
        /*  Write the delay values for mba. */
        for (i = m->low; i < m->high; i++)
-               wrmsrl(r->msr_base + i, delay_bw_map(d->ctrl_val[i], r));
+               wrmsrl(hw_res->msr_base + i, delay_bw_map(hw_dom->ctrl_val[i], r));
 }
 
 static void
 cat_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r)
 {
        unsigned int i;
+       struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
+       struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
 
        for (i = m->low; i < m->high; i++)
-               wrmsrl(r->msr_base + cbm_idx(r, i), d->ctrl_val[i]);
+               wrmsrl(hw_res->msr_base + i, hw_dom->ctrl_val[i]);
 }
 
 struct rdt_domain *get_domain_from_cpu(int cpu, struct rdt_resource *r)
@@ -417,16 +346,22 @@ struct rdt_domain *get_domain_from_cpu(int cpu, struct rdt_resource *r)
        return NULL;
 }
 
+u32 resctrl_arch_get_num_closid(struct rdt_resource *r)
+{
+       return resctrl_to_arch_res(r)->num_closid;
+}
+
 void rdt_ctrl_update(void *arg)
 {
        struct msr_param *m = arg;
+       struct rdt_hw_resource *hw_res = resctrl_to_arch_res(m->res);
        struct rdt_resource *r = m->res;
        int cpu = smp_processor_id();
        struct rdt_domain *d;
 
        d = get_domain_from_cpu(cpu, r);
        if (d) {
-               r->msr_update(d, m, r);
+               hw_res->msr_update(d, m, r);
                return;
        }
        pr_warn_once("cpu %d not found in any domain for resource %s\n",
@@ -468,6 +403,7 @@ struct rdt_domain *rdt_find_domain(struct rdt_resource *r, int id,
 
 void setup_default_ctrlval(struct rdt_resource *r, u32 *dc, u32 *dm)
 {
+       struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
        int i;
 
        /*
@@ -476,7 +412,7 @@ void setup_default_ctrlval(struct rdt_resource *r, u32 *dc, u32 *dm)
         * For Memory Allocation: Set b/w requested to 100%
         * and the bandwidth in MBps to U32_MAX
         */
-       for (i = 0; i < r->num_closid; i++, dc++, dm++) {
+       for (i = 0; i < hw_res->num_closid; i++, dc++, dm++) {
                *dc = r->default_ctrl;
                *dm = MBA_MAX_MBPS;
        }
@@ -484,26 +420,30 @@ void setup_default_ctrlval(struct rdt_resource *r, u32 *dc, u32 *dm)
 
 static int domain_setup_ctrlval(struct rdt_resource *r, struct rdt_domain *d)
 {
+       struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
+       struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
        struct msr_param m;
        u32 *dc, *dm;
 
-       dc = kmalloc_array(r->num_closid, sizeof(*d->ctrl_val), GFP_KERNEL);
+       dc = kmalloc_array(hw_res->num_closid, sizeof(*hw_dom->ctrl_val),
+                          GFP_KERNEL);
        if (!dc)
                return -ENOMEM;
 
-       dm = kmalloc_array(r->num_closid, sizeof(*d->mbps_val), GFP_KERNEL);
+       dm = kmalloc_array(hw_res->num_closid, sizeof(*hw_dom->mbps_val),
+                          GFP_KERNEL);
        if (!dm) {
                kfree(dc);
                return -ENOMEM;
        }
 
-       d->ctrl_val = dc;
-       d->mbps_val = dm;
+       hw_dom->ctrl_val = dc;
+       hw_dom->mbps_val = dm;
        setup_default_ctrlval(r, dc, dm);
 
        m.low = 0;
-       m.high = r->num_closid;
-       r->msr_update(d, &m, r);
+       m.high = hw_res->num_closid;
+       hw_res->msr_update(d, &m, r);
        return 0;
 }
 
@@ -560,6 +500,7 @@ static void domain_add_cpu(int cpu, struct rdt_resource *r)
 {
        int id = get_cpu_cacheinfo_id(cpu, r->cache_level);
        struct list_head *add_pos = NULL;
+       struct rdt_hw_domain *hw_dom;
        struct rdt_domain *d;
 
        d = rdt_find_domain(r, id, &add_pos);
@@ -575,10 +516,11 @@ static void domain_add_cpu(int cpu, struct rdt_resource *r)
                return;
        }
 
-       d = kzalloc_node(sizeof(*d), GFP_KERNEL, cpu_to_node(cpu));
-       if (!d)
+       hw_dom = kzalloc_node(sizeof(*hw_dom), GFP_KERNEL, cpu_to_node(cpu));
+       if (!hw_dom)
                return;
 
+       d = &hw_dom->d_resctrl;
        d->id = id;
        cpumask_set_cpu(cpu, &d->cpu_mask);
 
@@ -607,6 +549,7 @@ static void domain_add_cpu(int cpu, struct rdt_resource *r)
 static void domain_remove_cpu(int cpu, struct rdt_resource *r)
 {
        int id = get_cpu_cacheinfo_id(cpu, r->cache_level);
+       struct rdt_hw_domain *hw_dom;
        struct rdt_domain *d;
 
        d = rdt_find_domain(r, id, NULL);
@@ -614,6 +557,7 @@ static void domain_remove_cpu(int cpu, struct rdt_resource *r)
                pr_warn("Couldn't find cache id for CPU %d\n", cpu);
                return;
        }
+       hw_dom = resctrl_to_arch_dom(d);
 
        cpumask_clear_cpu(cpu, &d->cpu_mask);
        if (cpumask_empty(&d->cpu_mask)) {
@@ -646,16 +590,16 @@ static void domain_remove_cpu(int cpu, struct rdt_resource *r)
                if (d->plr)
                        d->plr->d = NULL;
 
-               kfree(d->ctrl_val);
-               kfree(d->mbps_val);
+               kfree(hw_dom->ctrl_val);
+               kfree(hw_dom->mbps_val);
                bitmap_free(d->rmid_busy_llc);
                kfree(d->mbm_total);
                kfree(d->mbm_local);
-               kfree(d);
+               kfree(hw_dom);
                return;
        }
 
-       if (r == &rdt_resources_all[RDT_RESOURCE_L3]) {
+       if (r == &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl) {
                if (is_mbm_enabled() && cpu == d->mbm_work_cpu) {
                        cancel_delayed_work(&d->mbm_over);
                        mbm_setup_overflow_handler(d, 0);
@@ -732,13 +676,8 @@ static int resctrl_offline_cpu(unsigned int cpu)
 static __init void rdt_init_padding(void)
 {
        struct rdt_resource *r;
-       int cl;
 
        for_each_alloc_capable_rdt_resource(r) {
-               cl = strlen(r->name);
-               if (cl > max_name_width)
-                       max_name_width = cl;
-
                if (r->data_width > max_data_width)
                        max_data_width = r->data_width;
        }
@@ -827,19 +766,22 @@ static bool __init rdt_cpu_has(int flag)
 
 static __init bool get_mem_config(void)
 {
+       struct rdt_hw_resource *hw_res = &rdt_resources_all[RDT_RESOURCE_MBA];
+
        if (!rdt_cpu_has(X86_FEATURE_MBA))
                return false;
 
        if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
-               return __get_mem_config_intel(&rdt_resources_all[RDT_RESOURCE_MBA]);
+               return __get_mem_config_intel(&hw_res->r_resctrl);
        else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
-               return __rdt_get_mem_config_amd(&rdt_resources_all[RDT_RESOURCE_MBA]);
+               return __rdt_get_mem_config_amd(&hw_res->r_resctrl);
 
        return false;
 }
 
 static __init bool get_rdt_alloc_resources(void)
 {
+       struct rdt_resource *r;
        bool ret = false;
 
        if (rdt_alloc_capable)
@@ -849,14 +791,16 @@ static __init bool get_rdt_alloc_resources(void)
                return false;
 
        if (rdt_cpu_has(X86_FEATURE_CAT_L3)) {
-               rdt_get_cache_alloc_cfg(1, &rdt_resources_all[RDT_RESOURCE_L3]);
+               r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
+               rdt_get_cache_alloc_cfg(1, r);
                if (rdt_cpu_has(X86_FEATURE_CDP_L3))
                        rdt_get_cdp_l3_config();
                ret = true;
        }
        if (rdt_cpu_has(X86_FEATURE_CAT_L2)) {
                /* CPUID 0x10.2 fields are same format at 0x10.1 */
-               rdt_get_cache_alloc_cfg(2, &rdt_resources_all[RDT_RESOURCE_L2]);
+               r = &rdt_resources_all[RDT_RESOURCE_L2].r_resctrl;
+               rdt_get_cache_alloc_cfg(2, r);
                if (rdt_cpu_has(X86_FEATURE_CDP_L2))
                        rdt_get_cdp_l2_config();
                ret = true;
@@ -870,6 +814,8 @@ static __init bool get_rdt_alloc_resources(void)
 
 static __init bool get_rdt_mon_resources(void)
 {
+       struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
+
        if (rdt_cpu_has(X86_FEATURE_CQM_OCCUP_LLC))
                rdt_mon_features |= (1 << QOS_L3_OCCUP_EVENT_ID);
        if (rdt_cpu_has(X86_FEATURE_CQM_MBM_TOTAL))
@@ -880,7 +826,7 @@ static __init bool get_rdt_mon_resources(void)
        if (!rdt_mon_features)
                return false;
 
-       return !rdt_get_mon_l3_config(&rdt_resources_all[RDT_RESOURCE_L3]);
+       return !rdt_get_mon_l3_config(r);
 }
 
 static __init void __check_quirks_intel(void)
@@ -918,42 +864,40 @@ static __init bool get_rdt_resources(void)
 
 static __init void rdt_init_res_defs_intel(void)
 {
+       struct rdt_hw_resource *hw_res;
        struct rdt_resource *r;
 
        for_each_rdt_resource(r) {
+               hw_res = resctrl_to_arch_res(r);
+
                if (r->rid == RDT_RESOURCE_L3 ||
-                   r->rid == RDT_RESOURCE_L3DATA ||
-                   r->rid == RDT_RESOURCE_L3CODE ||
-                   r->rid == RDT_RESOURCE_L2 ||
-                   r->rid == RDT_RESOURCE_L2DATA ||
-                   r->rid == RDT_RESOURCE_L2CODE) {
+                   r->rid == RDT_RESOURCE_L2) {
                        r->cache.arch_has_sparse_bitmaps = false;
                        r->cache.arch_has_empty_bitmaps = false;
                        r->cache.arch_has_per_cpu_cfg = false;
                } else if (r->rid == RDT_RESOURCE_MBA) {
-                       r->msr_base = MSR_IA32_MBA_THRTL_BASE;
-                       r->msr_update = mba_wrmsr_intel;
+                       hw_res->msr_base = MSR_IA32_MBA_THRTL_BASE;
+                       hw_res->msr_update = mba_wrmsr_intel;
                }
        }
 }
 
 static __init void rdt_init_res_defs_amd(void)
 {
+       struct rdt_hw_resource *hw_res;
        struct rdt_resource *r;
 
        for_each_rdt_resource(r) {
+               hw_res = resctrl_to_arch_res(r);
+
                if (r->rid == RDT_RESOURCE_L3 ||
-                   r->rid == RDT_RESOURCE_L3DATA ||
-                   r->rid == RDT_RESOURCE_L3CODE ||
-                   r->rid == RDT_RESOURCE_L2 ||
-                   r->rid == RDT_RESOURCE_L2DATA ||
-                   r->rid == RDT_RESOURCE_L2CODE) {
+                   r->rid == RDT_RESOURCE_L2) {
                        r->cache.arch_has_sparse_bitmaps = true;
                        r->cache.arch_has_empty_bitmaps = true;
                        r->cache.arch_has_per_cpu_cfg = true;
                } else if (r->rid == RDT_RESOURCE_MBA) {
-                       r->msr_base = MSR_IA32_MBA_BW_BASE;
-                       r->msr_update = mba_wrmsr_amd;
+                       hw_res->msr_base = MSR_IA32_MBA_BW_BASE;
+                       hw_res->msr_update = mba_wrmsr_amd;
                }
        }
 }
index c877642..8766627 100644 (file)
@@ -57,20 +57,23 @@ static bool bw_validate(char *buf, unsigned long *data, struct rdt_resource *r)
        return true;
 }
 
-int parse_bw(struct rdt_parse_data *data, struct rdt_resource *r,
+int parse_bw(struct rdt_parse_data *data, struct resctrl_schema *s,
             struct rdt_domain *d)
 {
+       struct resctrl_staged_config *cfg;
+       struct rdt_resource *r = s->res;
        unsigned long bw_val;
 
-       if (d->have_new_ctrl) {
+       cfg = &d->staged_config[s->conf_type];
+       if (cfg->have_new_ctrl) {
                rdt_last_cmd_printf("Duplicate domain %d\n", d->id);
                return -EINVAL;
        }
 
        if (!bw_validate(data->buf, &bw_val, r))
                return -EINVAL;
-       d->new_ctrl = bw_val;
-       d->have_new_ctrl = true;
+       cfg->new_ctrl = bw_val;
+       cfg->have_new_ctrl = true;
 
        return 0;
 }
@@ -125,13 +128,16 @@ static bool cbm_validate(char *buf, u32 *data, struct rdt_resource *r)
  * Read one cache bit mask (hex). Check that it is valid for the current
  * resource type.
  */
-int parse_cbm(struct rdt_parse_data *data, struct rdt_resource *r,
+int parse_cbm(struct rdt_parse_data *data, struct resctrl_schema *s,
              struct rdt_domain *d)
 {
        struct rdtgroup *rdtgrp = data->rdtgrp;
+       struct resctrl_staged_config *cfg;
+       struct rdt_resource *r = s->res;
        u32 cbm_val;
 
-       if (d->have_new_ctrl) {
+       cfg = &d->staged_config[s->conf_type];
+       if (cfg->have_new_ctrl) {
                rdt_last_cmd_printf("Duplicate domain %d\n", d->id);
                return -EINVAL;
        }
@@ -160,12 +166,12 @@ int parse_cbm(struct rdt_parse_data *data, struct rdt_resource *r,
         * The CBM may not overlap with the CBM of another closid if
         * either is exclusive.
         */
-       if (rdtgroup_cbm_overlaps(r, d, cbm_val, rdtgrp->closid, true)) {
+       if (rdtgroup_cbm_overlaps(s, d, cbm_val, rdtgrp->closid, true)) {
                rdt_last_cmd_puts("Overlaps with exclusive group\n");
                return -EINVAL;
        }
 
-       if (rdtgroup_cbm_overlaps(r, d, cbm_val, rdtgrp->closid, false)) {
+       if (rdtgroup_cbm_overlaps(s, d, cbm_val, rdtgrp->closid, false)) {
                if (rdtgrp->mode == RDT_MODE_EXCLUSIVE ||
                    rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
                        rdt_last_cmd_puts("Overlaps with other group\n");
@@ -173,8 +179,8 @@ int parse_cbm(struct rdt_parse_data *data, struct rdt_resource *r,
                }
        }
 
-       d->new_ctrl = cbm_val;
-       d->have_new_ctrl = true;
+       cfg->new_ctrl = cbm_val;
+       cfg->have_new_ctrl = true;
 
        return 0;
 }
@@ -185,9 +191,12 @@ int parse_cbm(struct rdt_parse_data *data, struct rdt_resource *r,
  * separated by ";". The "id" is in decimal, and must match one of
  * the "id"s for this resource.
  */
-static int parse_line(char *line, struct rdt_resource *r,
+static int parse_line(char *line, struct resctrl_schema *s,
                      struct rdtgroup *rdtgrp)
 {
+       enum resctrl_conf_type t = s->conf_type;
+       struct resctrl_staged_config *cfg;
+       struct rdt_resource *r = s->res;
        struct rdt_parse_data data;
        char *dom = NULL, *id;
        struct rdt_domain *d;
@@ -213,9 +222,10 @@ next:
                if (d->id == dom_id) {
                        data.buf = dom;
                        data.rdtgrp = rdtgrp;
-                       if (r->parse_ctrlval(&data, r, d))
+                       if (r->parse_ctrlval(&data, s, d))
                                return -EINVAL;
                        if (rdtgrp->mode ==  RDT_MODE_PSEUDO_LOCKSETUP) {
+                               cfg = &d->staged_config[t];
                                /*
                                 * In pseudo-locking setup mode and just
                                 * parsed a valid CBM that should be
@@ -224,9 +234,9 @@ next:
                                 * the required initialization for single
                                 * region and return.
                                 */
-                               rdtgrp->plr->r = r;
+                               rdtgrp->plr->s = s;
                                rdtgrp->plr->d = d;
-                               rdtgrp->plr->cbm = d->new_ctrl;
+                               rdtgrp->plr->cbm = cfg->new_ctrl;
                                d->plr = rdtgrp->plr;
                                return 0;
                        }
@@ -236,28 +246,72 @@ next:
        return -EINVAL;
 }
 
-int update_domains(struct rdt_resource *r, int closid)
+static u32 get_config_index(u32 closid, enum resctrl_conf_type type)
 {
+       switch (type) {
+       default:
+       case CDP_NONE:
+               return closid;
+       case CDP_CODE:
+               return closid * 2 + 1;
+       case CDP_DATA:
+               return closid * 2;
+       }
+}
+
+static bool apply_config(struct rdt_hw_domain *hw_dom,
+                        struct resctrl_staged_config *cfg, u32 idx,
+                        cpumask_var_t cpu_mask, bool mba_sc)
+{
+       struct rdt_domain *dom = &hw_dom->d_resctrl;
+       u32 *dc = !mba_sc ? hw_dom->ctrl_val : hw_dom->mbps_val;
+
+       if (cfg->new_ctrl != dc[idx]) {
+               cpumask_set_cpu(cpumask_any(&dom->cpu_mask), cpu_mask);
+               dc[idx] = cfg->new_ctrl;
+
+               return true;
+       }
+
+       return false;
+}
+
+int resctrl_arch_update_domains(struct rdt_resource *r, u32 closid)
+{
+       struct resctrl_staged_config *cfg;
+       struct rdt_hw_domain *hw_dom;
        struct msr_param msr_param;
+       enum resctrl_conf_type t;
        cpumask_var_t cpu_mask;
        struct rdt_domain *d;
        bool mba_sc;
-       u32 *dc;
        int cpu;
+       u32 idx;
 
        if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
                return -ENOMEM;
 
-       msr_param.low = closid;
-       msr_param.high = msr_param.low + 1;
-       msr_param.res = r;
-
        mba_sc = is_mba_sc(r);
+       msr_param.res = NULL;
        list_for_each_entry(d, &r->domains, list) {
-               dc = !mba_sc ? d->ctrl_val : d->mbps_val;
-               if (d->have_new_ctrl && d->new_ctrl != dc[closid]) {
-                       cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);
-                       dc[closid] = d->new_ctrl;
+               hw_dom = resctrl_to_arch_dom(d);
+               for (t = 0; t < CDP_NUM_TYPES; t++) {
+                       cfg = &hw_dom->d_resctrl.staged_config[t];
+                       if (!cfg->have_new_ctrl)
+                               continue;
+
+                       idx = get_config_index(closid, t);
+                       if (!apply_config(hw_dom, cfg, idx, cpu_mask, mba_sc))
+                               continue;
+
+                       if (!msr_param.res) {
+                               msr_param.low = idx;
+                               msr_param.high = msr_param.low + 1;
+                               msr_param.res = r;
+                       } else {
+                               msr_param.low = min(msr_param.low, idx);
+                               msr_param.high = max(msr_param.high, idx + 1);
+                       }
                }
        }
 
@@ -284,11 +338,11 @@ done:
 static int rdtgroup_parse_resource(char *resname, char *tok,
                                   struct rdtgroup *rdtgrp)
 {
-       struct rdt_resource *r;
+       struct resctrl_schema *s;
 
-       for_each_alloc_enabled_rdt_resource(r) {
-               if (!strcmp(resname, r->name) && rdtgrp->closid < r->num_closid)
-                       return parse_line(tok, r, rdtgrp);
+       list_for_each_entry(s, &resctrl_schema_all, list) {
+               if (!strcmp(resname, s->name) && rdtgrp->closid < s->num_closid)
+                       return parse_line(tok, s, rdtgrp);
        }
        rdt_last_cmd_printf("Unknown or unsupported resource name '%s'\n", resname);
        return -EINVAL;
@@ -297,6 +351,7 @@ static int rdtgroup_parse_resource(char *resname, char *tok,
 ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
                                char *buf, size_t nbytes, loff_t off)
 {
+       struct resctrl_schema *s;
        struct rdtgroup *rdtgrp;
        struct rdt_domain *dom;
        struct rdt_resource *r;
@@ -327,9 +382,9 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
                goto out;
        }
 
-       for_each_alloc_enabled_rdt_resource(r) {
-               list_for_each_entry(dom, &r->domains, list)
-                       dom->have_new_ctrl = false;
+       list_for_each_entry(s, &resctrl_schema_all, list) {
+               list_for_each_entry(dom, &s->res->domains, list)
+                       memset(dom->staged_config, 0, sizeof(dom->staged_config));
        }
 
        while ((tok = strsep(&buf, "\n")) != NULL) {
@@ -349,8 +404,9 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
                        goto out;
        }
 
-       for_each_alloc_enabled_rdt_resource(r) {
-               ret = update_domains(r, rdtgrp->closid);
+       list_for_each_entry(s, &resctrl_schema_all, list) {
+               r = s->res;
+               ret = resctrl_arch_update_domains(r, rdtgrp->closid);
                if (ret)
                        goto out;
        }
@@ -371,19 +427,31 @@ out:
        return ret ?: nbytes;
 }
 
-static void show_doms(struct seq_file *s, struct rdt_resource *r, int closid)
+u32 resctrl_arch_get_config(struct rdt_resource *r, struct rdt_domain *d,
+                           u32 closid, enum resctrl_conf_type type)
+{
+       struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
+       u32 idx = get_config_index(closid, type);
+
+       if (!is_mba_sc(r))
+               return hw_dom->ctrl_val[idx];
+       return hw_dom->mbps_val[idx];
+}
+
+static void show_doms(struct seq_file *s, struct resctrl_schema *schema, int closid)
 {
+       struct rdt_resource *r = schema->res;
        struct rdt_domain *dom;
        bool sep = false;
        u32 ctrl_val;
 
-       seq_printf(s, "%*s:", max_name_width, r->name);
+       seq_printf(s, "%*s:", max_name_width, schema->name);
        list_for_each_entry(dom, &r->domains, list) {
                if (sep)
                        seq_puts(s, ";");
 
-               ctrl_val = (!is_mba_sc(r) ? dom->ctrl_val[closid] :
-                           dom->mbps_val[closid]);
+               ctrl_val = resctrl_arch_get_config(r, dom, closid,
+                                                  schema->conf_type);
                seq_printf(s, r->format_str, dom->id, max_data_width,
                           ctrl_val);
                sep = true;
@@ -394,16 +462,17 @@ static void show_doms(struct seq_file *s, struct rdt_resource *r, int closid)
 int rdtgroup_schemata_show(struct kernfs_open_file *of,
                           struct seq_file *s, void *v)
 {
+       struct resctrl_schema *schema;
        struct rdtgroup *rdtgrp;
-       struct rdt_resource *r;
        int ret = 0;
        u32 closid;
 
        rdtgrp = rdtgroup_kn_lock_live(of->kn);
        if (rdtgrp) {
                if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
-                       for_each_alloc_enabled_rdt_resource(r)
-                               seq_printf(s, "%s:uninitialized\n", r->name);
+                       list_for_each_entry(schema, &resctrl_schema_all, list) {
+                               seq_printf(s, "%s:uninitialized\n", schema->name);
+                       }
                } else if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
                        if (!rdtgrp->plr->d) {
                                rdt_last_cmd_clear();
@@ -411,15 +480,15 @@ int rdtgroup_schemata_show(struct kernfs_open_file *of,
                                ret = -ENODEV;
                        } else {
                                seq_printf(s, "%s:%d=%x\n",
-                                          rdtgrp->plr->r->name,
+                                          rdtgrp->plr->s->res->name,
                                           rdtgrp->plr->d->id,
                                           rdtgrp->plr->cbm);
                        }
                } else {
                        closid = rdtgrp->closid;
-                       for_each_alloc_enabled_rdt_resource(r) {
-                               if (closid < r->num_closid)
-                                       show_doms(s, r, closid);
+                       list_for_each_entry(schema, &resctrl_schema_all, list) {
+                               if (closid < schema->num_closid)
+                                       show_doms(s, schema, closid);
                        }
                }
        } else {
@@ -449,6 +518,7 @@ void mon_event_read(struct rmid_read *rr, struct rdt_resource *r,
 int rdtgroup_mondata_show(struct seq_file *m, void *arg)
 {
        struct kernfs_open_file *of = m->private;
+       struct rdt_hw_resource *hw_res;
        u32 resid, evtid, domid;
        struct rdtgroup *rdtgrp;
        struct rdt_resource *r;
@@ -468,7 +538,8 @@ int rdtgroup_mondata_show(struct seq_file *m, void *arg)
        domid = md.u.domid;
        evtid = md.u.evtid;
 
-       r = &rdt_resources_all[resid];
+       hw_res = &rdt_resources_all[resid];
+       r = &hw_res->r_resctrl;
        d = rdt_find_domain(r, domid, NULL);
        if (IS_ERR_OR_NULL(d)) {
                ret = -ENOENT;
@@ -482,7 +553,7 @@ int rdtgroup_mondata_show(struct seq_file *m, void *arg)
        else if (rr.val & RMID_VAL_UNAVAIL)
                seq_puts(m, "Unavailable\n");
        else
-               seq_printf(m, "%llu\n", rr.val * r->mon_scale);
+               seq_printf(m, "%llu\n", rr.val * hw_res->mon_scale);
 
 out:
        rdtgroup_kn_unlock(of->kn);
index 6a5f60a..1d64718 100644 (file)
@@ -2,6 +2,7 @@
 #ifndef _ASM_X86_RESCTRL_INTERNAL_H
 #define _ASM_X86_RESCTRL_INTERNAL_H
 
+#include <linux/resctrl.h>
 #include <linux/sched.h>
 #include <linux/kernfs.h>
 #include <linux/fs_context.h>
@@ -109,6 +110,7 @@ extern unsigned int resctrl_cqm_threshold;
 extern bool rdt_alloc_capable;
 extern bool rdt_mon_capable;
 extern unsigned int rdt_mon_features;
+extern struct list_head resctrl_schema_all;
 
 enum rdt_group_type {
        RDTCTRL_GROUP = 0,
@@ -161,8 +163,8 @@ struct mongroup {
 
 /**
  * struct pseudo_lock_region - pseudo-lock region information
- * @r:                 RDT resource to which this pseudo-locked region
- *                     belongs
+ * @s:                 Resctrl schema for the resource to which this
+ *                     pseudo-locked region belongs
  * @d:                 RDT domain to which this pseudo-locked region
  *                     belongs
  * @cbm:               bitmask of the pseudo-locked region
@@ -182,7 +184,7 @@ struct mongroup {
  * @pm_reqs:           Power management QoS requests related to this region
  */
 struct pseudo_lock_region {
-       struct rdt_resource     *r;
+       struct resctrl_schema   *s;
        struct rdt_domain       *d;
        u32                     cbm;
        wait_queue_head_t       lock_thread_wq;
@@ -303,44 +305,25 @@ struct mbm_state {
 };
 
 /**
- * struct rdt_domain - group of cpus sharing an RDT resource
- * @list:      all instances of this resource
- * @id:                unique id for this instance
- * @cpu_mask:  which cpus share this resource
- * @rmid_busy_llc:
- *             bitmap of which limbo RMIDs are above threshold
- * @mbm_total: saved state for MBM total bandwidth
- * @mbm_local: saved state for MBM local bandwidth
- * @mbm_over:  worker to periodically read MBM h/w counters
- * @cqm_limbo: worker to periodically read CQM h/w counters
- * @mbm_work_cpu:
- *             worker cpu for MBM h/w counters
- * @cqm_work_cpu:
- *             worker cpu for CQM h/w counters
+ * struct rdt_hw_domain - Arch private attributes of a set of CPUs that share
+ *                       a resource
+ * @d_resctrl: Properties exposed to the resctrl file system
  * @ctrl_val:  array of cache or mem ctrl values (indexed by CLOSID)
  * @mbps_val:  When mba_sc is enabled, this holds the bandwidth in MBps
- * @new_ctrl:  new ctrl value to be loaded
- * @have_new_ctrl: did user provide new_ctrl for this domain
- * @plr:       pseudo-locked region (if any) associated with domain
+ *
+ * Members of this structure are accessed via helpers that provide abstraction.
  */
-struct rdt_domain {
-       struct list_head                list;
-       int                             id;
-       struct cpumask                  cpu_mask;
-       unsigned long                   *rmid_busy_llc;
-       struct mbm_state                *mbm_total;
-       struct mbm_state                *mbm_local;
-       struct delayed_work             mbm_over;
-       struct delayed_work             cqm_limbo;
-       int                             mbm_work_cpu;
-       int                             cqm_work_cpu;
+struct rdt_hw_domain {
+       struct rdt_domain               d_resctrl;
        u32                             *ctrl_val;
        u32                             *mbps_val;
-       u32                             new_ctrl;
-       bool                            have_new_ctrl;
-       struct pseudo_lock_region       *plr;
 };
 
+static inline struct rdt_hw_domain *resctrl_to_arch_dom(struct rdt_domain *r)
+{
+       return container_of(r, struct rdt_hw_domain, d_resctrl);
+}
+
 /**
  * struct msr_param - set a range of MSRs from a domain
  * @res:       The resource to use
@@ -349,69 +332,8 @@ struct rdt_domain {
  */
 struct msr_param {
        struct rdt_resource     *res;
-       int                     low;
-       int                     high;
-};
-
-/**
- * struct rdt_cache - Cache allocation related data
- * @cbm_len:           Length of the cache bit mask
- * @min_cbm_bits:      Minimum number of consecutive bits to be set
- * @cbm_idx_mult:      Multiplier of CBM index
- * @cbm_idx_offset:    Offset of CBM index. CBM index is computed by:
- *                     closid * cbm_idx_multi + cbm_idx_offset
- *                     in a cache bit mask
- * @shareable_bits:    Bitmask of shareable resource with other
- *                     executing entities
- * @arch_has_sparse_bitmaps:   True if a bitmap like f00f is valid.
- * @arch_has_empty_bitmaps:    True if the '0' bitmap is valid.
- * @arch_has_per_cpu_cfg:      True if QOS_CFG register for this cache
- *                             level has CPU scope.
- */
-struct rdt_cache {
-       unsigned int    cbm_len;
-       unsigned int    min_cbm_bits;
-       unsigned int    cbm_idx_mult;
-       unsigned int    cbm_idx_offset;
-       unsigned int    shareable_bits;
-       bool            arch_has_sparse_bitmaps;
-       bool            arch_has_empty_bitmaps;
-       bool            arch_has_per_cpu_cfg;
-};
-
-/**
- * enum membw_throttle_mode - System's memory bandwidth throttling mode
- * @THREAD_THROTTLE_UNDEFINED: Not relevant to the system
- * @THREAD_THROTTLE_MAX:       Memory bandwidth is throttled at the core
- *                             always using smallest bandwidth percentage
- *                             assigned to threads, aka "max throttling"
- * @THREAD_THROTTLE_PER_THREAD:        Memory bandwidth is throttled at the thread
- */
-enum membw_throttle_mode {
-       THREAD_THROTTLE_UNDEFINED = 0,
-       THREAD_THROTTLE_MAX,
-       THREAD_THROTTLE_PER_THREAD,
-};
-
-/**
- * struct rdt_membw - Memory bandwidth allocation related data
- * @min_bw:            Minimum memory bandwidth percentage user can request
- * @bw_gran:           Granularity at which the memory bandwidth is allocated
- * @delay_linear:      True if memory B/W delay is in linear scale
- * @arch_needs_linear: True if we can't configure non-linear resources
- * @throttle_mode:     Bandwidth throttling mode when threads request
- *                     different memory bandwidths
- * @mba_sc:            True if MBA software controller(mba_sc) is enabled
- * @mb_map:            Mapping of memory B/W percentage to memory B/W delay
- */
-struct rdt_membw {
-       u32                             min_bw;
-       u32                             bw_gran;
-       u32                             delay_linear;
-       bool                            arch_needs_linear;
-       enum membw_throttle_mode        throttle_mode;
-       bool                            mba_sc;
-       u32                             *mb_map;
+       u32                     low;
+       u32                     high;
 };
 
 static inline bool is_llc_occupancy_enabled(void)
@@ -446,111 +368,103 @@ struct rdt_parse_data {
 };
 
 /**
- * struct rdt_resource - attributes of an RDT resource
- * @rid:               The index of the resource
- * @alloc_enabled:     Is allocation enabled on this machine
- * @mon_enabled:       Is monitoring enabled for this feature
- * @alloc_capable:     Is allocation available on this machine
- * @mon_capable:       Is monitor feature available on this machine
- * @name:              Name to use in "schemata" file
- * @num_closid:                Number of CLOSIDs available
- * @cache_level:       Which cache level defines scope of this resource
- * @default_ctrl:      Specifies default cache cbm or memory B/W percent.
+ * struct rdt_hw_resource - arch private attributes of a resctrl resource
+ * @r_resctrl:         Attributes of the resource used directly by resctrl.
+ * @num_closid:                Maximum number of closid this hardware can support,
+ *                     regardless of CDP. This is exposed via
+ *                     resctrl_arch_get_num_closid() to avoid confusion
+ *                     with struct resctrl_schema's property of the same name,
+ *                     which has been corrected for features like CDP.
  * @msr_base:          Base MSR address for CBMs
  * @msr_update:                Function pointer to update QOS MSRs
- * @data_width:                Character width of data when displaying
- * @domains:           All domains for this resource
- * @cache:             Cache allocation related data
- * @membw:             If the component has bandwidth controls, their properties.
- * @format_str:                Per resource format string to show domain value
- * @parse_ctrlval:     Per resource function pointer to parse control values
- * @evt_list:          List of monitoring events
- * @num_rmid:          Number of RMIDs available
  * @mon_scale:         cqm counter * mon_scale = occupancy in bytes
  * @mbm_width:         Monitor width, to detect and correct for overflow.
- * @fflags:            flags to choose base and info files
+ * @cdp_enabled:       CDP state of this resource
+ *
+ * Members of this structure are either private to the architecture
+ * e.g. mbm_width, or accessed via helpers that provide abstraction. e.g.
+ * msr_update and msr_base.
  */
-struct rdt_resource {
-       int                     rid;
-       bool                    alloc_enabled;
-       bool                    mon_enabled;
-       bool                    alloc_capable;
-       bool                    mon_capable;
-       char                    *name;
-       int                     num_closid;
-       int                     cache_level;
-       u32                     default_ctrl;
+struct rdt_hw_resource {
+       struct rdt_resource     r_resctrl;
+       u32                     num_closid;
        unsigned int            msr_base;
        void (*msr_update)      (struct rdt_domain *d, struct msr_param *m,
                                 struct rdt_resource *r);
-       int                     data_width;
-       struct list_head        domains;
-       struct rdt_cache        cache;
-       struct rdt_membw        membw;
-       const char              *format_str;
-       int (*parse_ctrlval)(struct rdt_parse_data *data,
-                            struct rdt_resource *r,
-                            struct rdt_domain *d);
-       struct list_head        evt_list;
-       int                     num_rmid;
        unsigned int            mon_scale;
        unsigned int            mbm_width;
-       unsigned long           fflags;
+       bool                    cdp_enabled;
 };
 
-int parse_cbm(struct rdt_parse_data *data, struct rdt_resource *r,
+static inline struct rdt_hw_resource *resctrl_to_arch_res(struct rdt_resource *r)
+{
+       return container_of(r, struct rdt_hw_resource, r_resctrl);
+}
+
+int parse_cbm(struct rdt_parse_data *data, struct resctrl_schema *s,
              struct rdt_domain *d);
-int parse_bw(struct rdt_parse_data *data, struct rdt_resource *r,
+int parse_bw(struct rdt_parse_data *data, struct resctrl_schema *s,
             struct rdt_domain *d);
 
 extern struct mutex rdtgroup_mutex;
 
-extern struct rdt_resource rdt_resources_all[];
+extern struct rdt_hw_resource rdt_resources_all[];
 extern struct rdtgroup rdtgroup_default;
 DECLARE_STATIC_KEY_FALSE(rdt_alloc_enable_key);
 
 extern struct dentry *debugfs_resctrl;
 
-enum {
+enum resctrl_res_level {
        RDT_RESOURCE_L3,
-       RDT_RESOURCE_L3DATA,
-       RDT_RESOURCE_L3CODE,
        RDT_RESOURCE_L2,
-       RDT_RESOURCE_L2DATA,
-       RDT_RESOURCE_L2CODE,
        RDT_RESOURCE_MBA,
 
        /* Must be the last */
        RDT_NUM_RESOURCES,
 };
 
+static inline struct rdt_resource *resctrl_inc(struct rdt_resource *res)
+{
+       struct rdt_hw_resource *hw_res = resctrl_to_arch_res(res);
+
+       hw_res++;
+       return &hw_res->r_resctrl;
+}
+
+static inline bool resctrl_arch_get_cdp_enabled(enum resctrl_res_level l)
+{
+       return rdt_resources_all[l].cdp_enabled;
+}
+
+int resctrl_arch_set_cdp_enabled(enum resctrl_res_level l, bool enable);
+
+/*
+ * To return the common struct rdt_resource, which is contained in struct
+ * rdt_hw_resource, walk the resctrl member of struct rdt_hw_resource.
+ */
 #define for_each_rdt_resource(r)                                             \
-       for (r = rdt_resources_all; r < rdt_resources_all + RDT_NUM_RESOURCES;\
-            r++)
+       for (r = &rdt_resources_all[0].r_resctrl;                             \
+            r <= &rdt_resources_all[RDT_NUM_RESOURCES - 1].r_resctrl;        \
+            r = resctrl_inc(r))
 
 #define for_each_capable_rdt_resource(r)                                     \
-       for (r = rdt_resources_all; r < rdt_resources_all + RDT_NUM_RESOURCES;\
-            r++)                                                             \
+       for_each_rdt_resource(r)                                              \
                if (r->alloc_capable || r->mon_capable)
 
 #define for_each_alloc_capable_rdt_resource(r)                               \
-       for (r = rdt_resources_all; r < rdt_resources_all + RDT_NUM_RESOURCES;\
-            r++)                                                             \
+       for_each_rdt_resource(r)                                              \
                if (r->alloc_capable)
 
 #define for_each_mon_capable_rdt_resource(r)                                 \
-       for (r = rdt_resources_all; r < rdt_resources_all + RDT_NUM_RESOURCES;\
-            r++)                                                             \
+       for_each_rdt_resource(r)                                              \
                if (r->mon_capable)
 
 #define for_each_alloc_enabled_rdt_resource(r)                               \
-       for (r = rdt_resources_all; r < rdt_resources_all + RDT_NUM_RESOURCES;\
-            r++)                                                             \
+       for_each_rdt_resource(r)                                              \
                if (r->alloc_enabled)
 
 #define for_each_mon_enabled_rdt_resource(r)                                 \
-       for (r = rdt_resources_all; r < rdt_resources_all + RDT_NUM_RESOURCES;\
-            r++)                                                             \
+       for_each_rdt_resource(r)                                              \
                if (r->mon_enabled)
 
 /* CPUID.(EAX=10H, ECX=ResID=1).EAX */
@@ -594,7 +508,7 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
                                char *buf, size_t nbytes, loff_t off);
 int rdtgroup_schemata_show(struct kernfs_open_file *of,
                           struct seq_file *s, void *v);
-bool rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d,
+bool rdtgroup_cbm_overlaps(struct resctrl_schema *s, struct rdt_domain *d,
                           unsigned long cbm, int closid, bool exclusive);
 unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r, struct rdt_domain *d,
                                  unsigned long cbm);
@@ -609,7 +523,6 @@ void rdt_pseudo_lock_release(void);
 int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp);
 void rdtgroup_pseudo_lock_remove(struct rdtgroup *rdtgrp);
 struct rdt_domain *get_domain_from_cpu(int cpu, struct rdt_resource *r);
-int update_domains(struct rdt_resource *r, int closid);
 int closids_supported(void);
 void closid_free(int closid);
 int alloc_rmid(void);
index f07c10b..c9f0f3d 100644 (file)
@@ -174,7 +174,7 @@ void __check_limbo(struct rdt_domain *d, bool force_free)
        struct rdt_resource *r;
        u32 crmid = 1, nrmid;
 
-       r = &rdt_resources_all[RDT_RESOURCE_L3];
+       r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
 
        /*
         * Skip RMID 0 and start from RMID 1 and check all the RMIDs that
@@ -232,7 +232,7 @@ static void add_rmid_to_limbo(struct rmid_entry *entry)
        int cpu;
        u64 val;
 
-       r = &rdt_resources_all[RDT_RESOURCE_L3];
+       r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
 
        entry->busy = 0;
        cpu = get_cpu();
@@ -285,15 +285,15 @@ static u64 mbm_overflow_count(u64 prev_msr, u64 cur_msr, unsigned int width)
        return chunks >>= shift;
 }
 
-static int __mon_event_count(u32 rmid, struct rmid_read *rr)
+static u64 __mon_event_count(u32 rmid, struct rmid_read *rr)
 {
+       struct rdt_hw_resource *hw_res = resctrl_to_arch_res(rr->r);
        struct mbm_state *m;
        u64 chunks, tval;
 
        tval = __rmid_read(rmid, rr->evtid);
        if (tval & (RMID_VAL_ERROR | RMID_VAL_UNAVAIL)) {
-               rr->val = tval;
-               return -EINVAL;
+               return tval;
        }
        switch (rr->evtid) {
        case QOS_L3_OCCUP_EVENT_ID:
@@ -307,10 +307,10 @@ static int __mon_event_count(u32 rmid, struct rmid_read *rr)
                break;
        default:
                /*
-                * Code would never reach here because
-                * an invalid event id would fail the __rmid_read.
+                * Code would never reach here because an invalid
+                * event id would fail the __rmid_read.
                 */
-               return -EINVAL;
+               return RMID_VAL_ERROR;
        }
 
        if (rr->first) {
@@ -319,7 +319,7 @@ static int __mon_event_count(u32 rmid, struct rmid_read *rr)
                return 0;
        }
 
-       chunks = mbm_overflow_count(m->prev_msr, tval, rr->r->mbm_width);
+       chunks = mbm_overflow_count(m->prev_msr, tval, hw_res->mbm_width);
        m->chunks += chunks;
        m->prev_msr = tval;
 
@@ -334,7 +334,7 @@ static int __mon_event_count(u32 rmid, struct rmid_read *rr)
  */
 static void mbm_bw_count(u32 rmid, struct rmid_read *rr)
 {
-       struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3];
+       struct rdt_hw_resource *hw_res = resctrl_to_arch_res(rr->r);
        struct mbm_state *m = &rr->d->mbm_local[rmid];
        u64 tval, cur_bw, chunks;
 
@@ -342,8 +342,8 @@ static void mbm_bw_count(u32 rmid, struct rmid_read *rr)
        if (tval & (RMID_VAL_ERROR | RMID_VAL_UNAVAIL))
                return;
 
-       chunks = mbm_overflow_count(m->prev_bw_msr, tval, rr->r->mbm_width);
-       cur_bw = (get_corrected_mbm_count(rmid, chunks) * r->mon_scale) >> 20;
+       chunks = mbm_overflow_count(m->prev_bw_msr, tval, hw_res->mbm_width);
+       cur_bw = (get_corrected_mbm_count(rmid, chunks) * hw_res->mon_scale) >> 20;
 
        if (m->delta_comp)
                m->delta_bw = abs(cur_bw - m->prev_bw);
@@ -361,23 +361,29 @@ void mon_event_count(void *info)
        struct rdtgroup *rdtgrp, *entry;
        struct rmid_read *rr = info;
        struct list_head *head;
+       u64 ret_val;
 
        rdtgrp = rr->rgrp;
 
-       if (__mon_event_count(rdtgrp->mon.rmid, rr))
-               return;
+       ret_val = __mon_event_count(rdtgrp->mon.rmid, rr);
 
        /*
-        * For Ctrl groups read data from child monitor groups.
+        * For Ctrl groups read data from child monitor groups and
+        * add them together. Count events which are read successfully.
+        * Discard the rmid_read's reporting errors.
         */
        head = &rdtgrp->mon.crdtgrp_list;
 
        if (rdtgrp->type == RDTCTRL_GROUP) {
                list_for_each_entry(entry, head, mon.crdtgrp_list) {
-                       if (__mon_event_count(entry->mon.rmid, rr))
-                               return;
+                       if (__mon_event_count(entry->mon.rmid, rr) == 0)
+                               ret_val = 0;
                }
        }
+
+       /* Report error if none of rmid_reads are successful */
+       if (ret_val)
+               rr->val = ret_val;
 }
 
 /*
@@ -416,6 +422,8 @@ static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm)
 {
        u32 closid, rmid, cur_msr, cur_msr_val, new_msr_val;
        struct mbm_state *pmbm_data, *cmbm_data;
+       struct rdt_hw_resource *hw_r_mba;
+       struct rdt_hw_domain *hw_dom_mba;
        u32 cur_bw, delta_bw, user_bw;
        struct rdt_resource *r_mba;
        struct rdt_domain *dom_mba;
@@ -425,7 +433,8 @@ static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm)
        if (!is_mbm_local_enabled())
                return;
 
-       r_mba = &rdt_resources_all[RDT_RESOURCE_MBA];
+       hw_r_mba = &rdt_resources_all[RDT_RESOURCE_MBA];
+       r_mba = &hw_r_mba->r_resctrl;
        closid = rgrp->closid;
        rmid = rgrp->mon.rmid;
        pmbm_data = &dom_mbm->mbm_local[rmid];
@@ -435,11 +444,16 @@ static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm)
                pr_warn_once("Failure to get domain for MBA update\n");
                return;
        }
+       hw_dom_mba = resctrl_to_arch_dom(dom_mba);
 
        cur_bw = pmbm_data->prev_bw;
-       user_bw = dom_mba->mbps_val[closid];
+       user_bw = resctrl_arch_get_config(r_mba, dom_mba, closid, CDP_NONE);
        delta_bw = pmbm_data->delta_bw;
-       cur_msr_val = dom_mba->ctrl_val[closid];
+       /*
+        * resctrl_arch_get_config() chooses the mbps/ctrl value to return
+        * based on is_mba_sc(). For now, reach into the hw_dom.
+        */
+       cur_msr_val = hw_dom_mba->ctrl_val[closid];
 
        /*
         * For Ctrl groups read data from child monitor groups.
@@ -474,9 +488,9 @@ static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm)
                return;
        }
 
-       cur_msr = r_mba->msr_base + closid;
+       cur_msr = hw_r_mba->msr_base + closid;
        wrmsrl(cur_msr, delay_bw_map(new_msr_val, r_mba));
-       dom_mba->ctrl_val[closid] = new_msr_val;
+       hw_dom_mba->ctrl_val[closid] = new_msr_val;
 
        /*
         * Delta values are updated dynamically package wise for each
@@ -538,7 +552,7 @@ void cqm_handle_limbo(struct work_struct *work)
 
        mutex_lock(&rdtgroup_mutex);
 
-       r = &rdt_resources_all[RDT_RESOURCE_L3];
+       r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
        d = container_of(work, struct rdt_domain, cqm_limbo.work);
 
        __check_limbo(d, false);
@@ -574,7 +588,7 @@ void mbm_handle_overflow(struct work_struct *work)
        if (!static_branch_likely(&rdt_mon_enable_key))
                goto out_unlock;
 
-       r = &rdt_resources_all[RDT_RESOURCE_L3];
+       r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
        d = container_of(work, struct rdt_domain, mbm_over.work);
 
        list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) {
@@ -671,15 +685,16 @@ static void l3_mon_evt_init(struct rdt_resource *r)
 int rdt_get_mon_l3_config(struct rdt_resource *r)
 {
        unsigned int mbm_offset = boot_cpu_data.x86_cache_mbm_width_offset;
+       struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
        unsigned int cl_size = boot_cpu_data.x86_cache_size;
        int ret;
 
-       r->mon_scale = boot_cpu_data.x86_cache_occ_scale;
+       hw_res->mon_scale = boot_cpu_data.x86_cache_occ_scale;
        r->num_rmid = boot_cpu_data.x86_cache_max_rmid + 1;
-       r->mbm_width = MBM_CNTR_WIDTH_BASE;
+       hw_res->mbm_width = MBM_CNTR_WIDTH_BASE;
 
        if (mbm_offset > 0 && mbm_offset <= MBM_CNTR_WIDTH_OFFSET_MAX)
-               r->mbm_width += mbm_offset;
+               hw_res->mbm_width += mbm_offset;
        else if (mbm_offset > MBM_CNTR_WIDTH_OFFSET_MAX)
                pr_warn("Ignoring impossible MBM counter offset\n");
 
@@ -693,7 +708,7 @@ int rdt_get_mon_l3_config(struct rdt_resource *r)
        resctrl_cqm_threshold = cl_size * 1024 / r->num_rmid;
 
        /* h/w works in units of "boot_cpu_data.x86_cache_occ_scale" */
-       resctrl_cqm_threshold /= r->mon_scale;
+       resctrl_cqm_threshold /= hw_res->mon_scale;
 
        ret = dom_data_init(r);
        if (ret)
index 2207916..db813f8 100644 (file)
@@ -250,7 +250,7 @@ static void pseudo_lock_region_clear(struct pseudo_lock_region *plr)
        plr->line_size = 0;
        kfree(plr->kmem);
        plr->kmem = NULL;
-       plr->r = NULL;
+       plr->s = NULL;
        if (plr->d)
                plr->d->plr = NULL;
        plr->d = NULL;
@@ -294,10 +294,10 @@ static int pseudo_lock_region_init(struct pseudo_lock_region *plr)
 
        ci = get_cpu_cacheinfo(plr->cpu);
 
-       plr->size = rdtgroup_cbm_to_size(plr->r, plr->d, plr->cbm);
+       plr->size = rdtgroup_cbm_to_size(plr->s->res, plr->d, plr->cbm);
 
        for (i = 0; i < ci->num_leaves; i++) {
-               if (ci->info_list[i].level == plr->r->cache_level) {
+               if (ci->info_list[i].level == plr->s->res->cache_level) {
                        plr->line_size = ci->info_list[i].coherency_line_size;
                        return 0;
                }
@@ -688,8 +688,8 @@ int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp)
         *   resource, the portion of cache used by it should be made
         *   unavailable to all future allocations from both resources.
         */
-       if (rdt_resources_all[RDT_RESOURCE_L3DATA].alloc_enabled ||
-           rdt_resources_all[RDT_RESOURCE_L2DATA].alloc_enabled) {
+       if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L3) ||
+           resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L2)) {
                rdt_last_cmd_puts("CDP enabled\n");
                return -EINVAL;
        }
@@ -800,7 +800,7 @@ bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, unsigned long cbm
        unsigned long cbm_b;
 
        if (d->plr) {
-               cbm_len = d->plr->r->cache.cbm_len;
+               cbm_len = d->plr->s->res->cache.cbm_len;
                cbm_b = d->plr->cbm;
                if (bitmap_intersects(&cbm, &cbm_b, cbm_len))
                        return true;
index 01fd30e..b57b3db 100644 (file)
@@ -39,6 +39,9 @@ static struct kernfs_root *rdt_root;
 struct rdtgroup rdtgroup_default;
 LIST_HEAD(rdt_all_groups);
 
+/* list of entries for the schemata file */
+LIST_HEAD(resctrl_schema_all);
+
 /* Kernel fs node for "info" directory under root */
 static struct kernfs_node *kn_info;
 
@@ -100,12 +103,12 @@ int closids_supported(void)
 
 static void closid_init(void)
 {
-       struct rdt_resource *r;
-       int rdt_min_closid = 32;
+       struct resctrl_schema *s;
+       u32 rdt_min_closid = 32;
 
        /* Compute rdt_min_closid across all resources */
-       for_each_alloc_enabled_rdt_resource(r)
-               rdt_min_closid = min(rdt_min_closid, r->num_closid);
+       list_for_each_entry(s, &resctrl_schema_all, list)
+               rdt_min_closid = min(rdt_min_closid, s->num_closid);
 
        closid_free_map = BIT_MASK(rdt_min_closid) - 1;
 
@@ -842,16 +845,17 @@ static int rdt_last_cmd_status_show(struct kernfs_open_file *of,
 static int rdt_num_closids_show(struct kernfs_open_file *of,
                                struct seq_file *seq, void *v)
 {
-       struct rdt_resource *r = of->kn->parent->priv;
+       struct resctrl_schema *s = of->kn->parent->priv;
 
-       seq_printf(seq, "%d\n", r->num_closid);
+       seq_printf(seq, "%u\n", s->num_closid);
        return 0;
 }
 
 static int rdt_default_ctrl_show(struct kernfs_open_file *of,
                             struct seq_file *seq, void *v)
 {
-       struct rdt_resource *r = of->kn->parent->priv;
+       struct resctrl_schema *s = of->kn->parent->priv;
+       struct rdt_resource *r = s->res;
 
        seq_printf(seq, "%x\n", r->default_ctrl);
        return 0;
@@ -860,7 +864,8 @@ static int rdt_default_ctrl_show(struct kernfs_open_file *of,
 static int rdt_min_cbm_bits_show(struct kernfs_open_file *of,
                             struct seq_file *seq, void *v)
 {
-       struct rdt_resource *r = of->kn->parent->priv;
+       struct resctrl_schema *s = of->kn->parent->priv;
+       struct rdt_resource *r = s->res;
 
        seq_printf(seq, "%u\n", r->cache.min_cbm_bits);
        return 0;
@@ -869,7 +874,8 @@ static int rdt_min_cbm_bits_show(struct kernfs_open_file *of,
 static int rdt_shareable_bits_show(struct kernfs_open_file *of,
                                   struct seq_file *seq, void *v)
 {
-       struct rdt_resource *r = of->kn->parent->priv;
+       struct resctrl_schema *s = of->kn->parent->priv;
+       struct rdt_resource *r = s->res;
 
        seq_printf(seq, "%x\n", r->cache.shareable_bits);
        return 0;
@@ -892,38 +898,40 @@ static int rdt_shareable_bits_show(struct kernfs_open_file *of,
 static int rdt_bit_usage_show(struct kernfs_open_file *of,
                              struct seq_file *seq, void *v)
 {
-       struct rdt_resource *r = of->kn->parent->priv;
+       struct resctrl_schema *s = of->kn->parent->priv;
        /*
         * Use unsigned long even though only 32 bits are used to ensure
         * test_bit() is used safely.
         */
        unsigned long sw_shareable = 0, hw_shareable = 0;
        unsigned long exclusive = 0, pseudo_locked = 0;
+       struct rdt_resource *r = s->res;
        struct rdt_domain *dom;
        int i, hwb, swb, excl, psl;
        enum rdtgrp_mode mode;
        bool sep = false;
-       u32 *ctrl;
+       u32 ctrl_val;
 
        mutex_lock(&rdtgroup_mutex);
        hw_shareable = r->cache.shareable_bits;
        list_for_each_entry(dom, &r->domains, list) {
                if (sep)
                        seq_putc(seq, ';');
-               ctrl = dom->ctrl_val;
                sw_shareable = 0;
                exclusive = 0;
                seq_printf(seq, "%d=", dom->id);
-               for (i = 0; i < closids_supported(); i++, ctrl++) {
+               for (i = 0; i < closids_supported(); i++) {
                        if (!closid_allocated(i))
                                continue;
+                       ctrl_val = resctrl_arch_get_config(r, dom, i,
+                                                          s->conf_type);
                        mode = rdtgroup_mode_by_closid(i);
                        switch (mode) {
                        case RDT_MODE_SHAREABLE:
-                               sw_shareable |= *ctrl;
+                               sw_shareable |= ctrl_val;
                                break;
                        case RDT_MODE_EXCLUSIVE:
-                               exclusive |= *ctrl;
+                               exclusive |= ctrl_val;
                                break;
                        case RDT_MODE_PSEUDO_LOCKSETUP:
                        /*
@@ -970,7 +978,8 @@ static int rdt_bit_usage_show(struct kernfs_open_file *of,
 static int rdt_min_bw_show(struct kernfs_open_file *of,
                             struct seq_file *seq, void *v)
 {
-       struct rdt_resource *r = of->kn->parent->priv;
+       struct resctrl_schema *s = of->kn->parent->priv;
+       struct rdt_resource *r = s->res;
 
        seq_printf(seq, "%u\n", r->membw.min_bw);
        return 0;
@@ -1001,7 +1010,8 @@ static int rdt_mon_features_show(struct kernfs_open_file *of,
 static int rdt_bw_gran_show(struct kernfs_open_file *of,
                             struct seq_file *seq, void *v)
 {
-       struct rdt_resource *r = of->kn->parent->priv;
+       struct resctrl_schema *s = of->kn->parent->priv;
+       struct rdt_resource *r = s->res;
 
        seq_printf(seq, "%u\n", r->membw.bw_gran);
        return 0;
@@ -1010,7 +1020,8 @@ static int rdt_bw_gran_show(struct kernfs_open_file *of,
 static int rdt_delay_linear_show(struct kernfs_open_file *of,
                             struct seq_file *seq, void *v)
 {
-       struct rdt_resource *r = of->kn->parent->priv;
+       struct resctrl_schema *s = of->kn->parent->priv;
+       struct rdt_resource *r = s->res;
 
        seq_printf(seq, "%u\n", r->membw.delay_linear);
        return 0;
@@ -1020,8 +1031,9 @@ static int max_threshold_occ_show(struct kernfs_open_file *of,
                                  struct seq_file *seq, void *v)
 {
        struct rdt_resource *r = of->kn->parent->priv;
+       struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
 
-       seq_printf(seq, "%u\n", resctrl_cqm_threshold * r->mon_scale);
+       seq_printf(seq, "%u\n", resctrl_cqm_threshold * hw_res->mon_scale);
 
        return 0;
 }
@@ -1029,7 +1041,8 @@ static int max_threshold_occ_show(struct kernfs_open_file *of,
 static int rdt_thread_throttle_mode_show(struct kernfs_open_file *of,
                                         struct seq_file *seq, void *v)
 {
-       struct rdt_resource *r = of->kn->parent->priv;
+       struct resctrl_schema *s = of->kn->parent->priv;
+       struct rdt_resource *r = s->res;
 
        if (r->membw.throttle_mode == THREAD_THROTTLE_PER_THREAD)
                seq_puts(seq, "per-thread\n");
@@ -1042,7 +1055,7 @@ static int rdt_thread_throttle_mode_show(struct kernfs_open_file *of,
 static ssize_t max_threshold_occ_write(struct kernfs_open_file *of,
                                       char *buf, size_t nbytes, loff_t off)
 {
-       struct rdt_resource *r = of->kn->parent->priv;
+       struct rdt_hw_resource *hw_res;
        unsigned int bytes;
        int ret;
 
@@ -1053,7 +1066,8 @@ static ssize_t max_threshold_occ_write(struct kernfs_open_file *of,
        if (bytes > (boot_cpu_data.x86_cache_size * 1024))
                return -EINVAL;
 
-       resctrl_cqm_threshold = bytes / r->mon_scale;
+       hw_res = resctrl_to_arch_res(of->kn->parent->priv);
+       resctrl_cqm_threshold = bytes / hw_res->mon_scale;
 
        return nbytes;
 }
@@ -1078,76 +1092,17 @@ static int rdtgroup_mode_show(struct kernfs_open_file *of,
        return 0;
 }
 
-/**
- * rdt_cdp_peer_get - Retrieve CDP peer if it exists
- * @r: RDT resource to which RDT domain @d belongs
- * @d: Cache instance for which a CDP peer is requested
- * @r_cdp: RDT resource that shares hardware with @r (RDT resource peer)
- *         Used to return the result.
- * @d_cdp: RDT domain that shares hardware with @d (RDT domain peer)
- *         Used to return the result.
- *
- * RDT resources are managed independently and by extension the RDT domains
- * (RDT resource instances) are managed independently also. The Code and
- * Data Prioritization (CDP) RDT resources, while managed independently,
- * could refer to the same underlying hardware. For example,
- * RDT_RESOURCE_L2CODE and RDT_RESOURCE_L2DATA both refer to the L2 cache.
- *
- * When provided with an RDT resource @r and an instance of that RDT
- * resource @d rdt_cdp_peer_get() will return if there is a peer RDT
- * resource and the exact instance that shares the same hardware.
- *
- * Return: 0 if a CDP peer was found, <0 on error or if no CDP peer exists.
- *         If a CDP peer was found, @r_cdp will point to the peer RDT resource
- *         and @d_cdp will point to the peer RDT domain.
- */
-static int rdt_cdp_peer_get(struct rdt_resource *r, struct rdt_domain *d,
-                           struct rdt_resource **r_cdp,
-                           struct rdt_domain **d_cdp)
+static enum resctrl_conf_type resctrl_peer_type(enum resctrl_conf_type my_type)
 {
-       struct rdt_resource *_r_cdp = NULL;
-       struct rdt_domain *_d_cdp = NULL;
-       int ret = 0;
-
-       switch (r->rid) {
-       case RDT_RESOURCE_L3DATA:
-               _r_cdp = &rdt_resources_all[RDT_RESOURCE_L3CODE];
-               break;
-       case RDT_RESOURCE_L3CODE:
-               _r_cdp =  &rdt_resources_all[RDT_RESOURCE_L3DATA];
-               break;
-       case RDT_RESOURCE_L2DATA:
-               _r_cdp =  &rdt_resources_all[RDT_RESOURCE_L2CODE];
-               break;
-       case RDT_RESOURCE_L2CODE:
-               _r_cdp =  &rdt_resources_all[RDT_RESOURCE_L2DATA];
-               break;
+       switch (my_type) {
+       case CDP_CODE:
+               return CDP_DATA;
+       case CDP_DATA:
+               return CDP_CODE;
        default:
-               ret = -ENOENT;
-               goto out;
-       }
-
-       /*
-        * When a new CPU comes online and CDP is enabled then the new
-        * RDT domains (if any) associated with both CDP RDT resources
-        * are added in the same CPU online routine while the
-        * rdtgroup_mutex is held. It should thus not happen for one
-        * RDT domain to exist and be associated with its RDT CDP
-        * resource but there is no RDT domain associated with the
-        * peer RDT CDP resource. Hence the WARN.
-        */
-       _d_cdp = rdt_find_domain(_r_cdp, d->id, NULL);
-       if (WARN_ON(IS_ERR_OR_NULL(_d_cdp))) {
-               _r_cdp = NULL;
-               _d_cdp = NULL;
-               ret = -EINVAL;
+       case CDP_NONE:
+               return CDP_NONE;
        }
-
-out:
-       *r_cdp = _r_cdp;
-       *d_cdp = _d_cdp;
-
-       return ret;
 }
 
 /**
@@ -1171,11 +1126,11 @@ out:
  * Return: false if CBM does not overlap, true if it does.
  */
 static bool __rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d,
-                                   unsigned long cbm, int closid, bool exclusive)
+                                   unsigned long cbm, int closid,
+                                   enum resctrl_conf_type type, bool exclusive)
 {
        enum rdtgrp_mode mode;
        unsigned long ctrl_b;
-       u32 *ctrl;
        int i;
 
        /* Check for any overlap with regions used by hardware directly */
@@ -1186,9 +1141,8 @@ static bool __rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d
        }
 
        /* Check for overlap with other resource groups */
-       ctrl = d->ctrl_val;
-       for (i = 0; i < closids_supported(); i++, ctrl++) {
-               ctrl_b = *ctrl;
+       for (i = 0; i < closids_supported(); i++) {
+               ctrl_b = resctrl_arch_get_config(r, d, i, type);
                mode = rdtgroup_mode_by_closid(i);
                if (closid_allocated(i) && i != closid &&
                    mode != RDT_MODE_PSEUDO_LOCKSETUP) {
@@ -1208,7 +1162,7 @@ static bool __rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d
 
 /**
  * rdtgroup_cbm_overlaps - Does CBM overlap with other use of hardware
- * @r: Resource to which domain instance @d belongs.
+ * @s: Schema for the resource to which domain instance @d belongs.
  * @d: The domain instance for which @closid is being tested.
  * @cbm: Capacity bitmask being tested.
  * @closid: Intended closid for @cbm.
@@ -1226,19 +1180,19 @@ static bool __rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d
  *
  * Return: true if CBM overlap detected, false if there is no overlap
  */
-bool rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d,
+bool rdtgroup_cbm_overlaps(struct resctrl_schema *s, struct rdt_domain *d,
                           unsigned long cbm, int closid, bool exclusive)
 {
-       struct rdt_resource *r_cdp;
-       struct rdt_domain *d_cdp;
+       enum resctrl_conf_type peer_type = resctrl_peer_type(s->conf_type);
+       struct rdt_resource *r = s->res;
 
-       if (__rdtgroup_cbm_overlaps(r, d, cbm, closid, exclusive))
+       if (__rdtgroup_cbm_overlaps(r, d, cbm, closid, s->conf_type,
+                                   exclusive))
                return true;
 
-       if (rdt_cdp_peer_get(r, d, &r_cdp, &d_cdp) < 0)
+       if (!resctrl_arch_get_cdp_enabled(r->rid))
                return false;
-
-       return  __rdtgroup_cbm_overlaps(r_cdp, d_cdp, cbm, closid, exclusive);
+       return  __rdtgroup_cbm_overlaps(r, d, cbm, closid, peer_type, exclusive);
 }
 
 /**
@@ -1256,17 +1210,21 @@ bool rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d,
 static bool rdtgroup_mode_test_exclusive(struct rdtgroup *rdtgrp)
 {
        int closid = rdtgrp->closid;
+       struct resctrl_schema *s;
        struct rdt_resource *r;
        bool has_cache = false;
        struct rdt_domain *d;
+       u32 ctrl;
 
-       for_each_alloc_enabled_rdt_resource(r) {
+       list_for_each_entry(s, &resctrl_schema_all, list) {
+               r = s->res;
                if (r->rid == RDT_RESOURCE_MBA)
                        continue;
                has_cache = true;
                list_for_each_entry(d, &r->domains, list) {
-                       if (rdtgroup_cbm_overlaps(r, d, d->ctrl_val[closid],
-                                                 rdtgrp->closid, false)) {
+                       ctrl = resctrl_arch_get_config(r, d, closid,
+                                                      s->conf_type);
+                       if (rdtgroup_cbm_overlaps(s, d, ctrl, closid, false)) {
                                rdt_last_cmd_puts("Schemata overlaps\n");
                                return false;
                        }
@@ -1397,6 +1355,7 @@ unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r,
 static int rdtgroup_size_show(struct kernfs_open_file *of,
                              struct seq_file *s, void *v)
 {
+       struct resctrl_schema *schema;
        struct rdtgroup *rdtgrp;
        struct rdt_resource *r;
        struct rdt_domain *d;
@@ -1418,8 +1377,8 @@ static int rdtgroup_size_show(struct kernfs_open_file *of,
                        ret = -ENODEV;
                } else {
                        seq_printf(s, "%*s:", max_name_width,
-                                  rdtgrp->plr->r->name);
-                       size = rdtgroup_cbm_to_size(rdtgrp->plr->r,
+                                  rdtgrp->plr->s->name);
+                       size = rdtgroup_cbm_to_size(rdtgrp->plr->s->res,
                                                    rdtgrp->plr->d,
                                                    rdtgrp->plr->cbm);
                        seq_printf(s, "%d=%u\n", rdtgrp->plr->d->id, size);
@@ -1427,18 +1386,19 @@ static int rdtgroup_size_show(struct kernfs_open_file *of,
                goto out;
        }
 
-       for_each_alloc_enabled_rdt_resource(r) {
+       list_for_each_entry(schema, &resctrl_schema_all, list) {
+               r = schema->res;
                sep = false;
-               seq_printf(s, "%*s:", max_name_width, r->name);
+               seq_printf(s, "%*s:", max_name_width, schema->name);
                list_for_each_entry(d, &r->domains, list) {
                        if (sep)
                                seq_putc(s, ';');
                        if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
                                size = 0;
                        } else {
-                               ctrl = (!is_mba_sc(r) ?
-                                               d->ctrl_val[rdtgrp->closid] :
-                                               d->mbps_val[rdtgrp->closid]);
+                               ctrl = resctrl_arch_get_config(r, d,
+                                                              rdtgrp->closid,
+                                                              schema->conf_type);
                                if (r->rid == RDT_RESOURCE_MBA)
                                        size = ctrl;
                                else
@@ -1757,14 +1717,14 @@ int rdtgroup_kn_mode_restore(struct rdtgroup *r, const char *name,
        return ret;
 }
 
-static int rdtgroup_mkdir_info_resdir(struct rdt_resource *r, char *name,
+static int rdtgroup_mkdir_info_resdir(void *priv, char *name,
                                      unsigned long fflags)
 {
        struct kernfs_node *kn_subdir;
        int ret;
 
        kn_subdir = kernfs_create_dir(kn_info, name,
-                                     kn_info->mode, r);
+                                     kn_info->mode, priv);
        if (IS_ERR(kn_subdir))
                return PTR_ERR(kn_subdir);
 
@@ -1781,6 +1741,7 @@ static int rdtgroup_mkdir_info_resdir(struct rdt_resource *r, char *name,
 
 static int rdtgroup_create_info_dir(struct kernfs_node *parent_kn)
 {
+       struct resctrl_schema *s;
        struct rdt_resource *r;
        unsigned long fflags;
        char name[32];
@@ -1795,9 +1756,11 @@ static int rdtgroup_create_info_dir(struct kernfs_node *parent_kn)
        if (ret)
                goto out_destroy;
 
-       for_each_alloc_enabled_rdt_resource(r) {
+       /* loop over enabled controls, these are all alloc_enabled */
+       list_for_each_entry(s, &resctrl_schema_all, list) {
+               r = s->res;
                fflags =  r->fflags | RF_CTRL_INFO;
-               ret = rdtgroup_mkdir_info_resdir(r, r->name, fflags);
+               ret = rdtgroup_mkdir_info_resdir(s, s->name, fflags);
                if (ret)
                        goto out_destroy;
        }
@@ -1867,7 +1830,7 @@ static void l2_qos_cfg_update(void *arg)
 
 static inline bool is_mba_linear(void)
 {
-       return rdt_resources_all[RDT_RESOURCE_MBA].membw.delay_linear;
+       return rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl.membw.delay_linear;
 }
 
 static int set_cache_qos_cfg(int level, bool enable)
@@ -1888,7 +1851,7 @@ static int set_cache_qos_cfg(int level, bool enable)
        if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
                return -ENOMEM;
 
-       r_l = &rdt_resources_all[level];
+       r_l = &rdt_resources_all[level].r_resctrl;
        list_for_each_entry(d, &r_l->domains, list) {
                if (r_l->cache.arch_has_per_cpu_cfg)
                        /* Pick all the CPUs in the domain instance */
@@ -1914,14 +1877,16 @@ static int set_cache_qos_cfg(int level, bool enable)
 /* Restore the qos cfg state when a domain comes online */
 void rdt_domain_reconfigure_cdp(struct rdt_resource *r)
 {
-       if (!r->alloc_capable)
+       struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
+
+       if (!r->cdp_capable)
                return;
 
-       if (r == &rdt_resources_all[RDT_RESOURCE_L2DATA])
-               l2_qos_cfg_update(&r->alloc_enabled);
+       if (r->rid == RDT_RESOURCE_L2)
+               l2_qos_cfg_update(&hw_res->cdp_enabled);
 
-       if (r == &rdt_resources_all[RDT_RESOURCE_L3DATA])
-               l3_qos_cfg_update(&r->alloc_enabled);
+       if (r->rid == RDT_RESOURCE_L3)
+               l3_qos_cfg_update(&hw_res->cdp_enabled);
 }
 
 /*
@@ -1932,7 +1897,8 @@ void rdt_domain_reconfigure_cdp(struct rdt_resource *r)
  */
 static int set_mba_sc(bool mba_sc)
 {
-       struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_MBA];
+       struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl;
+       struct rdt_hw_domain *hw_dom;
        struct rdt_domain *d;
 
        if (!is_mbm_enabled() || !is_mba_linear() ||
@@ -1940,73 +1906,60 @@ static int set_mba_sc(bool mba_sc)
                return -EINVAL;
 
        r->membw.mba_sc = mba_sc;
-       list_for_each_entry(d, &r->domains, list)
-               setup_default_ctrlval(r, d->ctrl_val, d->mbps_val);
+       list_for_each_entry(d, &r->domains, list) {
+               hw_dom = resctrl_to_arch_dom(d);
+               setup_default_ctrlval(r, hw_dom->ctrl_val, hw_dom->mbps_val);
+       }
 
        return 0;
 }
 
-static int cdp_enable(int level, int data_type, int code_type)
+static int cdp_enable(int level)
 {
-       struct rdt_resource *r_ldata = &rdt_resources_all[data_type];
-       struct rdt_resource *r_lcode = &rdt_resources_all[code_type];
-       struct rdt_resource *r_l = &rdt_resources_all[level];
+       struct rdt_resource *r_l = &rdt_resources_all[level].r_resctrl;
        int ret;
 
-       if (!r_l->alloc_capable || !r_ldata->alloc_capable ||
-           !r_lcode->alloc_capable)
+       if (!r_l->alloc_capable)
                return -EINVAL;
 
        ret = set_cache_qos_cfg(level, true);
-       if (!ret) {
-               r_l->alloc_enabled = false;
-               r_ldata->alloc_enabled = true;
-               r_lcode->alloc_enabled = true;
-       }
+       if (!ret)
+               rdt_resources_all[level].cdp_enabled = true;
+
        return ret;
 }
 
-static int cdpl3_enable(void)
+static void cdp_disable(int level)
 {
-       return cdp_enable(RDT_RESOURCE_L3, RDT_RESOURCE_L3DATA,
-                         RDT_RESOURCE_L3CODE);
-}
+       struct rdt_hw_resource *r_hw = &rdt_resources_all[level];
 
-static int cdpl2_enable(void)
-{
-       return cdp_enable(RDT_RESOURCE_L2, RDT_RESOURCE_L2DATA,
-                         RDT_RESOURCE_L2CODE);
+       if (r_hw->cdp_enabled) {
+               set_cache_qos_cfg(level, false);
+               r_hw->cdp_enabled = false;
+       }
 }
 
-static void cdp_disable(int level, int data_type, int code_type)
+int resctrl_arch_set_cdp_enabled(enum resctrl_res_level l, bool enable)
 {
-       struct rdt_resource *r = &rdt_resources_all[level];
+       struct rdt_hw_resource *hw_res = &rdt_resources_all[l];
 
-       r->alloc_enabled = r->alloc_capable;
+       if (!hw_res->r_resctrl.cdp_capable)
+               return -EINVAL;
 
-       if (rdt_resources_all[data_type].alloc_enabled) {
-               rdt_resources_all[data_type].alloc_enabled = false;
-               rdt_resources_all[code_type].alloc_enabled = false;
-               set_cache_qos_cfg(level, false);
-       }
-}
+       if (enable)
+               return cdp_enable(l);
 
-static void cdpl3_disable(void)
-{
-       cdp_disable(RDT_RESOURCE_L3, RDT_RESOURCE_L3DATA, RDT_RESOURCE_L3CODE);
-}
+       cdp_disable(l);
 
-static void cdpl2_disable(void)
-{
-       cdp_disable(RDT_RESOURCE_L2, RDT_RESOURCE_L2DATA, RDT_RESOURCE_L2CODE);
+       return 0;
 }
 
 static void cdp_disable_all(void)
 {
-       if (rdt_resources_all[RDT_RESOURCE_L3DATA].alloc_enabled)
-               cdpl3_disable();
-       if (rdt_resources_all[RDT_RESOURCE_L2DATA].alloc_enabled)
-               cdpl2_disable();
+       if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L3))
+               resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L3, false);
+       if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L2))
+               resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L2, false);
 }
 
 /*
@@ -2084,10 +2037,10 @@ static int rdt_enable_ctx(struct rdt_fs_context *ctx)
        int ret = 0;
 
        if (ctx->enable_cdpl2)
-               ret = cdpl2_enable();
+               ret = resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L2, true);
 
        if (!ret && ctx->enable_cdpl3)
-               ret = cdpl3_enable();
+               ret = resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L3, true);
 
        if (!ret && ctx->enable_mba_mbps)
                ret = set_mba_sc(true);
@@ -2095,6 +2048,92 @@ static int rdt_enable_ctx(struct rdt_fs_context *ctx)
        return ret;
 }
 
+static int schemata_list_add(struct rdt_resource *r, enum resctrl_conf_type type)
+{
+       struct resctrl_schema *s;
+       const char *suffix = "";
+       int ret, cl;
+
+       s = kzalloc(sizeof(*s), GFP_KERNEL);
+       if (!s)
+               return -ENOMEM;
+
+       s->res = r;
+       s->num_closid = resctrl_arch_get_num_closid(r);
+       if (resctrl_arch_get_cdp_enabled(r->rid))
+               s->num_closid /= 2;
+
+       s->conf_type = type;
+       switch (type) {
+       case CDP_CODE:
+               suffix = "CODE";
+               break;
+       case CDP_DATA:
+               suffix = "DATA";
+               break;
+       case CDP_NONE:
+               suffix = "";
+               break;
+       }
+
+       ret = snprintf(s->name, sizeof(s->name), "%s%s", r->name, suffix);
+       if (ret >= sizeof(s->name)) {
+               kfree(s);
+               return -EINVAL;
+       }
+
+       cl = strlen(s->name);
+
+       /*
+        * If CDP is supported by this resource, but not enabled,
+        * include the suffix. This ensures the tabular format of the
+        * schemata file does not change between mounts of the filesystem.
+        */
+       if (r->cdp_capable && !resctrl_arch_get_cdp_enabled(r->rid))
+               cl += 4;
+
+       if (cl > max_name_width)
+               max_name_width = cl;
+
+       INIT_LIST_HEAD(&s->list);
+       list_add(&s->list, &resctrl_schema_all);
+
+       return 0;
+}
+
+static int schemata_list_create(void)
+{
+       struct rdt_resource *r;
+       int ret = 0;
+
+       for_each_alloc_enabled_rdt_resource(r) {
+               if (resctrl_arch_get_cdp_enabled(r->rid)) {
+                       ret = schemata_list_add(r, CDP_CODE);
+                       if (ret)
+                               break;
+
+                       ret = schemata_list_add(r, CDP_DATA);
+               } else {
+                       ret = schemata_list_add(r, CDP_NONE);
+               }
+
+               if (ret)
+                       break;
+       }
+
+       return ret;
+}
+
+static void schemata_list_destroy(void)
+{
+       struct resctrl_schema *s, *tmp;
+
+       list_for_each_entry_safe(s, tmp, &resctrl_schema_all, list) {
+               list_del(&s->list);
+               kfree(s);
+       }
+}
+
 static int rdt_get_tree(struct fs_context *fc)
 {
        struct rdt_fs_context *ctx = rdt_fc2context(fc);
@@ -2116,11 +2155,17 @@ static int rdt_get_tree(struct fs_context *fc)
        if (ret < 0)
                goto out_cdp;
 
+       ret = schemata_list_create();
+       if (ret) {
+               schemata_list_destroy();
+               goto out_mba;
+       }
+
        closid_init();
 
        ret = rdtgroup_create_info_dir(rdtgroup_default.kn);
        if (ret < 0)
-               goto out_mba;
+               goto out_schemata_free;
 
        if (rdt_mon_capable) {
                ret = mongroup_create_dir(rdtgroup_default.kn,
@@ -2153,7 +2198,7 @@ static int rdt_get_tree(struct fs_context *fc)
                static_branch_enable_cpuslocked(&rdt_enable_key);
 
        if (is_mbm_enabled()) {
-               r = &rdt_resources_all[RDT_RESOURCE_L3];
+               r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
                list_for_each_entry(dom, &r->domains, list)
                        mbm_setup_overflow_handler(dom, MBM_OVERFLOW_INTERVAL);
        }
@@ -2170,6 +2215,8 @@ out_mongrp:
                kernfs_remove(kn_mongrp);
 out_info:
        kernfs_remove(kn_info);
+out_schemata_free:
+       schemata_list_destroy();
 out_mba:
        if (ctx->enable_mba_mbps)
                set_mba_sc(false);
@@ -2257,6 +2304,8 @@ static int rdt_init_fs_context(struct fs_context *fc)
 
 static int reset_all_ctrls(struct rdt_resource *r)
 {
+       struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
+       struct rdt_hw_domain *hw_dom;
        struct msr_param msr_param;
        cpumask_var_t cpu_mask;
        struct rdt_domain *d;
@@ -2267,7 +2316,7 @@ static int reset_all_ctrls(struct rdt_resource *r)
 
        msr_param.res = r;
        msr_param.low = 0;
-       msr_param.high = r->num_closid;
+       msr_param.high = hw_res->num_closid;
 
        /*
         * Disable resource control for this resource by setting all
@@ -2275,10 +2324,11 @@ static int reset_all_ctrls(struct rdt_resource *r)
         * from each domain to update the MSRs below.
         */
        list_for_each_entry(d, &r->domains, list) {
+               hw_dom = resctrl_to_arch_dom(d);
                cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);
 
-               for (i = 0; i < r->num_closid; i++)
-                       d->ctrl_val[i] = r->default_ctrl;
+               for (i = 0; i < hw_res->num_closid; i++)
+                       hw_dom->ctrl_val[i] = r->default_ctrl;
        }
        cpu = get_cpu();
        /* Update CBM on this cpu if it's in cpu_mask. */
@@ -2408,6 +2458,7 @@ static void rdt_kill_sb(struct super_block *sb)
        rmdir_all_sub();
        rdt_pseudo_lock_release();
        rdtgroup_default.mode = RDT_MODE_SHAREABLE;
+       schemata_list_destroy();
        static_branch_disable_cpuslocked(&rdt_alloc_enable_key);
        static_branch_disable_cpuslocked(&rdt_mon_enable_key);
        static_branch_disable_cpuslocked(&rdt_enable_key);
@@ -2642,23 +2693,24 @@ static u32 cbm_ensure_valid(u32 _val, struct rdt_resource *r)
  * Set the RDT domain up to start off with all usable allocations. That is,
  * all shareable and unused bits. All-zero CBM is invalid.
  */
-static int __init_one_rdt_domain(struct rdt_domain *d, struct rdt_resource *r,
+static int __init_one_rdt_domain(struct rdt_domain *d, struct resctrl_schema *s,
                                 u32 closid)
 {
-       struct rdt_resource *r_cdp = NULL;
-       struct rdt_domain *d_cdp = NULL;
+       enum resctrl_conf_type peer_type = resctrl_peer_type(s->conf_type);
+       enum resctrl_conf_type t = s->conf_type;
+       struct resctrl_staged_config *cfg;
+       struct rdt_resource *r = s->res;
        u32 used_b = 0, unused_b = 0;
        unsigned long tmp_cbm;
        enum rdtgrp_mode mode;
-       u32 peer_ctl, *ctrl;
+       u32 peer_ctl, ctrl_val;
        int i;
 
-       rdt_cdp_peer_get(r, d, &r_cdp, &d_cdp);
-       d->have_new_ctrl = false;
-       d->new_ctrl = r->cache.shareable_bits;
+       cfg = &d->staged_config[t];
+       cfg->have_new_ctrl = false;
+       cfg->new_ctrl = r->cache.shareable_bits;
        used_b = r->cache.shareable_bits;
-       ctrl = d->ctrl_val;
-       for (i = 0; i < closids_supported(); i++, ctrl++) {
+       for (i = 0; i < closids_supported(); i++) {
                if (closid_allocated(i) && i != closid) {
                        mode = rdtgroup_mode_by_closid(i);
                        if (mode == RDT_MODE_PSEUDO_LOCKSETUP)
@@ -2673,35 +2725,38 @@ static int __init_one_rdt_domain(struct rdt_domain *d, struct rdt_resource *r,
                         * usage to ensure there is no overlap
                         * with an exclusive group.
                         */
-                       if (d_cdp)
-                               peer_ctl = d_cdp->ctrl_val[i];
+                       if (resctrl_arch_get_cdp_enabled(r->rid))
+                               peer_ctl = resctrl_arch_get_config(r, d, i,
+                                                                  peer_type);
                        else
                                peer_ctl = 0;
-                       used_b |= *ctrl | peer_ctl;
+                       ctrl_val = resctrl_arch_get_config(r, d, i,
+                                                          s->conf_type);
+                       used_b |= ctrl_val | peer_ctl;
                        if (mode == RDT_MODE_SHAREABLE)
-                               d->new_ctrl |= *ctrl | peer_ctl;
+                               cfg->new_ctrl |= ctrl_val | peer_ctl;
                }
        }
        if (d->plr && d->plr->cbm > 0)
                used_b |= d->plr->cbm;
        unused_b = used_b ^ (BIT_MASK(r->cache.cbm_len) - 1);
        unused_b &= BIT_MASK(r->cache.cbm_len) - 1;
-       d->new_ctrl |= unused_b;
+       cfg->new_ctrl |= unused_b;
        /*
         * Force the initial CBM to be valid, user can
         * modify the CBM based on system availability.
         */
-       d->new_ctrl = cbm_ensure_valid(d->new_ctrl, r);
+       cfg->new_ctrl = cbm_ensure_valid(cfg->new_ctrl, r);
        /*
         * Assign the u32 CBM to an unsigned long to ensure that
         * bitmap_weight() does not access out-of-bound memory.
         */
-       tmp_cbm = d->new_ctrl;
+       tmp_cbm = cfg->new_ctrl;
        if (bitmap_weight(&tmp_cbm, r->cache.cbm_len) < r->cache.min_cbm_bits) {
-               rdt_last_cmd_printf("No space on %s:%d\n", r->name, d->id);
+               rdt_last_cmd_printf("No space on %s:%d\n", s->name, d->id);
                return -ENOSPC;
        }
-       d->have_new_ctrl = true;
+       cfg->have_new_ctrl = true;
 
        return 0;
 }
@@ -2716,13 +2771,13 @@ static int __init_one_rdt_domain(struct rdt_domain *d, struct rdt_resource *r,
  * If there are no more shareable bits available on any domain then
  * the entire allocation will fail.
  */
-static int rdtgroup_init_cat(struct rdt_resource *r, u32 closid)
+static int rdtgroup_init_cat(struct resctrl_schema *s, u32 closid)
 {
        struct rdt_domain *d;
        int ret;
 
-       list_for_each_entry(d, &r->domains, list) {
-               ret = __init_one_rdt_domain(d, r, closid);
+       list_for_each_entry(d, &s->res->domains, list) {
+               ret = __init_one_rdt_domain(d, s, closid);
                if (ret < 0)
                        return ret;
        }
@@ -2733,30 +2788,34 @@ static int rdtgroup_init_cat(struct rdt_resource *r, u32 closid)
 /* Initialize MBA resource with default values. */
 static void rdtgroup_init_mba(struct rdt_resource *r)
 {
+       struct resctrl_staged_config *cfg;
        struct rdt_domain *d;
 
        list_for_each_entry(d, &r->domains, list) {
-               d->new_ctrl = is_mba_sc(r) ? MBA_MAX_MBPS : r->default_ctrl;
-               d->have_new_ctrl = true;
+               cfg = &d->staged_config[CDP_NONE];
+               cfg->new_ctrl = is_mba_sc(r) ? MBA_MAX_MBPS : r->default_ctrl;
+               cfg->have_new_ctrl = true;
        }
 }
 
 /* Initialize the RDT group's allocations. */
 static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
 {
+       struct resctrl_schema *s;
        struct rdt_resource *r;
        int ret;
 
-       for_each_alloc_enabled_rdt_resource(r) {
+       list_for_each_entry(s, &resctrl_schema_all, list) {
+               r = s->res;
                if (r->rid == RDT_RESOURCE_MBA) {
                        rdtgroup_init_mba(r);
                } else {
-                       ret = rdtgroup_init_cat(r, rdtgrp->closid);
+                       ret = rdtgroup_init_cat(s, rdtgrp->closid);
                        if (ret < 0)
                                return ret;
                }
 
-               ret = update_domains(r, rdtgrp->closid);
+               ret = resctrl_arch_update_domains(r, rdtgrp->closid);
                if (ret < 0) {
                        rdt_last_cmd_puts("Failed to initialize allocations\n");
                        return ret;
@@ -3124,13 +3183,13 @@ out:
 
 static int rdtgroup_show_options(struct seq_file *seq, struct kernfs_root *kf)
 {
-       if (rdt_resources_all[RDT_RESOURCE_L3DATA].alloc_enabled)
+       if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L3))
                seq_puts(seq, ",cdp");
 
-       if (rdt_resources_all[RDT_RESOURCE_L2DATA].alloc_enabled)
+       if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L2))
                seq_puts(seq, ",cdpl2");
 
-       if (is_mba_sc(&rdt_resources_all[RDT_RESOURCE_MBA]))
+       if (is_mba_sc(&rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl))
                seq_puts(seq, ",mba_MBps");
 
        return 0;
index 08651a4..42fc41d 100644 (file)
@@ -508,7 +508,7 @@ static struct irq_chip hpet_msi_controller __ro_after_init = {
        .irq_set_affinity = msi_domain_set_affinity,
        .irq_retrigger = irq_chip_retrigger_hierarchy,
        .irq_write_msi_msg = hpet_msi_write_msg,
-       .flags = IRQCHIP_SKIP_SET_WAKE,
+       .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_AFFINITY_PRE_STARTUP,
 };
 
 static int hpet_msi_init(struct irq_domain *domain,
index 282b4ee..15aefa3 100644 (file)
@@ -235,15 +235,15 @@ static char irq_trigger[2];
  */
 static void restore_ELCR(char *trigger)
 {
-       outb(trigger[0], 0x4d0);
-       outb(trigger[1], 0x4d1);
+       outb(trigger[0], PIC_ELCR1);
+       outb(trigger[1], PIC_ELCR2);
 }
 
 static void save_ELCR(char *trigger)
 {
        /* IRQ 0,1,2,8,13 are marked as reserved */
-       trigger[0] = inb(0x4d0) & 0xF8;
-       trigger[1] = inb(0x4d1) & 0xDE;
+       trigger[0] = inb(PIC_ELCR1) & 0xF8;
+       trigger[1] = inb(PIC_ELCR2) & 0xDE;
 }
 
 static void i8259A_resume(void)
index 8f06449..fed721f 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/smp.h>
 #include <linux/pci.h>
 
+#include <asm/i8259.h>
 #include <asm/io_apic.h>
 #include <asm/acpi.h>
 #include <asm/irqdomain.h>
@@ -251,7 +252,7 @@ static int __init ELCR_trigger(unsigned int irq)
 {
        unsigned int port;
 
-       port = 0x4d0 + (irq >> 3);
+       port = PIC_ELCR1 + (irq >> 3);
        return (inb(port) >> (irq & 7)) & 1;
 }
 
index ebfb911..0a40df6 100644 (file)
@@ -388,10 +388,11 @@ static const struct dmi_system_id reboot_dmi_table[] __initconst = {
        },
        {       /* Handle problems with rebooting on the OptiPlex 990. */
                .callback = set_pci_reboot,
-               .ident = "Dell OptiPlex 990",
+               .ident = "Dell OptiPlex 990 BIOS A0x",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
                        DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 990"),
+                       DMI_MATCH(DMI_BIOS_VERSION, "A0"),
                },
        },
        {       /* Handle problems with rebooting on Dell 300's */
index 9320285..85f6e24 100644 (file)
@@ -610,6 +610,9 @@ void set_cpu_sibling_map(int cpu)
        if (threads > __max_smt_threads)
                __max_smt_threads = threads;
 
+       for_each_cpu(i, topology_sibling_cpumask(cpu))
+               cpu_data(i).smt_active = threads > 1;
+
        /*
         * This needs a separate iteration over the cpus because we rely on all
         * topology_sibling_cpumask links to be set-up.
@@ -1552,8 +1555,13 @@ static void remove_siblinginfo(int cpu)
 
        for_each_cpu(sibling, topology_die_cpumask(cpu))
                cpumask_clear_cpu(cpu, topology_die_cpumask(sibling));
-       for_each_cpu(sibling, topology_sibling_cpumask(cpu))
+
+       for_each_cpu(sibling, topology_sibling_cpumask(cpu)) {
                cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling));
+               if (cpumask_weight(topology_sibling_cpumask(sibling)) == 1)
+                       cpu_data(sibling).smt_active = false;
+       }
+
        for_each_cpu(sibling, cpu_llc_shared_mask(cpu))
                cpumask_clear_cpu(cpu, cpu_llc_shared_mask(sibling));
        cpumask_clear(cpu_llc_shared_mask(cpu));
index 739be5d..fe03bd9 100644 (file)
@@ -208,30 +208,6 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
        kvm_mmu_after_set_cpuid(vcpu);
 }
 
-static int is_efer_nx(void)
-{
-       return host_efer & EFER_NX;
-}
-
-static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
-{
-       int i;
-       struct kvm_cpuid_entry2 *e, *entry;
-
-       entry = NULL;
-       for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
-               e = &vcpu->arch.cpuid_entries[i];
-               if (e->function == 0x80000001) {
-                       entry = e;
-                       break;
-               }
-       }
-       if (entry && cpuid_entry_has(entry, X86_FEATURE_NX) && !is_efer_nx()) {
-               cpuid_entry_clear(entry, X86_FEATURE_NX);
-               printk(KERN_INFO "kvm: guest NX capability removed\n");
-       }
-}
-
 int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu)
 {
        struct kvm_cpuid_entry2 *best;
@@ -302,7 +278,6 @@ int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
        vcpu->arch.cpuid_entries = e2;
        vcpu->arch.cpuid_nent = cpuid->nent;
 
-       cpuid_fix_nx_cap(vcpu);
        kvm_update_cpuid_runtime(vcpu);
        kvm_vcpu_after_set_cpuid(vcpu);
 
@@ -401,7 +376,6 @@ static __always_inline void kvm_cpu_cap_mask(enum cpuid_leafs leaf, u32 mask)
 
 void kvm_set_cpu_caps(void)
 {
-       unsigned int f_nx = is_efer_nx() ? F(NX) : 0;
 #ifdef CONFIG_X86_64
        unsigned int f_gbpages = F(GBPAGES);
        unsigned int f_lm = F(LM);
@@ -515,7 +489,7 @@ void kvm_set_cpu_caps(void)
                F(CX8) | F(APIC) | 0 /* Reserved */ | F(SYSCALL) |
                F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
                F(PAT) | F(PSE36) | 0 /* Reserved */ |
-               f_nx | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
+               F(NX) | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
                F(FXSR) | F(FXSR_OPT) | f_gbpages | F(RDTSCP) |
                0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW)
        );
index b07592c..41d2a53 100644 (file)
@@ -1933,7 +1933,7 @@ ret_success:
 void kvm_hv_set_cpuid(struct kvm_vcpu *vcpu)
 {
        struct kvm_cpuid_entry2 *entry;
-       struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
+       struct kvm_vcpu_hv *hv_vcpu;
 
        entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_INTERFACE, 0);
        if (entry && entry->eax == HYPERV_CPUID_SIGNATURE_EAX) {
@@ -2016,6 +2016,7 @@ static void kvm_hv_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result)
 
 static int kvm_hv_hypercall_complete(struct kvm_vcpu *vcpu, u64 result)
 {
+       trace_kvm_hv_hypercall_done(result);
        kvm_hv_hypercall_set_result(vcpu, result);
        ++vcpu->stat.hypercalls;
        return kvm_skip_emulated_instruction(vcpu);
@@ -2139,6 +2140,7 @@ static bool hv_check_hypercall_access(struct kvm_vcpu_hv *hv_vcpu, u16 code)
 
 int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
 {
+       struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
        struct kvm_hv_hcall hc;
        u64 ret = HV_STATUS_SUCCESS;
 
@@ -2173,17 +2175,25 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
        hc.rep_idx = (hc.param >> HV_HYPERCALL_REP_START_OFFSET) & 0xfff;
        hc.rep = !!(hc.rep_cnt || hc.rep_idx);
 
-       if (hc.fast && is_xmm_fast_hypercall(&hc))
-               kvm_hv_hypercall_read_xmm(&hc);
-
        trace_kvm_hv_hypercall(hc.code, hc.fast, hc.rep_cnt, hc.rep_idx,
                               hc.ingpa, hc.outgpa);
 
-       if (unlikely(!hv_check_hypercall_access(to_hv_vcpu(vcpu), hc.code))) {
+       if (unlikely(!hv_check_hypercall_access(hv_vcpu, hc.code))) {
                ret = HV_STATUS_ACCESS_DENIED;
                goto hypercall_complete;
        }
 
+       if (hc.fast && is_xmm_fast_hypercall(&hc)) {
+               if (unlikely(hv_vcpu->enforce_cpuid &&
+                            !(hv_vcpu->cpuid_cache.features_edx &
+                              HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE))) {
+                       kvm_queue_exception(vcpu, UD_VECTOR);
+                       return 1;
+               }
+
+               kvm_hv_hypercall_read_xmm(&hc);
+       }
+
        switch (hc.code) {
        case HVCALL_NOTIFY_LONG_SPIN_WAIT:
                if (unlikely(hc.rep)) {
index 629a09c..0b80263 100644 (file)
@@ -541,17 +541,17 @@ static int picdev_slave_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
                            addr, len, val);
 }
 
-static int picdev_eclr_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
+static int picdev_elcr_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
                             gpa_t addr, int len, const void *val)
 {
-       return picdev_write(container_of(dev, struct kvm_pic, dev_eclr),
+       return picdev_write(container_of(dev, struct kvm_pic, dev_elcr),
                            addr, len, val);
 }
 
-static int picdev_eclr_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
+static int picdev_elcr_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
                            gpa_t addr, int len, void *val)
 {
-       return picdev_read(container_of(dev, struct kvm_pic, dev_eclr),
+       return picdev_read(container_of(dev, struct kvm_pic, dev_elcr),
                            addr, len, val);
 }
 
@@ -577,9 +577,9 @@ static const struct kvm_io_device_ops picdev_slave_ops = {
        .write    = picdev_slave_write,
 };
 
-static const struct kvm_io_device_ops picdev_eclr_ops = {
-       .read     = picdev_eclr_read,
-       .write    = picdev_eclr_write,
+static const struct kvm_io_device_ops picdev_elcr_ops = {
+       .read     = picdev_elcr_read,
+       .write    = picdev_elcr_write,
 };
 
 int kvm_pic_init(struct kvm *kvm)
@@ -602,7 +602,7 @@ int kvm_pic_init(struct kvm *kvm)
         */
        kvm_iodevice_init(&s->dev_master, &picdev_master_ops);
        kvm_iodevice_init(&s->dev_slave, &picdev_slave_ops);
-       kvm_iodevice_init(&s->dev_eclr, &picdev_eclr_ops);
+       kvm_iodevice_init(&s->dev_elcr, &picdev_elcr_ops);
        mutex_lock(&kvm->slots_lock);
        ret = kvm_io_bus_register_dev(kvm, KVM_PIO_BUS, 0x20, 2,
                                      &s->dev_master);
@@ -613,7 +613,7 @@ int kvm_pic_init(struct kvm *kvm)
        if (ret < 0)
                goto fail_unreg_2;
 
-       ret = kvm_io_bus_register_dev(kvm, KVM_PIO_BUS, 0x4d0, 2, &s->dev_eclr);
+       ret = kvm_io_bus_register_dev(kvm, KVM_PIO_BUS, 0x4d0, 2, &s->dev_elcr);
        if (ret < 0)
                goto fail_unreg_1;
 
@@ -647,7 +647,7 @@ void kvm_pic_destroy(struct kvm *kvm)
        mutex_lock(&kvm->slots_lock);
        kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_master);
        kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_slave);
-       kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_eclr);
+       kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_elcr);
        mutex_unlock(&kvm->slots_lock);
 
        kvm->arch.vpic = NULL;
index 698969e..ff005fe 100644 (file)
@@ -96,7 +96,7 @@ static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic,
 static void rtc_irq_eoi_tracking_reset(struct kvm_ioapic *ioapic)
 {
        ioapic->rtc_status.pending_eoi = 0;
-       bitmap_zero(ioapic->rtc_status.dest_map.map, KVM_MAX_VCPU_ID);
+       bitmap_zero(ioapic->rtc_status.dest_map.map, KVM_MAX_VCPU_ID + 1);
 }
 
 static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic);
index 6604017..11e4065 100644 (file)
@@ -43,13 +43,13 @@ struct kvm_vcpu;
 
 struct dest_map {
        /* vcpu bitmap where IRQ has been sent */
-       DECLARE_BITMAP(map, KVM_MAX_VCPU_ID);
+       DECLARE_BITMAP(map, KVM_MAX_VCPU_ID + 1);
 
        /*
         * Vector sent to a given vcpu, only valid when
         * the vcpu's bit in map is set
         */
-       u8 vectors[KVM_MAX_VCPU_ID];
+       u8 vectors[KVM_MAX_VCPU_ID + 1];
 };
 
 
index 9b64abf..650642b 100644 (file)
@@ -55,7 +55,7 @@ struct kvm_pic {
        int output;             /* intr from master PIC */
        struct kvm_io_device dev_master;
        struct kvm_io_device dev_slave;
-       struct kvm_io_device dev_eclr;
+       struct kvm_io_device dev_elcr;
        void (*ack_notifier)(void *opaque, int irq);
        unsigned long irq_states[PIC_NUM_PINS];
 };
index 66f7f5b..47b7652 100644 (file)
@@ -1644,7 +1644,7 @@ static int is_empty_shadow_page(u64 *spt)
  * aggregate version in order to make the slab shrinker
  * faster
  */
-static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, unsigned long nr)
+static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, long nr)
 {
        kvm->arch.n_used_mmu_pages += nr;
        percpu_counter_add(&kvm_total_used_mmu_pages, nr);
@@ -2535,6 +2535,7 @@ static void kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn, bool can_unsync)
 {
        struct kvm_mmu_page *sp;
+       bool locked = false;
 
        /*
         * Force write-protection if the page is being tracked.  Note, the page
@@ -2557,9 +2558,34 @@ int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn, bool can_unsync)
                if (sp->unsync)
                        continue;
 
+               /*
+                * TDP MMU page faults require an additional spinlock as they
+                * run with mmu_lock held for read, not write, and the unsync
+                * logic is not thread safe.  Take the spinklock regardless of
+                * the MMU type to avoid extra conditionals/parameters, there's
+                * no meaningful penalty if mmu_lock is held for write.
+                */
+               if (!locked) {
+                       locked = true;
+                       spin_lock(&vcpu->kvm->arch.mmu_unsync_pages_lock);
+
+                       /*
+                        * Recheck after taking the spinlock, a different vCPU
+                        * may have since marked the page unsync.  A false
+                        * positive on the unprotected check above is not
+                        * possible as clearing sp->unsync _must_ hold mmu_lock
+                        * for write, i.e. unsync cannot transition from 0->1
+                        * while this CPU holds mmu_lock for read (or write).
+                        */
+                       if (READ_ONCE(sp->unsync))
+                               continue;
+               }
+
                WARN_ON(sp->role.level != PG_LEVEL_4K);
                kvm_unsync_page(vcpu, sp);
        }
+       if (locked)
+               spin_unlock(&vcpu->kvm->arch.mmu_unsync_pages_lock);
 
        /*
         * We need to ensure that the marking of unsync pages is visible
@@ -5537,6 +5563,8 @@ void kvm_mmu_init_vm(struct kvm *kvm)
 {
        struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;
 
+       spin_lock_init(&kvm->arch.mmu_unsync_pages_lock);
+
        if (!kvm_mmu_init_tdp_mmu(kvm))
                /*
                 * No smp_load/store wrappers needed here as we are in
index 0853370..d80cb12 100644 (file)
@@ -43,6 +43,7 @@ void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
        if (!kvm->arch.tdp_mmu_enabled)
                return;
 
+       WARN_ON(!list_empty(&kvm->arch.tdp_mmu_pages));
        WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots));
 
        /*
@@ -81,8 +82,6 @@ static void tdp_mmu_free_sp_rcu_callback(struct rcu_head *head)
 void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
                          bool shared)
 {
-       gfn_t max_gfn = 1ULL << (shadow_phys_bits - PAGE_SHIFT);
-
        kvm_lockdep_assert_mmu_lock_held(kvm, shared);
 
        if (!refcount_dec_and_test(&root->tdp_mmu_root_count))
@@ -94,7 +93,7 @@ void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
        list_del_rcu(&root->link);
        spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
 
-       zap_gfn_range(kvm, root, 0, max_gfn, false, false, shared);
+       zap_gfn_range(kvm, root, 0, -1ull, false, false, shared);
 
        call_rcu(&root->rcu_head, tdp_mmu_free_sp_rcu_callback);
 }
@@ -724,13 +723,29 @@ static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
                          gfn_t start, gfn_t end, bool can_yield, bool flush,
                          bool shared)
 {
+       gfn_t max_gfn_host = 1ULL << (shadow_phys_bits - PAGE_SHIFT);
+       bool zap_all = (start == 0 && end >= max_gfn_host);
        struct tdp_iter iter;
 
+       /*
+        * No need to try to step down in the iterator when zapping all SPTEs,
+        * zapping the top-level non-leaf SPTEs will recurse on their children.
+        */
+       int min_level = zap_all ? root->role.level : PG_LEVEL_4K;
+
+       /*
+        * Bound the walk at host.MAXPHYADDR, guest accesses beyond that will
+        * hit a #PF(RSVD) and never get to an EPT Violation/Misconfig / #NPF,
+        * and so KVM will never install a SPTE for such addresses.
+        */
+       end = min(end, max_gfn_host);
+
        kvm_lockdep_assert_mmu_lock_held(kvm, shared);
 
        rcu_read_lock();
 
-       tdp_root_for_each_pte(iter, root, start, end) {
+       for_each_tdp_pte_min_level(iter, root->spt, root->role.level,
+                                  min_level, start, end) {
 retry:
                if (can_yield &&
                    tdp_mmu_iter_cond_resched(kvm, &iter, flush, shared)) {
@@ -744,9 +759,10 @@ retry:
                /*
                 * If this is a non-last-level SPTE that covers a larger range
                 * than should be zapped, continue, and zap the mappings at a
-                * lower level.
+                * lower level, except when zapping all SPTEs.
                 */
-               if ((iter.gfn < start ||
+               if (!zap_all &&
+                   (iter.gfn < start ||
                     iter.gfn + KVM_PAGES_PER_HPAGE(iter.level) > end) &&
                    !is_last_spte(iter.old_spte, iter.level))
                        continue;
@@ -794,12 +810,11 @@ bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id, gfn_t start,
 
 void kvm_tdp_mmu_zap_all(struct kvm *kvm)
 {
-       gfn_t max_gfn = 1ULL << (shadow_phys_bits - PAGE_SHIFT);
        bool flush = false;
        int i;
 
        for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
-               flush = kvm_tdp_mmu_zap_gfn_range(kvm, i, 0, max_gfn,
+               flush = kvm_tdp_mmu_zap_gfn_range(kvm, i, 0, -1ull,
                                                  flush, false);
 
        if (flush)
@@ -838,7 +853,6 @@ static struct kvm_mmu_page *next_invalidated_root(struct kvm *kvm,
  */
 void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm)
 {
-       gfn_t max_gfn = 1ULL << (shadow_phys_bits - PAGE_SHIFT);
        struct kvm_mmu_page *next_root;
        struct kvm_mmu_page *root;
        bool flush = false;
@@ -854,8 +868,7 @@ void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm)
 
                rcu_read_unlock();
 
-               flush = zap_gfn_range(kvm, root, 0, max_gfn, true, flush,
-                                     true);
+               flush = zap_gfn_range(kvm, root, 0, -1ull, true, flush, true);
 
                /*
                 * Put the reference acquired in
index 1d01da6..a8ad78a 100644 (file)
@@ -646,7 +646,7 @@ out:
 void svm_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
-       struct vmcb *vmcb = svm->vmcb;
+       struct vmcb *vmcb = svm->vmcb01.ptr;
        bool activated = kvm_vcpu_apicv_active(vcpu);
 
        if (!enable_apicv)
index 3bd09c5..e551547 100644 (file)
@@ -158,6 +158,9 @@ void recalc_intercepts(struct vcpu_svm *svm)
        /* If SMI is not intercepted, ignore guest SMI intercept as well  */
        if (!intercept_smi)
                vmcb_clr_intercept(c, INTERCEPT_SMI);
+
+       vmcb_set_intercept(c, INTERCEPT_VMLOAD);
+       vmcb_set_intercept(c, INTERCEPT_VMSAVE);
 }
 
 static void copy_vmcb_control_area(struct vmcb_control_area *dst,
@@ -503,7 +506,11 @@ static void nested_vmcb02_prepare_save(struct vcpu_svm *svm, struct vmcb *vmcb12
 
 static void nested_vmcb02_prepare_control(struct vcpu_svm *svm)
 {
-       const u32 mask = V_INTR_MASKING_MASK | V_GIF_ENABLE_MASK | V_GIF_MASK;
+       const u32 int_ctl_vmcb01_bits =
+               V_INTR_MASKING_MASK | V_GIF_MASK | V_GIF_ENABLE_MASK;
+
+       const u32 int_ctl_vmcb12_bits = V_TPR_MASK | V_IRQ_INJECTION_BITS_MASK;
+
        struct kvm_vcpu *vcpu = &svm->vcpu;
 
        /*
@@ -515,7 +522,7 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm)
         * Also covers avic_vapic_bar, avic_backing_page, avic_logical_id,
         * avic_physical_id.
         */
-       WARN_ON(svm->vmcb01.ptr->control.int_ctl & AVIC_ENABLE_MASK);
+       WARN_ON(kvm_apicv_activated(svm->vcpu.kvm));
 
        /* Copied from vmcb01.  msrpm_base can be overwritten later.  */
        svm->vmcb->control.nested_ctl = svm->vmcb01.ptr->control.nested_ctl;
@@ -535,8 +542,8 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm)
                vcpu->arch.l1_tsc_offset + svm->nested.ctl.tsc_offset;
 
        svm->vmcb->control.int_ctl             =
-               (svm->nested.ctl.int_ctl & ~mask) |
-               (svm->vmcb01.ptr->control.int_ctl & mask);
+               (svm->nested.ctl.int_ctl & int_ctl_vmcb12_bits) |
+               (svm->vmcb01.ptr->control.int_ctl & int_ctl_vmcb01_bits);
 
        svm->vmcb->control.virt_ext            = svm->nested.ctl.virt_ext;
        svm->vmcb->control.int_vector          = svm->nested.ctl.int_vector;
@@ -702,8 +709,8 @@ out:
 }
 
 /* Copy state save area fields which are handled by VMRUN */
-void svm_copy_vmrun_state(struct vmcb_save_area *from_save,
-                         struct vmcb_save_area *to_save)
+void svm_copy_vmrun_state(struct vmcb_save_area *to_save,
+                         struct vmcb_save_area *from_save)
 {
        to_save->es = from_save->es;
        to_save->cs = from_save->cs;
@@ -722,7 +729,7 @@ void svm_copy_vmrun_state(struct vmcb_save_area *from_save,
        to_save->cpl = 0;
 }
 
-void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
+void svm_copy_vmloadsave_state(struct vmcb *to_vmcb, struct vmcb *from_vmcb)
 {
        to_vmcb->save.fs = from_vmcb->save.fs;
        to_vmcb->save.gs = from_vmcb->save.gs;
@@ -1385,7 +1392,7 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
 
        svm->nested.vmcb12_gpa = kvm_state->hdr.svm.vmcb_pa;
 
-       svm_copy_vmrun_state(save, &svm->vmcb01.ptr->save);
+       svm_copy_vmrun_state(&svm->vmcb01.ptr->save, save);
        nested_load_control_from_vmcb12(svm, ctl);
 
        svm_switch_vmcb(svm, &svm->nested.vmcb02);
index 6710d9e..7fbce34 100644 (file)
@@ -64,6 +64,7 @@ static DEFINE_MUTEX(sev_bitmap_lock);
 unsigned int max_sev_asid;
 static unsigned int min_sev_asid;
 static unsigned long sev_me_mask;
+static unsigned int nr_asids;
 static unsigned long *sev_asid_bitmap;
 static unsigned long *sev_reclaim_asid_bitmap;
 
@@ -78,11 +79,11 @@ struct enc_region {
 /* Called with the sev_bitmap_lock held, or on shutdown  */
 static int sev_flush_asids(int min_asid, int max_asid)
 {
-       int ret, pos, error = 0;
+       int ret, asid, error = 0;
 
        /* Check if there are any ASIDs to reclaim before performing a flush */
-       pos = find_next_bit(sev_reclaim_asid_bitmap, max_asid, min_asid);
-       if (pos >= max_asid)
+       asid = find_next_bit(sev_reclaim_asid_bitmap, nr_asids, min_asid);
+       if (asid > max_asid)
                return -EBUSY;
 
        /*
@@ -115,15 +116,15 @@ static bool __sev_recycle_asids(int min_asid, int max_asid)
 
        /* The flush process will flush all reclaimable SEV and SEV-ES ASIDs */
        bitmap_xor(sev_asid_bitmap, sev_asid_bitmap, sev_reclaim_asid_bitmap,
-                  max_sev_asid);
-       bitmap_zero(sev_reclaim_asid_bitmap, max_sev_asid);
+                  nr_asids);
+       bitmap_zero(sev_reclaim_asid_bitmap, nr_asids);
 
        return true;
 }
 
 static int sev_asid_new(struct kvm_sev_info *sev)
 {
-       int pos, min_asid, max_asid, ret;
+       int asid, min_asid, max_asid, ret;
        bool retry = true;
        enum misc_res_type type;
 
@@ -143,11 +144,11 @@ static int sev_asid_new(struct kvm_sev_info *sev)
         * SEV-enabled guests must use asid from min_sev_asid to max_sev_asid.
         * SEV-ES-enabled guest can use from 1 to min_sev_asid - 1.
         */
-       min_asid = sev->es_active ? 0 : min_sev_asid - 1;
+       min_asid = sev->es_active ? 1 : min_sev_asid;
        max_asid = sev->es_active ? min_sev_asid - 1 : max_sev_asid;
 again:
-       pos = find_next_zero_bit(sev_asid_bitmap, max_sev_asid, min_asid);
-       if (pos >= max_asid) {
+       asid = find_next_zero_bit(sev_asid_bitmap, max_asid + 1, min_asid);
+       if (asid > max_asid) {
                if (retry && __sev_recycle_asids(min_asid, max_asid)) {
                        retry = false;
                        goto again;
@@ -157,11 +158,11 @@ again:
                goto e_uncharge;
        }
 
-       __set_bit(pos, sev_asid_bitmap);
+       __set_bit(asid, sev_asid_bitmap);
 
        mutex_unlock(&sev_bitmap_lock);
 
-       return pos + 1;
+       return asid;
 e_uncharge:
        misc_cg_uncharge(type, sev->misc_cg, 1);
        put_misc_cg(sev->misc_cg);
@@ -179,17 +180,16 @@ static int sev_get_asid(struct kvm *kvm)
 static void sev_asid_free(struct kvm_sev_info *sev)
 {
        struct svm_cpu_data *sd;
-       int cpu, pos;
+       int cpu;
        enum misc_res_type type;
 
        mutex_lock(&sev_bitmap_lock);
 
-       pos = sev->asid - 1;
-       __set_bit(pos, sev_reclaim_asid_bitmap);
+       __set_bit(sev->asid, sev_reclaim_asid_bitmap);
 
        for_each_possible_cpu(cpu) {
                sd = per_cpu(svm_data, cpu);
-               sd->sev_vmcbs[pos] = NULL;
+               sd->sev_vmcbs[sev->asid] = NULL;
        }
 
        mutex_unlock(&sev_bitmap_lock);
@@ -1857,12 +1857,17 @@ void __init sev_hardware_setup(void)
        min_sev_asid = edx;
        sev_me_mask = 1UL << (ebx & 0x3f);
 
-       /* Initialize SEV ASID bitmaps */
-       sev_asid_bitmap = bitmap_zalloc(max_sev_asid, GFP_KERNEL);
+       /*
+        * Initialize SEV ASID bitmaps. Allocate space for ASID 0 in the bitmap,
+        * even though it's never used, so that the bitmap is indexed by the
+        * actual ASID.
+        */
+       nr_asids = max_sev_asid + 1;
+       sev_asid_bitmap = bitmap_zalloc(nr_asids, GFP_KERNEL);
        if (!sev_asid_bitmap)
                goto out;
 
-       sev_reclaim_asid_bitmap = bitmap_zalloc(max_sev_asid, GFP_KERNEL);
+       sev_reclaim_asid_bitmap = bitmap_zalloc(nr_asids, GFP_KERNEL);
        if (!sev_reclaim_asid_bitmap) {
                bitmap_free(sev_asid_bitmap);
                sev_asid_bitmap = NULL;
@@ -1907,7 +1912,7 @@ void sev_hardware_teardown(void)
                return;
 
        /* No need to take sev_bitmap_lock, all VMs have been destroyed. */
-       sev_flush_asids(0, max_sev_asid);
+       sev_flush_asids(1, max_sev_asid);
 
        bitmap_free(sev_asid_bitmap);
        bitmap_free(sev_reclaim_asid_bitmap);
@@ -1921,7 +1926,7 @@ int sev_cpu_init(struct svm_cpu_data *sd)
        if (!sev_enabled)
                return 0;
 
-       sd->sev_vmcbs = kcalloc(max_sev_asid + 1, sizeof(void *), GFP_KERNEL);
+       sd->sev_vmcbs = kcalloc(nr_asids, sizeof(void *), GFP_KERNEL);
        if (!sd->sev_vmcbs)
                return -ENOMEM;
 
index 664d20f..69639f9 100644 (file)
@@ -1406,8 +1406,6 @@ static int svm_create_vcpu(struct kvm_vcpu *vcpu)
                goto error_free_vmsa_page;
        }
 
-       svm_vcpu_init_msrpm(vcpu, svm->msrpm);
-
        svm->vmcb01.ptr = page_address(vmcb01_page);
        svm->vmcb01.pa = __sme_set(page_to_pfn(vmcb01_page) << PAGE_SHIFT);
 
@@ -1419,6 +1417,8 @@ static int svm_create_vcpu(struct kvm_vcpu *vcpu)
        svm_switch_vmcb(svm, &svm->vmcb01);
        init_vmcb(vcpu);
 
+       svm_vcpu_init_msrpm(vcpu, svm->msrpm);
+
        svm_init_osvw(vcpu);
        vcpu->arch.microcode_version = 0x01000065;
 
@@ -1568,8 +1568,11 @@ static void svm_set_vintr(struct vcpu_svm *svm)
 {
        struct vmcb_control_area *control;
 
-       /* The following fields are ignored when AVIC is enabled */
-       WARN_ON(kvm_vcpu_apicv_active(&svm->vcpu));
+       /*
+        * The following fields are ignored when AVIC is enabled
+        */
+       WARN_ON(kvm_apicv_activated(svm->vcpu.kvm));
+
        svm_set_intercept(svm, INTERCEPT_VINTR);
 
        /*
@@ -1586,17 +1589,18 @@ static void svm_set_vintr(struct vcpu_svm *svm)
 
 static void svm_clear_vintr(struct vcpu_svm *svm)
 {
-       const u32 mask = V_TPR_MASK | V_GIF_ENABLE_MASK | V_GIF_MASK | V_INTR_MASKING_MASK;
        svm_clr_intercept(svm, INTERCEPT_VINTR);
 
        /* Drop int_ctl fields related to VINTR injection.  */
-       svm->vmcb->control.int_ctl &= mask;
+       svm->vmcb->control.int_ctl &= ~V_IRQ_INJECTION_BITS_MASK;
        if (is_guest_mode(&svm->vcpu)) {
-               svm->vmcb01.ptr->control.int_ctl &= mask;
+               svm->vmcb01.ptr->control.int_ctl &= ~V_IRQ_INJECTION_BITS_MASK;
 
                WARN_ON((svm->vmcb->control.int_ctl & V_TPR_MASK) !=
                        (svm->nested.ctl.int_ctl & V_TPR_MASK));
-               svm->vmcb->control.int_ctl |= svm->nested.ctl.int_ctl & ~mask;
+
+               svm->vmcb->control.int_ctl |= svm->nested.ctl.int_ctl &
+                       V_IRQ_INJECTION_BITS_MASK;
        }
 
        vmcb_mark_dirty(svm->vmcb, VMCB_INTR);
@@ -2147,11 +2151,12 @@ static int vmload_vmsave_interception(struct kvm_vcpu *vcpu, bool vmload)
        ret = kvm_skip_emulated_instruction(vcpu);
 
        if (vmload) {
-               nested_svm_vmloadsave(vmcb12, svm->vmcb);
+               svm_copy_vmloadsave_state(svm->vmcb, vmcb12);
                svm->sysenter_eip_hi = 0;
                svm->sysenter_esp_hi = 0;
-       } else
-               nested_svm_vmloadsave(svm->vmcb, vmcb12);
+       } else {
+               svm_copy_vmloadsave_state(vmcb12, svm->vmcb);
+       }
 
        kvm_vcpu_unmap(vcpu, &map, true);
 
@@ -4344,8 +4349,8 @@ static int svm_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
 
                BUILD_BUG_ON(offsetof(struct vmcb, save) != 0x400);
 
-               svm_copy_vmrun_state(&svm->vmcb01.ptr->save,
-                                    map_save.hva + 0x400);
+               svm_copy_vmrun_state(map_save.hva + 0x400,
+                                    &svm->vmcb01.ptr->save);
 
                kvm_vcpu_unmap(vcpu, &map_save, true);
        }
@@ -4393,8 +4398,8 @@ static int svm_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
                                         &map_save) == -EINVAL)
                                return 1;
 
-                       svm_copy_vmrun_state(map_save.hva + 0x400,
-                                            &svm->vmcb01.ptr->save);
+                       svm_copy_vmrun_state(&svm->vmcb01.ptr->save,
+                                            map_save.hva + 0x400);
 
                        kvm_vcpu_unmap(vcpu, &map_save, true);
                }
index 7e20907..bd0fe94 100644 (file)
@@ -464,9 +464,9 @@ void svm_leave_nested(struct vcpu_svm *svm);
 void svm_free_nested(struct vcpu_svm *svm);
 int svm_allocate_nested(struct vcpu_svm *svm);
 int nested_svm_vmrun(struct kvm_vcpu *vcpu);
-void svm_copy_vmrun_state(struct vmcb_save_area *from_save,
-                         struct vmcb_save_area *to_save);
-void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb);
+void svm_copy_vmrun_state(struct vmcb_save_area *to_save,
+                         struct vmcb_save_area *from_save);
+void svm_copy_vmloadsave_state(struct vmcb *to_vmcb, struct vmcb *from_vmcb);
 int nested_svm_vmexit(struct vcpu_svm *svm);
 
 static inline int nested_svm_simple_vmexit(struct vcpu_svm *svm, u32 exit_code)
index 9b9a55a..c53b8bf 100644 (file)
@@ -89,7 +89,7 @@ static inline void svm_hv_vmcb_dirty_nested_enlightenments(
         * as we mark it dirty unconditionally towards end of vcpu
         * init phase.
         */
-       if (vmcb && vmcb_is_clean(vmcb, VMCB_HV_NESTED_ENLIGHTENMENTS) &&
+       if (vmcb_is_clean(vmcb, VMCB_HV_NESTED_ENLIGHTENMENTS) &&
            hve->hv_enlightenments_control.msr_bitmap)
                vmcb_mark_dirty(vmcb, VMCB_HV_NESTED_ENLIGHTENMENTS);
 }
index b484141..03ebe36 100644 (file)
@@ -92,6 +92,21 @@ TRACE_EVENT(kvm_hv_hypercall,
                  __entry->outgpa)
 );
 
+TRACE_EVENT(kvm_hv_hypercall_done,
+       TP_PROTO(u64 result),
+       TP_ARGS(result),
+
+       TP_STRUCT__entry(
+               __field(__u64, result)
+       ),
+
+       TP_fast_assign(
+               __entry->result = result;
+       ),
+
+       TP_printk("result 0x%llx", __entry->result)
+);
+
 /*
  * Tracepoint for Xen hypercall.
  */
index 1a52134..b3f77d1 100644 (file)
@@ -330,6 +330,31 @@ void nested_vmx_free_vcpu(struct kvm_vcpu *vcpu)
        vcpu_put(vcpu);
 }
 
+#define EPTP_PA_MASK   GENMASK_ULL(51, 12)
+
+static bool nested_ept_root_matches(hpa_t root_hpa, u64 root_eptp, u64 eptp)
+{
+       return VALID_PAGE(root_hpa) &&
+              ((root_eptp & EPTP_PA_MASK) == (eptp & EPTP_PA_MASK));
+}
+
+static void nested_ept_invalidate_addr(struct kvm_vcpu *vcpu, gpa_t eptp,
+                                      gpa_t addr)
+{
+       uint i;
+       struct kvm_mmu_root_info *cached_root;
+
+       WARN_ON_ONCE(!mmu_is_nested(vcpu));
+
+       for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
+               cached_root = &vcpu->arch.mmu->prev_roots[i];
+
+               if (nested_ept_root_matches(cached_root->hpa, cached_root->pgd,
+                                           eptp))
+                       vcpu->arch.mmu->invlpg(vcpu, addr, cached_root->hpa);
+       }
+}
+
 static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu,
                struct x86_exception *fault)
 {
@@ -342,10 +367,22 @@ static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu,
                vm_exit_reason = EXIT_REASON_PML_FULL;
                vmx->nested.pml_full = false;
                exit_qualification &= INTR_INFO_UNBLOCK_NMI;
-       } else if (fault->error_code & PFERR_RSVD_MASK)
-               vm_exit_reason = EXIT_REASON_EPT_MISCONFIG;
-       else
-               vm_exit_reason = EXIT_REASON_EPT_VIOLATION;
+       } else {
+               if (fault->error_code & PFERR_RSVD_MASK)
+                       vm_exit_reason = EXIT_REASON_EPT_MISCONFIG;
+               else
+                       vm_exit_reason = EXIT_REASON_EPT_VIOLATION;
+
+               /*
+                * Although the caller (kvm_inject_emulated_page_fault) would
+                * have already synced the faulting address in the shadow EPT
+                * tables for the current EPTP12, we also need to sync it for
+                * any other cached EPTP02s based on the same EP4TA, since the
+                * TLB associates mappings to the EP4TA rather than the full EPTP.
+                */
+               nested_ept_invalidate_addr(vcpu, vmcs12->ept_pointer,
+                                          fault->address);
+       }
 
        nested_vmx_vmexit(vcpu, vm_exit_reason, 0, exit_qualification);
        vmcs12->guest_physical_address = fault->address;
@@ -5325,14 +5362,6 @@ static int handle_vmptrst(struct kvm_vcpu *vcpu)
        return nested_vmx_succeed(vcpu);
 }
 
-#define EPTP_PA_MASK   GENMASK_ULL(51, 12)
-
-static bool nested_ept_root_matches(hpa_t root_hpa, u64 root_eptp, u64 eptp)
-{
-       return VALID_PAGE(root_hpa) &&
-               ((root_eptp & EPTP_PA_MASK) == (eptp & EPTP_PA_MASK));
-}
-
 /* Emulate the INVEPT instruction */
 static int handle_invept(struct kvm_vcpu *vcpu)
 {
@@ -5826,7 +5855,8 @@ static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu,
                if (is_nmi(intr_info))
                        return true;
                else if (is_page_fault(intr_info))
-                       return vcpu->arch.apf.host_apf_flags || !enable_ept;
+                       return vcpu->arch.apf.host_apf_flags ||
+                              vmx_need_pf_intercept(vcpu);
                else if (is_debug(intr_info) &&
                         vcpu->guest_debug &
                         (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
index db88ed4..17a1cb4 100644 (file)
@@ -522,7 +522,7 @@ static inline struct vmcs *alloc_vmcs(bool shadow)
 
 static inline bool vmx_has_waitpkg(struct vcpu_vmx *vmx)
 {
-       return vmx->secondary_exec_control &
+       return secondary_exec_controls_get(vmx) &
                SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE;
 }
 
index a4fd106..e5d5c5e 100644 (file)
@@ -3407,7 +3407,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                        return 1;
                break;
        case MSR_KVM_ASYNC_PF_ACK:
-               if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF))
+               if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT))
                        return 1;
                if (data & 0x1) {
                        vcpu->arch.apf.pageready_pending = false;
@@ -3746,7 +3746,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                msr_info->data = vcpu->arch.apf.msr_int_val;
                break;
        case MSR_KVM_ASYNC_PF_ACK:
-               if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF))
+               if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT))
                        return 1;
 
                msr_info->data = 0;
@@ -4358,8 +4358,17 @@ static int kvm_cpu_accept_dm_intr(struct kvm_vcpu *vcpu)
 
 static int kvm_vcpu_ready_for_interrupt_injection(struct kvm_vcpu *vcpu)
 {
-       return kvm_arch_interrupt_allowed(vcpu) &&
-               kvm_cpu_accept_dm_intr(vcpu);
+       /*
+        * Do not cause an interrupt window exit if an exception
+        * is pending or an event needs reinjection; userspace
+        * might want to inject the interrupt manually using KVM_SET_REGS
+        * or KVM_SET_SREGS.  For that to work, we must be at an
+        * instruction boundary and with no events half-injected.
+        */
+       return (kvm_arch_interrupt_allowed(vcpu) &&
+               kvm_cpu_accept_dm_intr(vcpu) &&
+               !kvm_event_needs_reinjection(vcpu) &&
+               !vcpu->arch.exception.pending);
 }
 
 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
index bad4dee..c6506c6 100644 (file)
@@ -44,6 +44,7 @@ obj-$(CONFIG_SMP) += msr-smp.o cache-smp.o
 lib-y := delay.o misc.o cmdline.o cpu.o
 lib-y += usercopy_$(BITS).o usercopy.o getuser.o putuser.o
 lib-y += memcpy_$(BITS).o
+lib-y += pc-conf-reg.o
 lib-$(CONFIG_ARCH_HAS_COPY_MC) += copy_mc.o copy_mc_64.o
 lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o insn-eval.o
 lib-$(CONFIG_RANDOMIZE_BASE) += kaslr.o
diff --git a/arch/x86/lib/pc-conf-reg.c b/arch/x86/lib/pc-conf-reg.c
new file mode 100644 (file)
index 0000000..febb527
--- /dev/null
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for the configuration register space at port I/O locations
+ * 0x22 and 0x23 variously used by PC architectures, e.g. the MP Spec,
+ * Cyrix CPUs, numerous chipsets.  As the space is indirectly addressed
+ * it may have to be protected with a spinlock, depending on the context.
+ */
+
+#include <linux/spinlock.h>
+
+#include <asm/pc-conf-reg.h>
+
+DEFINE_RAW_SPINLOCK(pc_conf_lock);
index cd768da..933a2eb 100644 (file)
@@ -376,12 +376,12 @@ static void enter_uniprocessor(void)
                goto out;
        }
 
-       get_online_cpus();
+       cpus_read_lock();
        cpumask_copy(downed_cpus, cpu_online_mask);
        cpumask_clear_cpu(cpumask_first(cpu_online_mask), downed_cpus);
        if (num_online_cpus() > 1)
                pr_notice("Disabling non-boot CPUs...\n");
-       put_online_cpus();
+       cpus_read_unlock();
 
        for_each_cpu(cpu, downed_cpus) {
                err = remove_cpu(cpu);
index cfe6b1e..59ba296 100644 (file)
@@ -8,11 +8,13 @@
 #include <linux/export.h>
 #include <linux/cpu.h>
 #include <linux/debugfs.h>
+#include <linux/sched/smt.h>
 
 #include <asm/tlbflush.h>
 #include <asm/mmu_context.h>
 #include <asm/nospec-branch.h>
 #include <asm/cache.h>
+#include <asm/cacheflush.h>
 #include <asm/apic.h>
 #include <asm/perf_event.h>
 
  */
 
 /*
- * Use bit 0 to mangle the TIF_SPEC_IB state into the mm pointer which is
- * stored in cpu_tlb_state.last_user_mm_ibpb.
+ * Bits to mangle the TIF_SPEC_* state into the mm pointer which is
+ * stored in cpu_tlb_state.last_user_mm_spec.
  */
 #define LAST_USER_MM_IBPB      0x1UL
+#define LAST_USER_MM_L1D_FLUSH 0x2UL
+#define LAST_USER_MM_SPEC_MASK (LAST_USER_MM_IBPB | LAST_USER_MM_L1D_FLUSH)
+
+/* Bits to set when tlbstate and flush is (re)initialized */
+#define LAST_USER_MM_INIT      LAST_USER_MM_IBPB
 
 /*
  * The x86 feature is called PCID (Process Context IDentifier). It is similar
@@ -317,20 +324,70 @@ void switch_mm(struct mm_struct *prev, struct mm_struct *next,
        local_irq_restore(flags);
 }
 
-static unsigned long mm_mangle_tif_spec_ib(struct task_struct *next)
+/*
+ * Invoked from return to user/guest by a task that opted-in to L1D
+ * flushing but ended up running on an SMT enabled core due to wrong
+ * affinity settings or CPU hotplug. This is part of the paranoid L1D flush
+ * contract which this task requested.
+ */
+static void l1d_flush_force_sigbus(struct callback_head *ch)
+{
+       force_sig(SIGBUS);
+}
+
+static void l1d_flush_evaluate(unsigned long prev_mm, unsigned long next_mm,
+                               struct task_struct *next)
+{
+       /* Flush L1D if the outgoing task requests it */
+       if (prev_mm & LAST_USER_MM_L1D_FLUSH)
+               wrmsrl(MSR_IA32_FLUSH_CMD, L1D_FLUSH);
+
+       /* Check whether the incoming task opted in for L1D flush */
+       if (likely(!(next_mm & LAST_USER_MM_L1D_FLUSH)))
+               return;
+
+       /*
+        * Validate that it is not running on an SMT sibling as this would
+        * make the exercise pointless because the siblings share L1D. If
+        * it runs on an SMT sibling, notify it with SIGBUS on return to
+        * user/guest
+        */
+       if (this_cpu_read(cpu_info.smt_active)) {
+               clear_ti_thread_flag(&next->thread_info, TIF_SPEC_L1D_FLUSH);
+               next->l1d_flush_kill.func = l1d_flush_force_sigbus;
+               task_work_add(next, &next->l1d_flush_kill, TWA_RESUME);
+       }
+}
+
+static unsigned long mm_mangle_tif_spec_bits(struct task_struct *next)
 {
        unsigned long next_tif = task_thread_info(next)->flags;
-       unsigned long ibpb = (next_tif >> TIF_SPEC_IB) & LAST_USER_MM_IBPB;
+       unsigned long spec_bits = (next_tif >> TIF_SPEC_IB) & LAST_USER_MM_SPEC_MASK;
 
-       return (unsigned long)next->mm | ibpb;
+       /*
+        * Ensure that the bit shift above works as expected and the two flags
+        * end up in bit 0 and 1.
+        */
+       BUILD_BUG_ON(TIF_SPEC_L1D_FLUSH != TIF_SPEC_IB + 1);
+
+       return (unsigned long)next->mm | spec_bits;
 }
 
-static void cond_ibpb(struct task_struct *next)
+static void cond_mitigation(struct task_struct *next)
 {
+       unsigned long prev_mm, next_mm;
+
        if (!next || !next->mm)
                return;
 
+       next_mm = mm_mangle_tif_spec_bits(next);
+       prev_mm = this_cpu_read(cpu_tlbstate.last_user_mm_spec);
+
        /*
+        * Avoid user/user BTB poisoning by flushing the branch predictor
+        * when switching between processes. This stops one process from
+        * doing Spectre-v2 attacks on another.
+        *
         * Both, the conditional and the always IBPB mode use the mm
         * pointer to avoid the IBPB when switching between tasks of the
         * same process. Using the mm pointer instead of mm->context.ctx_id
@@ -340,8 +397,6 @@ static void cond_ibpb(struct task_struct *next)
         * exposed data is not really interesting.
         */
        if (static_branch_likely(&switch_mm_cond_ibpb)) {
-               unsigned long prev_mm, next_mm;
-
                /*
                 * This is a bit more complex than the always mode because
                 * it has to handle two cases:
@@ -371,20 +426,14 @@ static void cond_ibpb(struct task_struct *next)
                 * Optimize this with reasonably small overhead for the
                 * above cases. Mangle the TIF_SPEC_IB bit into the mm
                 * pointer of the incoming task which is stored in
-                * cpu_tlbstate.last_user_mm_ibpb for comparison.
-                */
-               next_mm = mm_mangle_tif_spec_ib(next);
-               prev_mm = this_cpu_read(cpu_tlbstate.last_user_mm_ibpb);
-
-               /*
+                * cpu_tlbstate.last_user_mm_spec for comparison.
+                *
                 * Issue IBPB only if the mm's are different and one or
                 * both have the IBPB bit set.
                 */
                if (next_mm != prev_mm &&
                    (next_mm | prev_mm) & LAST_USER_MM_IBPB)
                        indirect_branch_prediction_barrier();
-
-               this_cpu_write(cpu_tlbstate.last_user_mm_ibpb, next_mm);
        }
 
        if (static_branch_unlikely(&switch_mm_always_ibpb)) {
@@ -393,11 +442,22 @@ static void cond_ibpb(struct task_struct *next)
                 * different context than the user space task which ran
                 * last on this CPU.
                 */
-               if (this_cpu_read(cpu_tlbstate.last_user_mm) != next->mm) {
+               if ((prev_mm & ~LAST_USER_MM_SPEC_MASK) !=
+                                       (unsigned long)next->mm)
                        indirect_branch_prediction_barrier();
-                       this_cpu_write(cpu_tlbstate.last_user_mm, next->mm);
-               }
        }
+
+       if (static_branch_unlikely(&switch_mm_cond_l1d_flush)) {
+               /*
+                * Flush L1D when the outgoing task requested it and/or
+                * check whether the incoming task requested L1D flushing
+                * and ended up on an SMT sibling.
+                */
+               if (unlikely((prev_mm | next_mm) & LAST_USER_MM_L1D_FLUSH))
+                       l1d_flush_evaluate(prev_mm, next_mm, next);
+       }
+
+       this_cpu_write(cpu_tlbstate.last_user_mm_spec, next_mm);
 }
 
 #ifdef CONFIG_PERF_EVENTS
@@ -531,11 +591,10 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
                need_flush = true;
        } else {
                /*
-                * Avoid user/user BTB poisoning by flushing the branch
-                * predictor when switching between processes. This stops
-                * one process from doing Spectre-v2 attacks on another.
+                * Apply process to process speculation vulnerability
+                * mitigations if applicable.
                 */
-               cond_ibpb(tsk);
+               cond_mitigation(tsk);
 
                /*
                 * Stop remote flushes for the previous mm.
@@ -643,7 +702,7 @@ void initialize_tlbstate_and_flush(void)
        write_cr3(build_cr3(mm->pgd, 0));
 
        /* Reinitialize tlbstate. */
-       this_cpu_write(cpu_tlbstate.last_user_mm_ibpb, LAST_USER_MM_IBPB);
+       this_cpu_write(cpu_tlbstate.last_user_mm_spec, LAST_USER_MM_INIT);
        this_cpu_write(cpu_tlbstate.loaded_mm_asid, 0);
        this_cpu_write(cpu_tlbstate.next_asid, 1);
        this_cpu_write(cpu_tlbstate.ctxs[0].ctx_id, mm->context.ctx_id);
index 4b95145..16d76f8 100644 (file)
@@ -1219,6 +1219,13 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
                        }
                        break;
 
+                       /* speculation barrier */
+               case BPF_ST | BPF_NOSPEC:
+                       if (boot_cpu_has(X86_FEATURE_XMM2))
+                               /* Emit 'lfence' */
+                               EMIT3(0x0F, 0xAE, 0xE8);
+                       break;
+
                        /* ST: *(u8*)(dst_reg + off) = imm */
                case BPF_ST | BPF_MEM | BPF_B:
                        if (is_ereg(dst_reg))
index 3da88de..3bfda5f 100644 (file)
@@ -1886,6 +1886,12 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
                        i++;
                        break;
                }
+               /* speculation barrier */
+               case BPF_ST | BPF_NOSPEC:
+                       if (boot_cpu_has(X86_FEATURE_XMM2))
+                               /* Emit 'lfence' */
+                               EMIT3(0x0F, 0xAE, 0xE8);
+                       break;
                /* ST: *(u8*)(dst_reg + off) = imm */
                case BPF_ST | BPF_MEM | BPF_H:
                case BPF_ST | BPF_MEM | BPF_B:
index d3a73f9..97b63e3 100644 (file)
 #include <linux/dmi.h>
 #include <linux/io.h>
 #include <linux/smp.h>
+#include <linux/spinlock.h>
 #include <asm/io_apic.h>
 #include <linux/irq.h>
 #include <linux/acpi.h>
+
+#include <asm/i8259.h>
+#include <asm/pc-conf-reg.h>
 #include <asm/pci_x86.h>
 
 #define PIRQ_SIGNATURE (('$' << 0) + ('P' << 8) + ('I' << 16) + ('R' << 24))
@@ -47,6 +51,8 @@ struct irq_router {
        int (*get)(struct pci_dev *router, struct pci_dev *dev, int pirq);
        int (*set)(struct pci_dev *router, struct pci_dev *dev, int pirq,
                int new);
+       int (*lvl)(struct pci_dev *router, struct pci_dev *dev, int pirq,
+               int irq);
 };
 
 struct irq_router_handler {
@@ -153,7 +159,7 @@ static void __init pirq_peer_trick(void)
 void elcr_set_level_irq(unsigned int irq)
 {
        unsigned char mask = 1 << (irq & 7);
-       unsigned int port = 0x4d0 + (irq >> 3);
+       unsigned int port = PIC_ELCR1 + (irq >> 3);
        unsigned char val;
        static u16 elcr_irq_mask;
 
@@ -169,6 +175,139 @@ void elcr_set_level_irq(unsigned int irq)
        }
 }
 
+/*
+ *     PIRQ routing for the M1487 ISA Bus Controller (IBC) ASIC used
+ *     with the ALi FinALi 486 chipset.  The IBC is not decoded in the
+ *     PCI configuration space, so we identify it by the accompanying
+ *     M1489 Cache-Memory PCI Controller (CMP) ASIC.
+ *
+ *     There are four 4-bit mappings provided, spread across two PCI
+ *     INTx Routing Table Mapping Registers, available in the port I/O
+ *     space accessible indirectly via the index/data register pair at
+ *     0x22/0x23, located at indices 0x42 and 0x43 for the INT1/INT2
+ *     and INT3/INT4 lines respectively.  The INT1/INT3 and INT2/INT4
+ *     lines are mapped in the low and the high 4-bit nibble of the
+ *     corresponding register as follows:
+ *
+ *     0000 : Disabled
+ *     0001 : IRQ9
+ *     0010 : IRQ3
+ *     0011 : IRQ10
+ *     0100 : IRQ4
+ *     0101 : IRQ5
+ *     0110 : IRQ7
+ *     0111 : IRQ6
+ *     1000 : Reserved
+ *     1001 : IRQ11
+ *     1010 : Reserved
+ *     1011 : IRQ12
+ *     1100 : Reserved
+ *     1101 : IRQ14
+ *     1110 : Reserved
+ *     1111 : IRQ15
+ *
+ *     In addition to the usual ELCR register pair there is a separate
+ *     PCI INTx Sensitivity Register at index 0x44 in the same port I/O
+ *     space, whose bits 3:0 select the trigger mode for INT[4:1] lines
+ *     respectively.  Any bit set to 1 causes interrupts coming on the
+ *     corresponding line to be passed to ISA as edge-triggered and
+ *     otherwise they are passed as level-triggered.  Manufacturer's
+ *     documentation says this register has to be set consistently with
+ *     the relevant ELCR register.
+ *
+ *     Accesses to the port I/O space concerned here need to be unlocked
+ *     by writing the value of 0xc5 to the Lock Register at index 0x03
+ *     beforehand.  Any other value written to said register prevents
+ *     further accesses from reaching the register file, except for the
+ *     Lock Register being written with 0xc5 again.
+ *
+ *     References:
+ *
+ *     "M1489/M1487: 486 PCI Chip Set", Version 1.2, Acer Laboratories
+ *     Inc., July 1997
+ */
+
+#define PC_CONF_FINALI_LOCK            0x03u
+#define PC_CONF_FINALI_PCI_INTX_RT1    0x42u
+#define PC_CONF_FINALI_PCI_INTX_RT2    0x43u
+#define PC_CONF_FINALI_PCI_INTX_SENS   0x44u
+
+#define PC_CONF_FINALI_LOCK_KEY                0xc5u
+
+static u8 read_pc_conf_nybble(u8 base, u8 index)
+{
+       u8 reg = base + (index >> 1);
+       u8 x;
+
+       x = pc_conf_get(reg);
+       return index & 1 ? x >> 4 : x & 0xf;
+}
+
+static void write_pc_conf_nybble(u8 base, u8 index, u8 val)
+{
+       u8 reg = base + (index >> 1);
+       u8 x;
+
+       x = pc_conf_get(reg);
+       x = index & 1 ? (x & 0x0f) | (val << 4) : (x & 0xf0) | val;
+       pc_conf_set(reg, x);
+}
+
+static int pirq_finali_get(struct pci_dev *router, struct pci_dev *dev,
+                          int pirq)
+{
+       static const u8 irqmap[16] = {
+               0, 9, 3, 10, 4, 5, 7, 6, 0, 11, 0, 12, 0, 14, 0, 15
+       };
+       unsigned long flags;
+       u8 x;
+
+       raw_spin_lock_irqsave(&pc_conf_lock, flags);
+       pc_conf_set(PC_CONF_FINALI_LOCK, PC_CONF_FINALI_LOCK_KEY);
+       x = irqmap[read_pc_conf_nybble(PC_CONF_FINALI_PCI_INTX_RT1, pirq - 1)];
+       pc_conf_set(PC_CONF_FINALI_LOCK, 0);
+       raw_spin_unlock_irqrestore(&pc_conf_lock, flags);
+       return x;
+}
+
+static int pirq_finali_set(struct pci_dev *router, struct pci_dev *dev,
+                          int pirq, int irq)
+{
+       static const u8 irqmap[16] = {
+               0, 0, 0, 2, 4, 5, 7, 6, 0, 1, 3, 9, 11, 0, 13, 15
+       };
+       u8 val = irqmap[irq];
+       unsigned long flags;
+
+       if (!val)
+               return 0;
+
+       raw_spin_lock_irqsave(&pc_conf_lock, flags);
+       pc_conf_set(PC_CONF_FINALI_LOCK, PC_CONF_FINALI_LOCK_KEY);
+       write_pc_conf_nybble(PC_CONF_FINALI_PCI_INTX_RT1, pirq - 1, val);
+       pc_conf_set(PC_CONF_FINALI_LOCK, 0);
+       raw_spin_unlock_irqrestore(&pc_conf_lock, flags);
+       return 1;
+}
+
+static int pirq_finali_lvl(struct pci_dev *router, struct pci_dev *dev,
+                          int pirq, int irq)
+{
+       u8 mask = ~(1u << (pirq - 1));
+       unsigned long flags;
+       u8 trig;
+
+       elcr_set_level_irq(irq);
+       raw_spin_lock_irqsave(&pc_conf_lock, flags);
+       pc_conf_set(PC_CONF_FINALI_LOCK, PC_CONF_FINALI_LOCK_KEY);
+       trig = pc_conf_get(PC_CONF_FINALI_PCI_INTX_SENS);
+       trig &= mask;
+       pc_conf_set(PC_CONF_FINALI_PCI_INTX_SENS, trig);
+       pc_conf_set(PC_CONF_FINALI_LOCK, 0);
+       raw_spin_unlock_irqrestore(&pc_conf_lock, flags);
+       return 1;
+}
+
 /*
  * Common IRQ routing practice: nibbles in config space,
  * offset by some magic constant.
@@ -219,6 +358,74 @@ static int pirq_ali_set(struct pci_dev *router, struct pci_dev *dev, int pirq, i
        return 0;
 }
 
+/*
+ *     PIRQ routing for the 82374EB/82374SB EISA System Component (ESC)
+ *     ASIC used with the Intel 82420 and 82430 PCIsets.  The ESC is not
+ *     decoded in the PCI configuration space, so we identify it by the
+ *     accompanying 82375EB/82375SB PCI-EISA Bridge (PCEB) ASIC.
+ *
+ *     There are four PIRQ Route Control registers, available in the
+ *     port I/O space accessible indirectly via the index/data register
+ *     pair at 0x22/0x23, located at indices 0x60/0x61/0x62/0x63 for the
+ *     PIRQ0/1/2/3# lines respectively.  The semantics is the same as
+ *     with the PIIX router.
+ *
+ *     Accesses to the port I/O space concerned here need to be unlocked
+ *     by writing the value of 0x0f to the ESC ID Register at index 0x02
+ *     beforehand.  Any other value written to said register prevents
+ *     further accesses from reaching the register file, except for the
+ *     ESC ID Register being written with 0x0f again.
+ *
+ *     References:
+ *
+ *     "82374EB/82374SB EISA System Component (ESC)", Intel Corporation,
+ *     Order Number: 290476-004, March 1996
+ *
+ *     "82375EB/82375SB PCI-EISA Bridge (PCEB)", Intel Corporation, Order
+ *     Number: 290477-004, March 1996
+ */
+
+#define PC_CONF_I82374_ESC_ID                  0x02u
+#define PC_CONF_I82374_PIRQ_ROUTE_CONTROL      0x60u
+
+#define PC_CONF_I82374_ESC_ID_KEY              0x0fu
+
+static int pirq_esc_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
+{
+       unsigned long flags;
+       int reg;
+       u8 x;
+
+       reg = pirq;
+       if (reg >= 1 && reg <= 4)
+               reg += PC_CONF_I82374_PIRQ_ROUTE_CONTROL - 1;
+
+       raw_spin_lock_irqsave(&pc_conf_lock, flags);
+       pc_conf_set(PC_CONF_I82374_ESC_ID, PC_CONF_I82374_ESC_ID_KEY);
+       x = pc_conf_get(reg);
+       pc_conf_set(PC_CONF_I82374_ESC_ID, 0);
+       raw_spin_unlock_irqrestore(&pc_conf_lock, flags);
+       return (x < 16) ? x : 0;
+}
+
+static int pirq_esc_set(struct pci_dev *router, struct pci_dev *dev, int pirq,
+                      int irq)
+{
+       unsigned long flags;
+       int reg;
+
+       reg = pirq;
+       if (reg >= 1 && reg <= 4)
+               reg += PC_CONF_I82374_PIRQ_ROUTE_CONTROL - 1;
+
+       raw_spin_lock_irqsave(&pc_conf_lock, flags);
+       pc_conf_set(PC_CONF_I82374_ESC_ID, PC_CONF_I82374_ESC_ID_KEY);
+       pc_conf_set(reg, irq);
+       pc_conf_set(PC_CONF_I82374_ESC_ID, 0);
+       raw_spin_unlock_irqrestore(&pc_conf_lock, flags);
+       return 1;
+}
+
 /*
  * The Intel PIIX4 pirq rules are fairly simple: "pirq" is
  * just a pointer to the config space.
@@ -237,6 +444,50 @@ static int pirq_piix_set(struct pci_dev *router, struct pci_dev *dev, int pirq,
        return 1;
 }
 
+/*
+ *     PIRQ routing for the 82426EX ISA Bridge (IB) ASIC used with the
+ *     Intel 82420EX PCIset.
+ *
+ *     There are only two PIRQ Route Control registers, available in the
+ *     combined 82425EX/82426EX PCI configuration space, at 0x66 and 0x67
+ *     for the PIRQ0# and PIRQ1# lines respectively.  The semantics is
+ *     the same as with the PIIX router.
+ *
+ *     References:
+ *
+ *     "82420EX PCIset Data Sheet, 82425EX PCI System Controller (PSC)
+ *     and 82426EX ISA Bridge (IB)", Intel Corporation, Order Number:
+ *     290488-004, December 1995
+ */
+
+#define PCI_I82426EX_PIRQ_ROUTE_CONTROL        0x66u
+
+static int pirq_ib_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
+{
+       int reg;
+       u8 x;
+
+       reg = pirq;
+       if (reg >= 1 && reg <= 2)
+               reg += PCI_I82426EX_PIRQ_ROUTE_CONTROL - 1;
+
+       pci_read_config_byte(router, reg, &x);
+       return (x < 16) ? x : 0;
+}
+
+static int pirq_ib_set(struct pci_dev *router, struct pci_dev *dev, int pirq,
+                      int irq)
+{
+       int reg;
+
+       reg = pirq;
+       if (reg >= 1 && reg <= 2)
+               reg += PCI_I82426EX_PIRQ_ROUTE_CONTROL - 1;
+
+       pci_write_config_byte(router, reg, irq);
+       return 1;
+}
+
 /*
  * The VIA pirq rules are nibble-based, like ALI,
  * but without the ugly irq number munging.
@@ -549,6 +800,11 @@ static __init int intel_router_probe(struct irq_router *r, struct pci_dev *route
                return 0;
 
        switch (device) {
+       case PCI_DEVICE_ID_INTEL_82375:
+               r->name = "PCEB/ESC";
+               r->get = pirq_esc_get;
+               r->set = pirq_esc_set;
+               return 1;
        case PCI_DEVICE_ID_INTEL_82371FB_0:
        case PCI_DEVICE_ID_INTEL_82371SB_0:
        case PCI_DEVICE_ID_INTEL_82371AB_0:
@@ -594,6 +850,11 @@ static __init int intel_router_probe(struct irq_router *r, struct pci_dev *route
                r->get = pirq_piix_get;
                r->set = pirq_piix_set;
                return 1;
+       case PCI_DEVICE_ID_INTEL_82425:
+               r->name = "PSC/IB";
+               r->get = pirq_ib_get;
+               r->set = pirq_ib_set;
+               return 1;
        }
 
        if ((device >= PCI_DEVICE_ID_INTEL_5_3400_SERIES_LPC_MIN && 
@@ -745,6 +1006,12 @@ static __init int ite_router_probe(struct irq_router *r, struct pci_dev *router,
 static __init int ali_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
 {
        switch (device) {
+       case PCI_DEVICE_ID_AL_M1489:
+               r->name = "FinALi";
+               r->get = pirq_finali_get;
+               r->set = pirq_finali_set;
+               r->lvl = pirq_finali_lvl;
+               return 1;
        case PCI_DEVICE_ID_AL_M1533:
        case PCI_DEVICE_ID_AL_M1563:
                r->name = "ALI";
@@ -968,11 +1235,17 @@ static int pcibios_lookup_irq(struct pci_dev *dev, int assign)
        } else if (r->get && (irq = r->get(pirq_router_dev, dev, pirq)) && \
        ((!(pci_probe & PCI_USE_PIRQ_MASK)) || ((1 << irq) & mask))) {
                msg = "found";
-               elcr_set_level_irq(irq);
+               if (r->lvl)
+                       r->lvl(pirq_router_dev, dev, pirq, irq);
+               else
+                       elcr_set_level_irq(irq);
        } else if (newirq && r->set &&
                (dev->class >> 8) != PCI_CLASS_DISPLAY_VGA) {
                if (r->set(pirq_router_dev, dev, pirq, newirq)) {
-                       elcr_set_level_irq(newirq);
+                       if (r->lvl)
+                               r->lvl(pirq_router_dev, dev, pirq, newirq);
+                       else
+                               elcr_set_level_irq(newirq);
                        msg = "assigned";
                        irq = newirq;
                }
index 3a070e7..6665f88 100644 (file)
@@ -58,19 +58,20 @@ static void msr_restore_context(struct saved_context *ctxt)
 }
 
 /**
- *     __save_processor_state - save CPU registers before creating a
- *             hibernation image and before restoring the memory state from it
- *     @ctxt - structure to store the registers contents in
+ * __save_processor_state() - Save CPU registers before creating a
+ *                             hibernation image and before restoring
+ *                             the memory state from it
+ * @ctxt: Structure to store the registers contents in.
  *
- *     NOTE: If there is a CPU register the modification of which by the
- *     boot kernel (ie. the kernel used for loading the hibernation image)
- *     might affect the operations of the restored target kernel (ie. the one
- *     saved in the hibernation image), then its contents must be saved by this
- *     function.  In other words, if kernel A is hibernated and different
- *     kernel B is used for loading the hibernation image into memory, the
- *     kernel A's __save_processor_state() function must save all registers
- *     needed by kernel A, so that it can operate correctly after the resume
- *     regardless of what kernel B does in the meantime.
+ * NOTE: If there is a CPU register the modification of which by the
+ * boot kernel (ie. the kernel used for loading the hibernation image)
+ * might affect the operations of the restored target kernel (ie. the one
+ * saved in the hibernation image), then its contents must be saved by this
+ * function.  In other words, if kernel A is hibernated and different
+ * kernel B is used for loading the hibernation image into memory, the
+ * kernel A's __save_processor_state() function must save all registers
+ * needed by kernel A, so that it can operate correctly after the resume
+ * regardless of what kernel B does in the meantime.
  */
 static void __save_processor_state(struct saved_context *ctxt)
 {
@@ -181,9 +182,9 @@ static void fix_processor_context(void)
 }
 
 /**
- * __restore_processor_state - restore the contents of CPU registers saved
- *                             by __save_processor_state()
- * @ctxt - structure to load the registers contents from
+ * __restore_processor_state() - Restore the contents of CPU registers saved
+ *                               by __save_processor_state()
+ * @ctxt: Structure to load the registers contents from.
  *
  * The asm code that gets us here will have restored a usable GDT, although
  * it will be pointing to the wrong alias.
index fd1ab80..a4cf678 100644 (file)
@@ -10,6 +10,7 @@ BEGIN {
 
 /^GNU objdump/ {
        verstr = ""
+       gsub(/\(.*\)/, "");
        for (i = 3; i <= NF; i++)
                if (match($(i), "^[0-9]")) {
                        verstr = $(i);
index 04c5a44..27c8220 100644 (file)
@@ -26,6 +26,9 @@ static struct relocs relocs32;
 #if ELF_BITS == 64
 static struct relocs relocs32neg;
 static struct relocs relocs64;
+#define FMT PRIu64
+#else
+#define FMT PRIu32
 #endif
 
 struct section {
@@ -57,12 +60,12 @@ static const char * const sym_regex_kernel[S_NSYMTYPES] = {
        [S_REL] =
        "^(__init_(begin|end)|"
        "__x86_cpu_dev_(start|end)|"
-       "(__parainstructions|__alt_instructions)(|_end)|"
-       "(__iommu_table|__apicdrivers|__smp_locks)(|_end)|"
+       "(__parainstructions|__alt_instructions)(_end)?|"
+       "(__iommu_table|__apicdrivers|__smp_locks)(_end)?|"
        "__(start|end)_pci_.*|"
        "__(start|end)_builtin_fw|"
-       "__(start|stop)___ksymtab(|_gpl)|"
-       "__(start|stop)___kcrctab(|_gpl)|"
+       "__(start|stop)___ksymtab(_gpl)?|"
+       "__(start|stop)___kcrctab(_gpl)?|"
        "__(start|stop)___param|"
        "__(start|stop)___modver|"
        "__(start|stop)___bug_table|"
@@ -389,7 +392,7 @@ static void read_ehdr(FILE *fp)
                Elf_Shdr shdr;
 
                if (fseek(fp, ehdr.e_shoff, SEEK_SET) < 0)
-                       die("Seek to %d failed: %s\n", ehdr.e_shoff, strerror(errno));
+                       die("Seek to %" FMT " failed: %s\n", ehdr.e_shoff, strerror(errno));
 
                if (fread(&shdr, sizeof(shdr), 1, fp) != 1)
                        die("Cannot read initial ELF section header: %s\n", strerror(errno));
@@ -412,17 +415,17 @@ static void read_shdrs(FILE *fp)
 
        secs = calloc(shnum, sizeof(struct section));
        if (!secs) {
-               die("Unable to allocate %d section headers\n",
+               die("Unable to allocate %ld section headers\n",
                    shnum);
        }
        if (fseek(fp, ehdr.e_shoff, SEEK_SET) < 0) {
-               die("Seek to %d failed: %s\n",
-                       ehdr.e_shoff, strerror(errno));
+               die("Seek to %" FMT " failed: %s\n",
+                   ehdr.e_shoff, strerror(errno));
        }
        for (i = 0; i < shnum; i++) {
                struct section *sec = &secs[i];
                if (fread(&shdr, sizeof(shdr), 1, fp) != 1)
-                       die("Cannot read ELF section headers %d/%d: %s\n",
+                       die("Cannot read ELF section headers %d/%ld: %s\n",
                            i, shnum, strerror(errno));
                sec->shdr.sh_name      = elf_word_to_cpu(shdr.sh_name);
                sec->shdr.sh_type      = elf_word_to_cpu(shdr.sh_type);
@@ -450,12 +453,12 @@ static void read_strtabs(FILE *fp)
                }
                sec->strtab = malloc(sec->shdr.sh_size);
                if (!sec->strtab) {
-                       die("malloc of %d bytes for strtab failed\n",
-                               sec->shdr.sh_size);
+                       die("malloc of %" FMT " bytes for strtab failed\n",
+                           sec->shdr.sh_size);
                }
                if (fseek(fp, sec->shdr.sh_offset, SEEK_SET) < 0) {
-                       die("Seek to %d failed: %s\n",
-                               sec->shdr.sh_offset, strerror(errno));
+                       die("Seek to %" FMT " failed: %s\n",
+                           sec->shdr.sh_offset, strerror(errno));
                }
                if (fread(sec->strtab, 1, sec->shdr.sh_size, fp)
                    != sec->shdr.sh_size) {
@@ -475,12 +478,12 @@ static void read_symtabs(FILE *fp)
                }
                sec->symtab = malloc(sec->shdr.sh_size);
                if (!sec->symtab) {
-                       die("malloc of %d bytes for symtab failed\n",
-                               sec->shdr.sh_size);
+                       die("malloc of %" FMT " bytes for symtab failed\n",
+                           sec->shdr.sh_size);
                }
                if (fseek(fp, sec->shdr.sh_offset, SEEK_SET) < 0) {
-                       die("Seek to %d failed: %s\n",
-                               sec->shdr.sh_offset, strerror(errno));
+                       die("Seek to %" FMT " failed: %s\n",
+                           sec->shdr.sh_offset, strerror(errno));
                }
                if (fread(sec->symtab, 1, sec->shdr.sh_size, fp)
                    != sec->shdr.sh_size) {
@@ -508,12 +511,12 @@ static void read_relocs(FILE *fp)
                }
                sec->reltab = malloc(sec->shdr.sh_size);
                if (!sec->reltab) {
-                       die("malloc of %d bytes for relocs failed\n",
-                               sec->shdr.sh_size);
+                       die("malloc of %" FMT " bytes for relocs failed\n",
+                           sec->shdr.sh_size);
                }
                if (fseek(fp, sec->shdr.sh_offset, SEEK_SET) < 0) {
-                       die("Seek to %d failed: %s\n",
-                               sec->shdr.sh_offset, strerror(errno));
+                       die("Seek to %" FMT " failed: %s\n",
+                           sec->shdr.sh_offset, strerror(errno));
                }
                if (fread(sec->reltab, 1, sec->shdr.sh_size, fp)
                    != sec->shdr.sh_size) {
index 43c83c0..4c49c82 100644 (file)
@@ -17,6 +17,7 @@
 #include <regex.h>
 #include <tools/le_byteshift.h>
 
+__attribute__((__format__(printf, 1, 2)))
 void die(char *fmt, ...) __attribute__((noreturn));
 
 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
index 2332b21..3878880 100644 (file)
@@ -327,7 +327,6 @@ config XTENSA_PLATFORM_ISS
 
 config XTENSA_PLATFORM_XT2000
        bool "XT2000"
-       select HAVE_IDE
        help
          XT2000 is the name of Tensilica's feature-rich emulation platform.
          This hardware is capable of running a full Linux distribution.
index a48bf2d..764b54b 100644 (file)
@@ -33,8 +33,6 @@ DECLARE_PER_CPU(unsigned long, nmi_count);
 
 asmlinkage void do_IRQ(int hwirq, struct pt_regs *regs)
 {
-       int irq = irq_find_mapping(NULL, hwirq);
-
 #ifdef CONFIG_DEBUG_STACKOVERFLOW
        /* Debugging check for stack overflow: is there less than 1KB free? */
        {
@@ -48,7 +46,7 @@ asmlinkage void do_IRQ(int hwirq, struct pt_regs *regs)
                               sp - sizeof(struct thread_info));
        }
 #endif
-       generic_handle_irq(irq);
+       generic_handle_domain_irq(NULL, hwirq);
 }
 
 int arch_show_interrupts(struct seq_file *p, int prec)
index fd732ae..bac87d7 100644 (file)
@@ -114,16 +114,6 @@ config BLK_DEV_THROTTLING_LOW
 
        Note, this is an experimental interface and could be changed someday.
 
-config BLK_CMDLINE_PARSER
-       bool "Block device command line partition parser"
-       help
-       Enabling this option allows you to specify the partition layout from
-       the kernel boot args.  This is typically of use for embedded devices
-       which don't otherwise have any standardized method for listing the
-       partitions on a block device.
-
-       See Documentation/block/cmdline-partition.rst for more information.
-
 config BLK_WBT
        bool "Enable support for block device writeback throttling"
        help
@@ -251,4 +241,8 @@ config BLK_MQ_RDMA
 config BLK_PM
        def_bool BLOCK && PM
 
+# do not use in new code
+config BLOCK_HOLDER_DEPRECATED
+       bool
+
 source "block/Kconfig.iosched"
index 64053d6..2f2158e 100644 (file)
@@ -9,12 +9,6 @@ config MQ_IOSCHED_DEADLINE
        help
          MQ version of the deadline IO scheduler.
 
-config MQ_IOSCHED_DEADLINE_CGROUP
-       tristate
-       default y
-       depends on MQ_IOSCHED_DEADLINE
-       depends on BLK_CGROUP
-
 config MQ_IOSCHED_KYBER
        tristate "Kyber I/O scheduler"
        default y
index bfbe4e1..1d0d466 100644 (file)
@@ -22,13 +22,10 @@ obj-$(CONFIG_BLK_CGROUP_IOPRIO)     += blk-ioprio.o
 obj-$(CONFIG_BLK_CGROUP_IOLATENCY)     += blk-iolatency.o
 obj-$(CONFIG_BLK_CGROUP_IOCOST)        += blk-iocost.o
 obj-$(CONFIG_MQ_IOSCHED_DEADLINE)      += mq-deadline.o
-mq-deadline-y += mq-deadline-main.o
-mq-deadline-$(CONFIG_MQ_IOSCHED_DEADLINE_CGROUP)+= mq-deadline-cgroup.o
 obj-$(CONFIG_MQ_IOSCHED_KYBER) += kyber-iosched.o
 bfq-y                          := bfq-iosched.o bfq-wf2q.o bfq-cgroup.o
 obj-$(CONFIG_IOSCHED_BFQ)      += bfq.o
 
-obj-$(CONFIG_BLK_CMDLINE_PARSER)       += cmdline-parser.o
 obj-$(CONFIG_BLK_DEV_INTEGRITY) += bio-integrity.o blk-integrity.o
 obj-$(CONFIG_BLK_DEV_INTEGRITY_T10)    += t10-pi.o
 obj-$(CONFIG_BLK_MQ_PCI)       += blk-mq-pci.o
@@ -42,3 +39,4 @@ obj-$(CONFIG_BLK_SED_OPAL)    += sed-opal.o
 obj-$(CONFIG_BLK_PM)           += blk-pm.o
 obj-$(CONFIG_BLK_INLINE_ENCRYPTION)    += keyslot-manager.o blk-crypto.o
 obj-$(CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK)   += blk-crypto-fallback.o
+obj-$(CONFIG_BLOCK_HOLDER_DEPRECATED)  += holder.o
index 7279559..480e1a1 100644 (file)
@@ -2361,6 +2361,9 @@ static int bfq_request_merge(struct request_queue *q, struct request **req,
        __rq = bfq_find_rq_fmerge(bfqd, bio, q);
        if (__rq && elv_bio_merge_ok(__rq, bio)) {
                *req = __rq;
+
+               if (blk_discard_mergable(__rq))
+                       return ELEVATOR_DISCARD_MERGE;
                return ELEVATOR_FRONT_MERGE;
        }
 
@@ -2505,7 +2508,7 @@ void bfq_end_wr_async_queues(struct bfq_data *bfqd,
        int i, j;
 
        for (i = 0; i < 2; i++)
-               for (j = 0; j < IOPRIO_BE_NR; j++)
+               for (j = 0; j < IOPRIO_NR_LEVELS; j++)
                        if (bfqg->async_bfqq[i][j])
                                bfq_bfqq_end_wr(bfqg->async_bfqq[i][j]);
        if (bfqg->async_idle_bfqq)
@@ -5266,8 +5269,8 @@ bfq_set_next_ioprio_data(struct bfq_queue *bfqq, struct bfq_io_cq *bic)
        switch (ioprio_class) {
        default:
                pr_err("bdi %s: bfq: bad prio class %d\n",
-                               bdi_dev_name(bfqq->bfqd->queue->backing_dev_info),
-                               ioprio_class);
+                       bdi_dev_name(bfqq->bfqd->queue->disk->bdi),
+                       ioprio_class);
                fallthrough;
        case IOPRIO_CLASS_NONE:
                /*
@@ -5290,10 +5293,10 @@ bfq_set_next_ioprio_data(struct bfq_queue *bfqq, struct bfq_io_cq *bic)
                break;
        }
 
-       if (bfqq->new_ioprio >= IOPRIO_BE_NR) {
+       if (bfqq->new_ioprio >= IOPRIO_NR_LEVELS) {
                pr_crit("bfq_set_next_ioprio_data: new_ioprio %d\n",
                        bfqq->new_ioprio);
-               bfqq->new_ioprio = IOPRIO_BE_NR;
+               bfqq->new_ioprio = IOPRIO_NR_LEVELS - 1;
        }
 
        bfqq->entity.new_weight = bfq_ioprio_to_weight(bfqq->new_ioprio);
@@ -5408,7 +5411,7 @@ static struct bfq_queue **bfq_async_queue_prio(struct bfq_data *bfqd,
        case IOPRIO_CLASS_RT:
                return &bfqg->async_bfqq[0][ioprio];
        case IOPRIO_CLASS_NONE:
-               ioprio = IOPRIO_NORM;
+               ioprio = IOPRIO_BE_NORM;
                fallthrough;
        case IOPRIO_CLASS_BE:
                return &bfqg->async_bfqq[1][ioprio];
@@ -6822,7 +6825,7 @@ void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg)
        int i, j;
 
        for (i = 0; i < 2; i++)
-               for (j = 0; j < IOPRIO_BE_NR; j++)
+               for (j = 0; j < IOPRIO_NR_LEVELS; j++)
                        __bfq_put_async_bfqq(bfqd, &bfqg->async_bfqq[i][j]);
 
        __bfq_put_async_bfqq(bfqd, &bfqg->async_idle_bfqq);
index 99c2a3c..a73488e 100644 (file)
@@ -931,7 +931,7 @@ struct bfq_group {
 
        void *bfqd;
 
-       struct bfq_queue *async_bfqq[2][IOPRIO_BE_NR];
+       struct bfq_queue *async_bfqq[2][IOPRIO_NR_LEVELS];
        struct bfq_queue *async_idle_bfqq;
 
        struct bfq_entity *my_entity;
@@ -948,15 +948,13 @@ struct bfq_group {
        struct bfq_entity entity;
        struct bfq_sched_data sched_data;
 
-       struct bfq_queue *async_bfqq[2][IOPRIO_BE_NR];
+       struct bfq_queue *async_bfqq[2][IOPRIO_NR_LEVELS];
        struct bfq_queue *async_idle_bfqq;
 
        struct rb_root rq_pos_tree;
 };
 #endif
 
-struct bfq_queue *bfq_entity_to_bfqq(struct bfq_entity *entity);
-
 /* --------------- main algorithm interface ----------------- */
 
 #define BFQ_SERVICE_TREE_INIT  ((struct bfq_service_tree)              \
index 7a462df..b74cc0d 100644 (file)
@@ -505,7 +505,7 @@ static void bfq_active_insert(struct bfq_service_tree *st,
  */
 unsigned short bfq_ioprio_to_weight(int ioprio)
 {
-       return (IOPRIO_BE_NR - ioprio) * BFQ_WEIGHT_CONVERSION_COEFF;
+       return (IOPRIO_NR_LEVELS - ioprio) * BFQ_WEIGHT_CONVERSION_COEFF;
 }
 
 /**
@@ -514,12 +514,12 @@ unsigned short bfq_ioprio_to_weight(int ioprio)
  *
  * To preserve as much as possible the old only-ioprio user interface,
  * 0 is used as an escape ioprio value for weights (numerically) equal or
- * larger than IOPRIO_BE_NR * BFQ_WEIGHT_CONVERSION_COEFF.
+ * larger than IOPRIO_NR_LEVELS * BFQ_WEIGHT_CONVERSION_COEFF.
  */
 static unsigned short bfq_weight_to_ioprio(int weight)
 {
        return max_t(int, 0,
-                    IOPRIO_BE_NR * BFQ_WEIGHT_CONVERSION_COEFF - weight);
+                    IOPRIO_NR_LEVELS * BFQ_WEIGHT_CONVERSION_COEFF - weight);
 }
 
 static void bfq_get_entity(struct bfq_entity *entity)
index 4b4eb89..6b47cdd 100644 (file)
@@ -104,8 +104,7 @@ void bio_integrity_free(struct bio *bio)
        struct bio_set *bs = bio->bi_pool;
 
        if (bip->bip_flags & BIP_BLOCK_INTEGRITY)
-               kfree(page_address(bip->bip_vec->bv_page) +
-                     bip->bip_vec->bv_offset);
+               kfree(bvec_virt(bip->bip_vec));
 
        __bio_integrity_free(bs, bip);
        bio->bi_integrity = NULL;
@@ -163,27 +162,23 @@ static blk_status_t bio_integrity_process(struct bio *bio,
        struct bio_vec bv;
        struct bio_integrity_payload *bip = bio_integrity(bio);
        blk_status_t ret = BLK_STS_OK;
-       void *prot_buf = page_address(bip->bip_vec->bv_page) +
-               bip->bip_vec->bv_offset;
 
        iter.disk_name = bio->bi_bdev->bd_disk->disk_name;
        iter.interval = 1 << bi->interval_exp;
        iter.seed = proc_iter->bi_sector;
-       iter.prot_buf = prot_buf;
+       iter.prot_buf = bvec_virt(bip->bip_vec);
 
        __bio_for_each_segment(bv, bio, bviter, *proc_iter) {
-               void *kaddr = kmap_atomic(bv.bv_page);
+               void *kaddr = bvec_kmap_local(&bv);
 
-               iter.data_buf = kaddr + bv.bv_offset;
+               iter.data_buf = kaddr;
                iter.data_size = bv.bv_len;
-
                ret = proc_fn(&iter);
-               if (ret) {
-                       kunmap_atomic(kaddr);
-                       return ret;
-               }
+               kunmap_local(kaddr);
+
+               if (ret)
+                       break;
 
-               kunmap_atomic(kaddr);
        }
        return ret;
 }
index 1fab762..265bff6 100644 (file)
@@ -495,16 +495,11 @@ EXPORT_SYMBOL(bio_kmalloc);
 
 void zero_fill_bio(struct bio *bio)
 {
-       unsigned long flags;
        struct bio_vec bv;
        struct bvec_iter iter;
 
-       bio_for_each_segment(bv, bio, iter) {
-               char *data = bvec_kmap_irq(&bv, &flags);
-               memset(data, 0, bv.bv_len);
-               flush_dcache_page(bv.bv_page);
-               bvec_kunmap_irq(data, &flags);
-       }
+       bio_for_each_segment(bv, bio, iter)
+               memzero_bvec(&bv);
 }
 EXPORT_SYMBOL(zero_fill_bio);
 
@@ -979,6 +974,14 @@ static int bio_iov_bvec_set_append(struct bio *bio, struct iov_iter *iter)
        return 0;
 }
 
+static void bio_put_pages(struct page **pages, size_t size, size_t off)
+{
+       size_t i, nr = DIV_ROUND_UP(size + (off & ~PAGE_MASK), PAGE_SIZE);
+
+       for (i = 0; i < nr; i++)
+               put_page(pages[i]);
+}
+
 #define PAGE_PTRS_PER_BVEC     (sizeof(struct bio_vec) / sizeof(struct page *))
 
 /**
@@ -1023,8 +1026,10 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
                        if (same_page)
                                put_page(page);
                } else {
-                       if (WARN_ON_ONCE(bio_full(bio, len)))
-                                return -EINVAL;
+                       if (WARN_ON_ONCE(bio_full(bio, len))) {
+                               bio_put_pages(pages + i, left, offset);
+                               return -EINVAL;
+                       }
                        __bio_add_page(bio, page, len, offset);
                }
                offset = 0;
@@ -1069,6 +1074,7 @@ static int __bio_iov_append_get_pages(struct bio *bio, struct iov_iter *iter)
                len = min_t(size_t, PAGE_SIZE - offset, left);
                if (bio_add_hw_page(q, bio, page, len, offset,
                                max_append_sectors, &same_page) != len) {
+                       bio_put_pages(pages + i, left, offset);
                        ret = -EINVAL;
                        break;
                }
@@ -1191,27 +1197,15 @@ EXPORT_SYMBOL(bio_advance);
 void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
                        struct bio *src, struct bvec_iter *src_iter)
 {
-       struct bio_vec src_bv, dst_bv;
-       void *src_p, *dst_p;
-       unsigned bytes;
-
        while (src_iter->bi_size && dst_iter->bi_size) {
-               src_bv = bio_iter_iovec(src, *src_iter);
-               dst_bv = bio_iter_iovec(dst, *dst_iter);
-
-               bytes = min(src_bv.bv_len, dst_bv.bv_len);
-
-               src_p = kmap_atomic(src_bv.bv_page);
-               dst_p = kmap_atomic(dst_bv.bv_page);
-
-               memcpy(dst_p + dst_bv.bv_offset,
-                      src_p + src_bv.bv_offset,
-                      bytes);
-
-               kunmap_atomic(dst_p);
-               kunmap_atomic(src_p);
-
-               flush_dcache_page(dst_bv.bv_page);
+               struct bio_vec src_bv = bio_iter_iovec(src, *src_iter);
+               struct bio_vec dst_bv = bio_iter_iovec(dst, *dst_iter);
+               unsigned int bytes = min(src_bv.bv_len, dst_bv.bv_len);
+               void *src_buf;
+
+               src_buf = bvec_kmap_local(&src_bv);
+               memcpy_to_bvec(&dst_bv, src_buf);
+               kunmap_local(src_buf);
 
                bio_advance_iter_single(src, src_iter, bytes);
                bio_advance_iter_single(dst, dst_iter, bytes);
index 575d7a2..3c88a79 100644 (file)
@@ -489,10 +489,9 @@ static int blkcg_reset_stats(struct cgroup_subsys_state *css,
 
 const char *blkg_dev_name(struct blkcg_gq *blkg)
 {
-       /* some drivers (floppy) instantiate a queue w/o disk registered */
-       if (blkg->q->backing_dev_info->dev)
-               return bdi_dev_name(blkg->q->backing_dev_info);
-       return NULL;
+       if (!blkg->q->disk || !blkg->q->disk->bdi->dev)
+               return NULL;
+       return bdi_dev_name(blkg->q->disk->bdi);
 }
 
 /**
@@ -790,6 +789,7 @@ static void blkcg_rstat_flush(struct cgroup_subsys_state *css, int cpu)
                struct blkcg_gq *parent = blkg->parent;
                struct blkg_iostat_set *bisc = per_cpu_ptr(blkg->iostat_cpu, cpu);
                struct blkg_iostat cur, delta;
+               unsigned long flags;
                unsigned int seq;
 
                /* fetch the current per-cpu values */
@@ -799,21 +799,21 @@ static void blkcg_rstat_flush(struct cgroup_subsys_state *css, int cpu)
                } while (u64_stats_fetch_retry(&bisc->sync, seq));
 
                /* propagate percpu delta to global */
-               u64_stats_update_begin(&blkg->iostat.sync);
+               flags = u64_stats_update_begin_irqsave(&blkg->iostat.sync);
                blkg_iostat_set(&delta, &cur);
                blkg_iostat_sub(&delta, &bisc->last);
                blkg_iostat_add(&blkg->iostat.cur, &delta);
                blkg_iostat_add(&bisc->last, &delta);
-               u64_stats_update_end(&blkg->iostat.sync);
+               u64_stats_update_end_irqrestore(&blkg->iostat.sync, flags);
 
                /* propagate global delta to parent (unless that's root) */
                if (parent && parent->parent) {
-                       u64_stats_update_begin(&parent->iostat.sync);
+                       flags = u64_stats_update_begin_irqsave(&parent->iostat.sync);
                        blkg_iostat_set(&delta, &blkg->iostat.cur);
                        blkg_iostat_sub(&delta, &blkg->iostat.last);
                        blkg_iostat_add(&parent->iostat.cur, &delta);
                        blkg_iostat_add(&blkg->iostat.last, &delta);
-                       u64_stats_update_end(&parent->iostat.sync);
+                       u64_stats_update_end_irqrestore(&parent->iostat.sync, flags);
                }
        }
 
@@ -848,6 +848,7 @@ static void blkcg_fill_root_iostats(void)
                memset(&tmp, 0, sizeof(tmp));
                for_each_possible_cpu(cpu) {
                        struct disk_stats *cpu_dkstats;
+                       unsigned long flags;
 
                        cpu_dkstats = per_cpu_ptr(bdev->bd_stats, cpu);
                        tmp.ios[BLKG_IOSTAT_READ] +=
@@ -864,104 +865,86 @@ static void blkcg_fill_root_iostats(void)
                        tmp.bytes[BLKG_IOSTAT_DISCARD] +=
                                cpu_dkstats->sectors[STAT_DISCARD] << 9;
 
-                       u64_stats_update_begin(&blkg->iostat.sync);
+                       flags = u64_stats_update_begin_irqsave(&blkg->iostat.sync);
                        blkg_iostat_set(&blkg->iostat.cur, &tmp);
-                       u64_stats_update_end(&blkg->iostat.sync);
+                       u64_stats_update_end_irqrestore(&blkg->iostat.sync, flags);
                }
        }
 }
 
-static int blkcg_print_stat(struct seq_file *sf, void *v)
+static void blkcg_print_one_stat(struct blkcg_gq *blkg, struct seq_file *s)
 {
-       struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
-       struct blkcg_gq *blkg;
-
-       if (!seq_css(sf)->parent)
-               blkcg_fill_root_iostats();
-       else
-               cgroup_rstat_flush(blkcg->css.cgroup);
-
-       rcu_read_lock();
-
-       hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
-               struct blkg_iostat_set *bis = &blkg->iostat;
-               const char *dname;
-               char *buf;
-               u64 rbytes, wbytes, rios, wios, dbytes, dios;
-               size_t size = seq_get_buf(sf, &buf), off = 0;
-               int i;
-               bool has_stats = false;
-               unsigned seq;
+       struct blkg_iostat_set *bis = &blkg->iostat;
+       u64 rbytes, wbytes, rios, wios, dbytes, dios;
+       bool has_stats = false;
+       const char *dname;
+       unsigned seq;
+       int i;
 
-               spin_lock_irq(&blkg->q->queue_lock);
+       if (!blkg->online)
+               return;
 
-               if (!blkg->online)
-                       goto skip;
+       dname = blkg_dev_name(blkg);
+       if (!dname)
+               return;
 
-               dname = blkg_dev_name(blkg);
-               if (!dname)
-                       goto skip;
+       seq_printf(s, "%s ", dname);
 
-               /*
-                * Hooray string manipulation, count is the size written NOT
-                * INCLUDING THE \0, so size is now count+1 less than what we
-                * had before, but we want to start writing the next bit from
-                * the \0 so we only add count to buf.
-                */
-               off += scnprintf(buf+off, size-off, "%s ", dname);
+       do {
+               seq = u64_stats_fetch_begin(&bis->sync);
+
+               rbytes = bis->cur.bytes[BLKG_IOSTAT_READ];
+               wbytes = bis->cur.bytes[BLKG_IOSTAT_WRITE];
+               dbytes = bis->cur.bytes[BLKG_IOSTAT_DISCARD];
+               rios = bis->cur.ios[BLKG_IOSTAT_READ];
+               wios = bis->cur.ios[BLKG_IOSTAT_WRITE];
+               dios = bis->cur.ios[BLKG_IOSTAT_DISCARD];
+       } while (u64_stats_fetch_retry(&bis->sync, seq));
+
+       if (rbytes || wbytes || rios || wios) {
+               has_stats = true;
+               seq_printf(s, "rbytes=%llu wbytes=%llu rios=%llu wios=%llu dbytes=%llu dios=%llu",
+                       rbytes, wbytes, rios, wios,
+                       dbytes, dios);
+       }
 
-               do {
-                       seq = u64_stats_fetch_begin(&bis->sync);
+       if (blkcg_debug_stats && atomic_read(&blkg->use_delay)) {
+               has_stats = true;
+               seq_printf(s, " use_delay=%d delay_nsec=%llu",
+                       atomic_read(&blkg->use_delay),
+                       atomic64_read(&blkg->delay_nsec));
+       }
 
-                       rbytes = bis->cur.bytes[BLKG_IOSTAT_READ];
-                       wbytes = bis->cur.bytes[BLKG_IOSTAT_WRITE];
-                       dbytes = bis->cur.bytes[BLKG_IOSTAT_DISCARD];
-                       rios = bis->cur.ios[BLKG_IOSTAT_READ];
-                       wios = bis->cur.ios[BLKG_IOSTAT_WRITE];
-                       dios = bis->cur.ios[BLKG_IOSTAT_DISCARD];
-               } while (u64_stats_fetch_retry(&bis->sync, seq));
+       for (i = 0; i < BLKCG_MAX_POLS; i++) {
+               struct blkcg_policy *pol = blkcg_policy[i];
 
-               if (rbytes || wbytes || rios || wios) {
-                       has_stats = true;
-                       off += scnprintf(buf+off, size-off,
-                                        "rbytes=%llu wbytes=%llu rios=%llu wios=%llu dbytes=%llu dios=%llu",
-                                        rbytes, wbytes, rios, wios,
-                                        dbytes, dios);
-               }
+               if (!blkg->pd[i] || !pol->pd_stat_fn)
+                       continue;
 
-               if (blkcg_debug_stats && atomic_read(&blkg->use_delay)) {
+               if (pol->pd_stat_fn(blkg->pd[i], s))
                        has_stats = true;
-                       off += scnprintf(buf+off, size-off,
-                                        " use_delay=%d delay_nsec=%llu",
-                                        atomic_read(&blkg->use_delay),
-                                       (unsigned long long)atomic64_read(&blkg->delay_nsec));
-               }
+       }
 
-               for (i = 0; i < BLKCG_MAX_POLS; i++) {
-                       struct blkcg_policy *pol = blkcg_policy[i];
-                       size_t written;
+       if (has_stats)
+               seq_printf(s, "\n");
+}
 
-                       if (!blkg->pd[i] || !pol->pd_stat_fn)
-                               continue;
+static int blkcg_print_stat(struct seq_file *sf, void *v)
+{
+       struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
+       struct blkcg_gq *blkg;
 
-                       written = pol->pd_stat_fn(blkg->pd[i], buf+off, size-off);
-                       if (written)
-                               has_stats = true;
-                       off += written;
-               }
+       if (!seq_css(sf)->parent)
+               blkcg_fill_root_iostats();
+       else
+               cgroup_rstat_flush(blkcg->css.cgroup);
 
-               if (has_stats) {
-                       if (off < size - 1) {
-                               off += scnprintf(buf+off, size-off, "\n");
-                               seq_commit(sf, off);
-                       } else {
-                               seq_commit(sf, -1);
-                       }
-               }
-       skip:
+       rcu_read_lock();
+       hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
+               spin_lock_irq(&blkg->q->queue_lock);
+               blkcg_print_one_stat(blkg, sf);
                spin_unlock_irq(&blkg->q->queue_lock);
        }
-
        rcu_read_unlock();
        return 0;
 }
index 0447769..b509873 100644 (file)
@@ -14,7 +14,6 @@
  */
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/backing-dev.h>
 #include <linux/bio.h>
 #include <linux/blkdev.h>
 #include <linux/blk-mq.h>
@@ -122,7 +121,6 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
        rq->internal_tag = BLK_MQ_NO_TAG;
        rq->start_time_ns = ktime_get_ns();
        rq->part = NULL;
-       refcount_set(&rq->ref, 1);
        blk_crypto_rq_set_defaults(rq);
 }
 EXPORT_SYMBOL(blk_rq_init);
@@ -394,10 +392,7 @@ void blk_cleanup_queue(struct request_queue *q)
        /* for synchronous bio-based driver finish in-flight integrity i/o */
        blk_flush_integrity();
 
-       /* @q won't process any more request, flush async actions */
-       del_timer_sync(&q->backing_dev_info->laptop_mode_wb_timer);
        blk_sync_queue(q);
-
        if (queue_is_mq(q))
                blk_mq_exit_queue(q);
 
@@ -534,20 +529,14 @@ struct request_queue *blk_alloc_queue(int node_id)
        if (ret)
                goto fail_id;
 
-       q->backing_dev_info = bdi_alloc(node_id);
-       if (!q->backing_dev_info)
-               goto fail_split;
-
        q->stats = blk_alloc_queue_stats();
        if (!q->stats)
-               goto fail_stats;
+               goto fail_split;
 
        q->node = node_id;
 
        atomic_set(&q->nr_active_requests_shared_sbitmap, 0);
 
-       timer_setup(&q->backing_dev_info->laptop_mode_wb_timer,
-                   laptop_mode_timer_fn, 0);
        timer_setup(&q->timeout, blk_rq_timed_out_timer, 0);
        INIT_WORK(&q->timeout_work, blk_timeout_work);
        INIT_LIST_HEAD(&q->icq_list);
@@ -572,7 +561,7 @@ struct request_queue *blk_alloc_queue(int node_id)
        if (percpu_ref_init(&q->q_usage_counter,
                                blk_queue_usage_counter_release,
                                PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
-               goto fail_bdi;
+               goto fail_stats;
 
        if (blkcg_init_queue(q))
                goto fail_ref;
@@ -585,10 +574,8 @@ struct request_queue *blk_alloc_queue(int node_id)
 
 fail_ref:
        percpu_ref_exit(&q->q_usage_counter);
-fail_bdi:
-       blk_free_queue_stats(q->stats);
 fail_stats:
-       bdi_put(q->backing_dev_info);
+       blk_free_queue_stats(q->stats);
 fail_split:
        bioset_exit(&q->bio_split);
 fail_id:
index c5bdaaf..103c2e2 100644 (file)
@@ -332,7 +332,7 @@ int blk_crypto_init_key(struct blk_crypto_key *blk_key, const u8 *raw_key,
        if (mode->keysize == 0)
                return -EINVAL;
 
-       if (dun_bytes == 0 || dun_bytes > BLK_CRYPTO_MAX_IV_SIZE)
+       if (dun_bytes == 0 || dun_bytes > mode->ivsize)
                return -EINVAL;
 
        if (!is_power_of_2(data_unit_size))
index 1002f6c..4201728 100644 (file)
@@ -262,6 +262,11 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error)
        spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
 }
 
+bool is_flush_rq(struct request *rq)
+{
+       return rq->end_io == flush_end_io;
+}
+
 /**
  * blk_kick_flush - consider issuing flush request
  * @q: request_queue being kicked
@@ -329,6 +334,14 @@ static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
        flush_rq->rq_flags |= RQF_FLUSH_SEQ;
        flush_rq->rq_disk = first_rq->rq_disk;
        flush_rq->end_io = flush_end_io;
+       /*
+        * Order WRITE ->end_io and WRITE rq->ref, and its pair is the one
+        * implied in refcount_inc_not_zero() called from
+        * blk_mq_find_and_get_req(), which orders WRITE/READ flush_rq->ref
+        * and READ flush_rq->end_io
+        */
+       smp_wmb();
+       refcount_set(&flush_rq->ref, 1);
 
        blk_flush_queue_rq(flush_rq, false);
 }
index 410da06..69a1217 100644 (file)
@@ -431,13 +431,15 @@ void blk_integrity_unregister(struct gendisk *disk)
 }
 EXPORT_SYMBOL(blk_integrity_unregister);
 
-void blk_integrity_add(struct gendisk *disk)
+int blk_integrity_add(struct gendisk *disk)
 {
-       if (kobject_init_and_add(&disk->integrity_kobj, &integrity_ktype,
-                                &disk_to_dev(disk)->kobj, "%s", "integrity"))
-               return;
+       int ret;
 
-       kobject_uevent(&disk->integrity_kobj, KOBJ_ADD);
+       ret = kobject_init_and_add(&disk->integrity_kobj, &integrity_ktype,
+                                  &disk_to_dev(disk)->kobj, "%s", "integrity");
+       if (!ret)
+               kobject_uevent(&disk->integrity_kobj, KOBJ_ADD);
+       return ret;
 }
 
 void blk_integrity_del(struct gendisk *disk)
index c2d6bc8..b3880e4 100644 (file)
@@ -1440,16 +1440,17 @@ static int iocg_wake_fn(struct wait_queue_entry *wq_entry, unsigned mode,
                return -1;
 
        iocg_commit_bio(ctx->iocg, wait->bio, wait->abs_cost, cost);
+       wait->committed = true;
 
        /*
         * autoremove_wake_function() removes the wait entry only when it
-        * actually changed the task state.  We want the wait always
-        * removed.  Remove explicitly and use default_wake_function().
+        * actually changed the task state. We want the wait always removed.
+        * Remove explicitly and use default_wake_function(). Note that the
+        * order of operations is important as finish_wait() tests whether
+        * @wq_entry is removed without grabbing the lock.
         */
-       list_del_init(&wq_entry->entry);
-       wait->committed = true;
-
        default_wake_function(wq_entry, mode, flags, key);
+       list_del_init_careful(&wq_entry->entry);
        return 0;
 }
 
@@ -2987,34 +2988,29 @@ static void ioc_pd_free(struct blkg_policy_data *pd)
        kfree(iocg);
 }
 
-static size_t ioc_pd_stat(struct blkg_policy_data *pd, char *buf, size_t size)
+static bool ioc_pd_stat(struct blkg_policy_data *pd, struct seq_file *s)
 {
        struct ioc_gq *iocg = pd_to_iocg(pd);
        struct ioc *ioc = iocg->ioc;
-       size_t pos = 0;
 
        if (!ioc->enabled)
-               return 0;
+               return false;
 
        if (iocg->level == 0) {
                unsigned vp10k = DIV64_U64_ROUND_CLOSEST(
                        ioc->vtime_base_rate * 10000,
                        VTIME_PER_USEC);
-               pos += scnprintf(buf + pos, size - pos, " cost.vrate=%u.%02u",
-                                 vp10k / 100, vp10k % 100);
+               seq_printf(s, " cost.vrate=%u.%02u", vp10k / 100, vp10k % 100);
        }
 
-       pos += scnprintf(buf + pos, size - pos, " cost.usage=%llu",
-                        iocg->last_stat.usage_us);
+       seq_printf(s, " cost.usage=%llu", iocg->last_stat.usage_us);
 
        if (blkcg_debug_stats)
-               pos += scnprintf(buf + pos, size - pos,
-                                " cost.wait=%llu cost.indebt=%llu cost.indelay=%llu",
-                                iocg->last_stat.wait_us,
-                                iocg->last_stat.indebt_us,
-                                iocg->last_stat.indelay_us);
-
-       return pos;
+               seq_printf(s, " cost.wait=%llu cost.indebt=%llu cost.indelay=%llu",
+                       iocg->last_stat.wait_us,
+                       iocg->last_stat.indebt_us,
+                       iocg->last_stat.indelay_us);
+       return true;
 }
 
 static u64 ioc_weight_prfill(struct seq_file *sf, struct blkg_policy_data *pd,
@@ -3060,19 +3056,19 @@ static ssize_t ioc_weight_write(struct kernfs_open_file *of, char *buf,
                if (v < CGROUP_WEIGHT_MIN || v > CGROUP_WEIGHT_MAX)
                        return -EINVAL;
 
-               spin_lock(&blkcg->lock);
+               spin_lock_irq(&blkcg->lock);
                iocc->dfl_weight = v * WEIGHT_ONE;
                hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
                        struct ioc_gq *iocg = blkg_to_iocg(blkg);
 
                        if (iocg) {
-                               spin_lock_irq(&iocg->ioc->lock);
+                               spin_lock(&iocg->ioc->lock);
                                ioc_now(iocg->ioc, &now);
                                weight_updated(iocg, &now);
-                               spin_unlock_irq(&iocg->ioc->lock);
+                               spin_unlock(&iocg->ioc->lock);
                        }
                }
-               spin_unlock(&blkcg->lock);
+               spin_unlock_irq(&blkcg->lock);
 
                return nbytes;
        }
index 81be009..c0545f9 100644 (file)
@@ -833,7 +833,11 @@ static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf,
 
        enable = iolatency_set_min_lat_nsec(blkg, lat_val);
        if (enable) {
-               WARN_ON_ONCE(!blk_get_queue(blkg->q));
+               if (!blk_get_queue(blkg->q)) {
+                       ret = -ENODEV;
+                       goto out;
+               }
+
                blkg_get(blkg);
        }
 
@@ -886,8 +890,7 @@ static int iolatency_print_limit(struct seq_file *sf, void *v)
        return 0;
 }
 
-static size_t iolatency_ssd_stat(struct iolatency_grp *iolat, char *buf,
-                                size_t size)
+static bool iolatency_ssd_stat(struct iolatency_grp *iolat, struct seq_file *s)
 {
        struct latency_stat stat;
        int cpu;
@@ -902,39 +905,40 @@ static size_t iolatency_ssd_stat(struct iolatency_grp *iolat, char *buf,
        preempt_enable();
 
        if (iolat->rq_depth.max_depth == UINT_MAX)
-               return scnprintf(buf, size, " missed=%llu total=%llu depth=max",
-                                (unsigned long long)stat.ps.missed,
-                                (unsigned long long)stat.ps.total);
-       return scnprintf(buf, size, " missed=%llu total=%llu depth=%u",
-                        (unsigned long long)stat.ps.missed,
-                        (unsigned long long)stat.ps.total,
-                        iolat->rq_depth.max_depth);
+               seq_printf(s, " missed=%llu total=%llu depth=max",
+                       (unsigned long long)stat.ps.missed,
+                       (unsigned long long)stat.ps.total);
+       else
+               seq_printf(s, " missed=%llu total=%llu depth=%u",
+                       (unsigned long long)stat.ps.missed,
+                       (unsigned long long)stat.ps.total,
+                       iolat->rq_depth.max_depth);
+       return true;
 }
 
-static size_t iolatency_pd_stat(struct blkg_policy_data *pd, char *buf,
-                               size_t size)
+static bool iolatency_pd_stat(struct blkg_policy_data *pd, struct seq_file *s)
 {
        struct iolatency_grp *iolat = pd_to_lat(pd);
        unsigned long long avg_lat;
        unsigned long long cur_win;
 
        if (!blkcg_debug_stats)
-               return 0;
+               return false;
 
        if (iolat->ssd)
-               return iolatency_ssd_stat(iolat, buf, size);
+               return iolatency_ssd_stat(iolat, s);
 
        avg_lat = div64_u64(iolat->lat_avg, NSEC_PER_USEC);
        cur_win = div64_u64(iolat->cur_win_nsec, NSEC_PER_MSEC);
        if (iolat->rq_depth.max_depth == UINT_MAX)
-               return scnprintf(buf, size, " depth=max avg_lat=%llu win=%llu",
-                                avg_lat, cur_win);
-
-       return scnprintf(buf, size, " depth=%u avg_lat=%llu win=%llu",
-                        iolat->rq_depth.max_depth, avg_lat, cur_win);
+               seq_printf(s, " depth=max avg_lat=%llu win=%llu",
+                       avg_lat, cur_win);
+       else
+               seq_printf(s, " depth=%u avg_lat=%llu win=%llu",
+                       iolat->rq_depth.max_depth, avg_lat, cur_win);
+       return true;
 }
 
-
 static struct blkg_policy_data *iolatency_pd_alloc(gfp_t gfp,
                                                   struct request_queue *q,
                                                   struct blkcg *blkcg)
index 3743158..d1448aa 100644 (file)
@@ -400,7 +400,7 @@ static void bio_copy_kern_endio_read(struct bio *bio)
        struct bvec_iter_all iter_all;
 
        bio_for_each_segment_all(bvec, bio, iter_all) {
-               memcpy(p, page_address(bvec->bv_page), bvec->bv_len);
+               memcpy_from_bvec(p, bvec);
                p += bvec->bv_len;
        }
 
index a11b3b5..eeba842 100644 (file)
@@ -348,6 +348,8 @@ void __blk_queue_split(struct bio **bio, unsigned int *nr_segs)
                trace_block_split(split, (*bio)->bi_iter.bi_sector);
                submit_bio_noacct(*bio);
                *bio = split;
+
+               blk_throtl_charge_bio_split(*bio);
        }
 }
 
@@ -705,22 +707,6 @@ static void blk_account_io_merge_request(struct request *req)
        }
 }
 
-/*
- * Two cases of handling DISCARD merge:
- * If max_discard_segments > 1, the driver takes every bio
- * as a range and send them to controller together. The ranges
- * needn't to be contiguous.
- * Otherwise, the bios/requests will be handled as same as
- * others which should be contiguous.
- */
-static inline bool blk_discard_mergable(struct request *req)
-{
-       if (req_op(req) == REQ_OP_DISCARD &&
-           queue_max_discard_segments(req->q) > 1)
-               return true;
-       return false;
-}
-
 static enum elv_merge blk_try_req_merge(struct request *req,
                                        struct request *next)
 {
index c838d81..0f006ca 100644 (file)
@@ -515,17 +515,6 @@ void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
        percpu_ref_put(&q->q_usage_counter);
 }
 
-static void blk_mq_sched_free_tags(struct blk_mq_tag_set *set,
-                                  struct blk_mq_hw_ctx *hctx,
-                                  unsigned int hctx_idx)
-{
-       if (hctx->sched_tags) {
-               blk_mq_free_rqs(set, hctx->sched_tags, hctx_idx);
-               blk_mq_free_rq_map(hctx->sched_tags, set->flags);
-               hctx->sched_tags = NULL;
-       }
-}
-
 static int blk_mq_sched_alloc_tags(struct request_queue *q,
                                   struct blk_mq_hw_ctx *hctx,
                                   unsigned int hctx_idx)
@@ -539,8 +528,10 @@ static int blk_mq_sched_alloc_tags(struct request_queue *q,
                return -ENOMEM;
 
        ret = blk_mq_alloc_rqs(set, hctx->sched_tags, hctx_idx, q->nr_requests);
-       if (ret)
-               blk_mq_sched_free_tags(set, hctx, hctx_idx);
+       if (ret) {
+               blk_mq_free_rq_map(hctx->sched_tags, set->flags);
+               hctx->sched_tags = NULL;
+       }
 
        return ret;
 }
index 7b52e76..253c857 100644 (file)
@@ -45,60 +45,12 @@ static void blk_mq_hw_sysfs_release(struct kobject *kobj)
        kfree(hctx);
 }
 
-struct blk_mq_ctx_sysfs_entry {
-       struct attribute attr;
-       ssize_t (*show)(struct blk_mq_ctx *, char *);
-       ssize_t (*store)(struct blk_mq_ctx *, const char *, size_t);
-};
-
 struct blk_mq_hw_ctx_sysfs_entry {
        struct attribute attr;
        ssize_t (*show)(struct blk_mq_hw_ctx *, char *);
        ssize_t (*store)(struct blk_mq_hw_ctx *, const char *, size_t);
 };
 
-static ssize_t blk_mq_sysfs_show(struct kobject *kobj, struct attribute *attr,
-                                char *page)
-{
-       struct blk_mq_ctx_sysfs_entry *entry;
-       struct blk_mq_ctx *ctx;
-       struct request_queue *q;
-       ssize_t res;
-
-       entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
-       ctx = container_of(kobj, struct blk_mq_ctx, kobj);
-       q = ctx->queue;
-
-       if (!entry->show)
-               return -EIO;
-
-       mutex_lock(&q->sysfs_lock);
-       res = entry->show(ctx, page);
-       mutex_unlock(&q->sysfs_lock);
-       return res;
-}
-
-static ssize_t blk_mq_sysfs_store(struct kobject *kobj, struct attribute *attr,
-                                 const char *page, size_t length)
-{
-       struct blk_mq_ctx_sysfs_entry *entry;
-       struct blk_mq_ctx *ctx;
-       struct request_queue *q;
-       ssize_t res;
-
-       entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
-       ctx = container_of(kobj, struct blk_mq_ctx, kobj);
-       q = ctx->queue;
-
-       if (!entry->store)
-               return -EIO;
-
-       mutex_lock(&q->sysfs_lock);
-       res = entry->store(ctx, page, length);
-       mutex_unlock(&q->sysfs_lock);
-       return res;
-}
-
 static ssize_t blk_mq_hw_sysfs_show(struct kobject *kobj,
                                    struct attribute *attr, char *page)
 {
@@ -198,23 +150,16 @@ static struct attribute *default_hw_ctx_attrs[] = {
 };
 ATTRIBUTE_GROUPS(default_hw_ctx);
 
-static const struct sysfs_ops blk_mq_sysfs_ops = {
-       .show   = blk_mq_sysfs_show,
-       .store  = blk_mq_sysfs_store,
-};
-
 static const struct sysfs_ops blk_mq_hw_sysfs_ops = {
        .show   = blk_mq_hw_sysfs_show,
        .store  = blk_mq_hw_sysfs_store,
 };
 
 static struct kobj_type blk_mq_ktype = {
-       .sysfs_ops      = &blk_mq_sysfs_ops,
        .release        = blk_mq_sysfs_release,
 };
 
 static struct kobj_type blk_mq_ctx_ktype = {
-       .sysfs_ops      = &blk_mq_sysfs_ops,
        .release        = blk_mq_ctx_sysfs_release,
 };
 
index 2c4ac51..9440499 100644 (file)
@@ -525,7 +525,7 @@ void blk_mq_free_request(struct request *rq)
                __blk_mq_dec_active_requests(hctx);
 
        if (unlikely(laptop_mode && !blk_rq_is_passthrough(rq)))
-               laptop_io_completion(q->backing_dev_info);
+               laptop_io_completion(q->disk->bdi);
 
        rq_qos_done(q, rq);
 
@@ -606,7 +606,7 @@ static inline bool blk_mq_complete_need_ipi(struct request *rq)
         * This is probably worse than completing the request on a different
         * cache domain.
         */
-       if (force_irqthreads)
+       if (force_irqthreads())
                return false;
 
        /* same CPU or cache domain?  Complete locally */
@@ -911,7 +911,7 @@ static bool blk_mq_req_expired(struct request *rq, unsigned long *next)
 
 void blk_mq_put_rq_ref(struct request *rq)
 {
-       if (is_flush_rq(rq, rq->mq_hctx))
+       if (is_flush_rq(rq))
                rq->end_io(rq, 0);
        else if (refcount_dec_and_test(&rq->ref))
                __blk_mq_free_request(rq);
@@ -923,34 +923,14 @@ static bool blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
        unsigned long *next = priv;
 
        /*
-        * Just do a quick check if it is expired before locking the request in
-        * so we're not unnecessarilly synchronizing across CPUs.
-        */
-       if (!blk_mq_req_expired(rq, next))
-               return true;
-
-       /*
-        * We have reason to believe the request may be expired. Take a
-        * reference on the request to lock this request lifetime into its
-        * currently allocated context to prevent it from being reallocated in
-        * the event the completion by-passes this timeout handler.
-        *
-        * If the reference was already released, then the driver beat the
-        * timeout handler to posting a natural completion.
-        */
-       if (!refcount_inc_not_zero(&rq->ref))
-               return true;
-
-       /*
-        * The request is now locked and cannot be reallocated underneath the
-        * timeout handler's processing. Re-verify this exact request is truly
-        * expired; if it is not expired, then the request was completed and
-        * reallocated as a new request.
+        * blk_mq_queue_tag_busy_iter() has locked the request, so it cannot
+        * be reallocated underneath the timeout handler's processing, and
+        * therefore the expire check is reliable. If the request is not
+        * expired, it was completed and reallocated as a new request after
+        * returning from blk_mq_check_expired().
         */
        if (blk_mq_req_expired(rq, next))
                blk_mq_rq_timed_out(rq, reserved);
-
-       blk_mq_put_rq_ref(rq);
        return true;
 }
 
@@ -2994,10 +2974,12 @@ static void queue_set_hctx_shared(struct request_queue *q, bool shared)
        int i;
 
        queue_for_each_hw_ctx(q, hctx, i) {
-               if (shared)
+               if (shared) {
                        hctx->flags |= BLK_MQ_F_TAG_QUEUE_SHARED;
-               else
+               } else {
+                       blk_mq_tag_idle(hctx);
                        hctx->flags &= ~BLK_MQ_F_TAG_QUEUE_SHARED;
+               }
        }
 }
 
@@ -3133,7 +3115,8 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 }
 EXPORT_SYMBOL(blk_mq_init_queue);
 
-struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set, void *queuedata)
+struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set, void *queuedata,
+               struct lock_class_key *lkclass)
 {
        struct request_queue *q;
        struct gendisk *disk;
@@ -3142,12 +3125,11 @@ struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set, void *queuedata)
        if (IS_ERR(q))
                return ERR_CAST(q);
 
-       disk = __alloc_disk_node(0, set->numa_node);
+       disk = __alloc_disk_node(q, set->numa_node, lkclass);
        if (!disk) {
                blk_cleanup_queue(q);
                return ERR_PTR(-ENOMEM);
        }
-       disk->queue = q;
        return disk;
 }
 EXPORT_SYMBOL(__blk_mq_alloc_disk);
index 902c40d..a7c857a 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/bio.h>
 #include <linux/blkdev.h>
 #include <linux/pagemap.h>
+#include <linux/backing-dev-defs.h>
 #include <linux/gcd.h>
 #include <linux/lcm.h>
 #include <linux/jiffies.h>
@@ -140,7 +141,9 @@ void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_secto
                                 limits->logical_block_size >> SECTOR_SHIFT);
        limits->max_sectors = max_sectors;
 
-       q->backing_dev_info->io_pages = max_sectors >> (PAGE_SHIFT - 9);
+       if (!q->disk)
+               return;
+       q->disk->bdi->io_pages = max_sectors >> (PAGE_SHIFT - 9);
 }
 EXPORT_SYMBOL(blk_queue_max_hw_sectors);
 
@@ -380,18 +383,19 @@ void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
 }
 EXPORT_SYMBOL(blk_queue_alignment_offset);
 
-void blk_queue_update_readahead(struct request_queue *q)
+void disk_update_readahead(struct gendisk *disk)
 {
+       struct request_queue *q = disk->queue;
+
        /*
         * For read-ahead of large files to be effective, we need to read ahead
         * at least twice the optimal I/O size.
         */
-       q->backing_dev_info->ra_pages =
+       disk->bdi->ra_pages =
                max(queue_io_opt(q) * 2 / PAGE_SIZE, VM_READAHEAD_PAGES);
-       q->backing_dev_info->io_pages =
-               queue_max_sectors(q) >> (PAGE_SHIFT - 9);
+       disk->bdi->io_pages = queue_max_sectors(q) >> (PAGE_SHIFT - 9);
 }
-EXPORT_SYMBOL_GPL(blk_queue_update_readahead);
+EXPORT_SYMBOL_GPL(disk_update_readahead);
 
 /**
  * blk_limits_io_min - set minimum request size for a device
@@ -471,7 +475,9 @@ EXPORT_SYMBOL(blk_limits_io_opt);
 void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
 {
        blk_limits_io_opt(&q->limits, opt);
-       q->backing_dev_info->ra_pages =
+       if (!q->disk)
+               return;
+       q->disk->bdi->ra_pages =
                max(queue_io_opt(q) * 2 / PAGE_SIZE, VM_READAHEAD_PAGES);
 }
 EXPORT_SYMBOL(blk_queue_io_opt);
@@ -661,17 +667,11 @@ void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
        struct request_queue *t = disk->queue;
 
        if (blk_stack_limits(&t->limits, &bdev_get_queue(bdev)->limits,
-                       get_start_sect(bdev) + (offset >> 9)) < 0) {
-               char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE];
-
-               disk_name(disk, 0, top);
-               bdevname(bdev, bottom);
-
-               printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n",
-                      top, bottom);
-       }
+                       get_start_sect(bdev) + (offset >> 9)) < 0)
+               pr_notice("%s: Warning: Device %pg is misaligned\n",
+                       disk->disk_name, bdev);
 
-       blk_queue_update_readahead(disk->queue);
+       disk_update_readahead(disk);
 }
 EXPORT_SYMBOL(disk_stack_limits);
 
index 370d83c..614d9d4 100644 (file)
@@ -88,9 +88,11 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
 
 static ssize_t queue_ra_show(struct request_queue *q, char *page)
 {
-       unsigned long ra_kb = q->backing_dev_info->ra_pages <<
-                                       (PAGE_SHIFT - 10);
+       unsigned long ra_kb;
 
+       if (!q->disk)
+               return -EINVAL;
+       ra_kb = q->disk->bdi->ra_pages << (PAGE_SHIFT - 10);
        return queue_var_show(ra_kb, page);
 }
 
@@ -98,13 +100,14 @@ static ssize_t
 queue_ra_store(struct request_queue *q, const char *page, size_t count)
 {
        unsigned long ra_kb;
-       ssize_t ret = queue_var_store(&ra_kb, page, count);
+       ssize_t ret;
 
+       if (!q->disk)
+               return -EINVAL;
+       ret = queue_var_store(&ra_kb, page, count);
        if (ret < 0)
                return ret;
-
-       q->backing_dev_info->ra_pages = ra_kb >> (PAGE_SHIFT - 10);
-
+       q->disk->bdi->ra_pages = ra_kb >> (PAGE_SHIFT - 10);
        return ret;
 }
 
@@ -251,7 +254,8 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
 
        spin_lock_irq(&q->queue_lock);
        q->limits.max_sectors = max_sectors_kb << 1;
-       q->backing_dev_info->io_pages = max_sectors_kb >> (PAGE_SHIFT - 10);
+       if (q->disk)
+               q->disk->bdi->io_pages = max_sectors_kb >> (PAGE_SHIFT - 10);
        spin_unlock_irq(&q->queue_lock);
 
        return ret;
@@ -766,13 +770,6 @@ static void blk_exit_queue(struct request_queue *q)
         * e.g. blkcg_print_blkgs() to crash.
         */
        blkcg_exit_queue(q);
-
-       /*
-        * Since the cgroup code may dereference the @q->backing_dev_info
-        * pointer, only decrease its reference count after having removed the
-        * association with the block cgroup controller.
-        */
-       bdi_put(q->backing_dev_info);
 }
 
 /**
@@ -859,15 +856,6 @@ int blk_register_queue(struct gendisk *disk)
        struct device *dev = disk_to_dev(disk);
        struct request_queue *q = disk->queue;
 
-       if (WARN_ON(!q))
-               return -ENXIO;
-
-       WARN_ONCE(blk_queue_registered(q),
-                 "%s is registering an already registered queue\n",
-                 kobject_name(&dev->kobj));
-
-       blk_queue_update_readahead(q);
-
        ret = blk_trace_init_sysfs(dev);
        if (ret)
                return ret;
@@ -941,7 +929,6 @@ unlock:
 
        return ret;
 }
-EXPORT_SYMBOL_GPL(blk_register_queue);
 
 /**
  * blk_unregister_queue - counterpart of blk_register_queue()
index b1b22d8..55c4901 100644 (file)
@@ -178,6 +178,9 @@ struct throtl_grp {
        unsigned int bad_bio_cnt; /* bios exceeding latency threshold */
        unsigned long bio_cnt_reset_time;
 
+       atomic_t io_split_cnt[2];
+       atomic_t last_io_split_cnt[2];
+
        struct blkg_rwstat stat_bytes;
        struct blkg_rwstat stat_ios;
 };
@@ -777,6 +780,8 @@ static inline void throtl_start_new_slice_with_credit(struct throtl_grp *tg,
        tg->bytes_disp[rw] = 0;
        tg->io_disp[rw] = 0;
 
+       atomic_set(&tg->io_split_cnt[rw], 0);
+
        /*
         * Previous slice has expired. We must have trimmed it after last
         * bio dispatch. That means since start of last slice, we never used
@@ -799,6 +804,9 @@ static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw)
        tg->io_disp[rw] = 0;
        tg->slice_start[rw] = jiffies;
        tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
+
+       atomic_set(&tg->io_split_cnt[rw], 0);
+
        throtl_log(&tg->service_queue,
                   "[%c] new slice start=%lu end=%lu jiffies=%lu",
                   rw == READ ? 'R' : 'W', tg->slice_start[rw],
@@ -1031,6 +1039,9 @@ static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
                                jiffies + tg->td->throtl_slice);
        }
 
+       if (iops_limit != UINT_MAX)
+               tg->io_disp[rw] += atomic_xchg(&tg->io_split_cnt[rw], 0);
+
        if (tg_with_in_bps_limit(tg, bio, bps_limit, &bps_wait) &&
            tg_with_in_iops_limit(tg, bio, iops_limit, &iops_wait)) {
                if (wait)
@@ -2052,12 +2063,14 @@ static void throtl_downgrade_check(struct throtl_grp *tg)
        }
 
        if (tg->iops[READ][LIMIT_LOW]) {
+               tg->last_io_disp[READ] += atomic_xchg(&tg->last_io_split_cnt[READ], 0);
                iops = tg->last_io_disp[READ] * HZ / elapsed_time;
                if (iops >= tg->iops[READ][LIMIT_LOW])
                        tg->last_low_overflow_time[READ] = now;
        }
 
        if (tg->iops[WRITE][LIMIT_LOW]) {
+               tg->last_io_disp[WRITE] += atomic_xchg(&tg->last_io_split_cnt[WRITE], 0);
                iops = tg->last_io_disp[WRITE] * HZ / elapsed_time;
                if (iops >= tg->iops[WRITE][LIMIT_LOW])
                        tg->last_low_overflow_time[WRITE] = now;
@@ -2176,6 +2189,25 @@ static inline void throtl_update_latency_buckets(struct throtl_data *td)
 }
 #endif
 
+void blk_throtl_charge_bio_split(struct bio *bio)
+{
+       struct blkcg_gq *blkg = bio->bi_blkg;
+       struct throtl_grp *parent = blkg_to_tg(blkg);
+       struct throtl_service_queue *parent_sq;
+       bool rw = bio_data_dir(bio);
+
+       do {
+               if (!parent->has_rules[rw])
+                       break;
+
+               atomic_inc(&parent->io_split_cnt[rw]);
+               atomic_inc(&parent->last_io_split_cnt[rw]);
+
+               parent_sq = parent->service_queue.parent_sq;
+               parent = sq_to_tg(parent_sq);
+       } while (parent);
+}
+
 bool blk_throtl_bio(struct bio *bio)
 {
        struct request_queue *q = bio->bi_bdev->bd_disk->queue;
index 3ed71b8..874c1c3 100644 (file)
@@ -97,7 +97,7 @@ static void wb_timestamp(struct rq_wb *rwb, unsigned long *var)
  */
 static bool wb_recent_wait(struct rq_wb *rwb)
 {
-       struct bdi_writeback *wb = &rwb->rqos.q->backing_dev_info->wb;
+       struct bdi_writeback *wb = &rwb->rqos.q->disk->bdi->wb;
 
        return time_before(jiffies, wb->dirty_sleep + HZ);
 }
@@ -234,7 +234,7 @@ enum {
 
 static int latency_exceeded(struct rq_wb *rwb, struct blk_rq_stat *stat)
 {
-       struct backing_dev_info *bdi = rwb->rqos.q->backing_dev_info;
+       struct backing_dev_info *bdi = rwb->rqos.q->disk->bdi;
        struct rq_depth *rqd = &rwb->rq_depth;
        u64 thislat;
 
@@ -287,7 +287,7 @@ static int latency_exceeded(struct rq_wb *rwb, struct blk_rq_stat *stat)
 
 static void rwb_trace_step(struct rq_wb *rwb, const char *msg)
 {
-       struct backing_dev_info *bdi = rwb->rqos.q->backing_dev_info;
+       struct backing_dev_info *bdi = rwb->rqos.q->disk->bdi;
        struct rq_depth *rqd = &rwb->rq_depth;
 
        trace_wbt_step(bdi, msg, rqd->scale_step, rwb->cur_win_nsec,
@@ -359,7 +359,7 @@ static void wb_timer_fn(struct blk_stat_callback *cb)
 
        status = latency_exceeded(rwb, cb->stat);
 
-       trace_wbt_timer(rwb->rqos.q->backing_dev_info, status, rqd->scale_step,
+       trace_wbt_timer(rwb->rqos.q->disk->bdi, status, rqd->scale_step,
                        inflight);
 
        /*
index 86fce75..1d0c76c 100644 (file)
@@ -360,9 +360,6 @@ int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
        if (!blk_queue_is_zoned(q))
                return -ENOTTY;
 
-       if (!capable(CAP_SYS_ADMIN))
-               return -EACCES;
-
        if (copy_from_user(&rep, argp, sizeof(struct blk_zone_report)))
                return -EFAULT;
 
@@ -421,9 +418,6 @@ int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode,
        if (!blk_queue_is_zoned(q))
                return -ENOTTY;
 
-       if (!capable(CAP_SYS_ADMIN))
-               return -EACCES;
-
        if (!(mode & FMODE_WRITE))
                return -EBADF;
 
index 4b885c0..346d184 100644 (file)
@@ -44,11 +44,7 @@ static inline void __blk_get_queue(struct request_queue *q)
        kobject_get(&q->kobj);
 }
 
-static inline bool
-is_flush_rq(struct request *req, struct blk_mq_hw_ctx *hctx)
-{
-       return hctx->fq->flush_rq == req;
-}
+bool is_flush_rq(struct request *req);
 
 struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
                                              gfp_t flags);
@@ -132,7 +128,7 @@ static inline bool integrity_req_gap_front_merge(struct request *req,
                                bip_next->bip_vec[0].bv_offset);
 }
 
-void blk_integrity_add(struct gendisk *);
+int blk_integrity_add(struct gendisk *disk);
 void blk_integrity_del(struct gendisk *);
 #else /* CONFIG_BLK_DEV_INTEGRITY */
 static inline bool blk_integrity_merge_rq(struct request_queue *rq,
@@ -166,8 +162,9 @@ static inline bool bio_integrity_endio(struct bio *bio)
 static inline void bio_integrity_free(struct bio *bio)
 {
 }
-static inline void blk_integrity_add(struct gendisk *disk)
+static inline int blk_integrity_add(struct gendisk *disk)
 {
+       return 0;
 }
 static inline void blk_integrity_del(struct gendisk *disk)
 {
@@ -293,11 +290,13 @@ int create_task_io_context(struct task_struct *task, gfp_t gfp_mask, int node);
 extern int blk_throtl_init(struct request_queue *q);
 extern void blk_throtl_exit(struct request_queue *q);
 extern void blk_throtl_register_queue(struct request_queue *q);
+extern void blk_throtl_charge_bio_split(struct bio *bio);
 bool blk_throtl_bio(struct bio *bio);
 #else /* CONFIG_BLK_DEV_THROTTLING */
 static inline int blk_throtl_init(struct request_queue *q) { return 0; }
 static inline void blk_throtl_exit(struct request_queue *q) { }
 static inline void blk_throtl_register_queue(struct request_queue *q) { }
+static inline void blk_throtl_charge_bio_split(struct bio *bio) { }
 static inline bool blk_throtl_bio(struct bio *bio) { return false; }
 #endif /* CONFIG_BLK_DEV_THROTTLING */
 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
@@ -344,15 +343,14 @@ static inline void blk_queue_clear_zone_settings(struct request_queue *q) {}
 
 int blk_alloc_ext_minor(void);
 void blk_free_ext_minor(unsigned int minor);
-char *disk_name(struct gendisk *hd, int partno, char *buf);
 #define ADDPART_FLAG_NONE      0
 #define ADDPART_FLAG_RAID      1
 #define ADDPART_FLAG_WHOLEDISK 2
-int bdev_add_partition(struct block_device *bdev, int partno,
-               sector_t start, sector_t length);
-int bdev_del_partition(struct block_device *bdev, int partno);
-int bdev_resize_partition(struct block_device *bdev, int partno,
-               sector_t start, sector_t length);
+int bdev_add_partition(struct gendisk *disk, int partno, sector_t start,
+               sector_t length);
+int bdev_del_partition(struct gendisk *disk, int partno);
+int bdev_resize_partition(struct gendisk *disk, int partno, sector_t start,
+               sector_t length);
 
 int bio_add_hw_page(struct request_queue *q, struct bio *bio,
                struct page *page, unsigned int len, unsigned int offset,
@@ -360,7 +358,7 @@ int bio_add_hw_page(struct request_queue *q, struct bio *bio,
 
 struct request_queue *blk_alloc_queue(int node_id);
 
-void disk_alloc_events(struct gendisk *disk);
+int disk_alloc_events(struct gendisk *disk);
 void disk_add_events(struct gendisk *disk);
 void disk_del_events(struct gendisk *disk);
 void disk_release_events(struct gendisk *disk);
index 94081e0..05fc714 100644 (file)
@@ -67,18 +67,6 @@ static __init int init_emergency_pool(void)
 
 __initcall(init_emergency_pool);
 
-/*
- * highmem version, map in to vec
- */
-static void bounce_copy_vec(struct bio_vec *to, unsigned char *vfrom)
-{
-       unsigned char *vto;
-
-       vto = kmap_atomic(to->bv_page);
-       memcpy(vto + to->bv_offset, vfrom, to->bv_len);
-       kunmap_atomic(vto);
-}
-
 /*
  * Simple bounce buffer support for highmem pages. Depending on the
  * queue gfp mask set, *to may or may not be a highmem page. kmap it
@@ -86,7 +74,6 @@ static void bounce_copy_vec(struct bio_vec *to, unsigned char *vfrom)
  */
 static void copy_to_high_bio_irq(struct bio *to, struct bio *from)
 {
-       unsigned char *vfrom;
        struct bio_vec tovec, fromvec;
        struct bvec_iter iter;
        /*
@@ -104,11 +91,8 @@ static void copy_to_high_bio_irq(struct bio *to, struct bio *from)
                         * been modified by the block layer, so use the original
                         * copy, bounce_copy_vec already uses tovec->bv_len
                         */
-                       vfrom = page_address(fromvec.bv_page) +
-                               tovec.bv_offset;
-
-                       bounce_copy_vec(&tovec, vfrom);
-                       flush_dcache_page(tovec.bv_page);
+                       memcpy_to_bvec(&tovec, page_address(fromvec.bv_page) +
+                                      tovec.bv_offset);
                }
                bio_advance_iter(from, &from_iter, tovec.bv_len);
        }
@@ -255,24 +239,19 @@ void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
         * because the 'bio' is single-page bvec.
         */
        for (i = 0, to = bio->bi_io_vec; i < bio->bi_vcnt; to++, i++) {
-               struct page *page = to->bv_page;
+               struct page *bounce_page;
 
-               if (!PageHighMem(page))
+               if (!PageHighMem(to->bv_page))
                        continue;
 
-               to->bv_page = mempool_alloc(&page_pool, GFP_NOIO);
-               inc_zone_page_state(to->bv_page, NR_BOUNCE);
+               bounce_page = mempool_alloc(&page_pool, GFP_NOIO);
+               inc_zone_page_state(bounce_page, NR_BOUNCE);
 
                if (rw == WRITE) {
-                       char *vto, *vfrom;
-
-                       flush_dcache_page(page);
-
-                       vto = page_address(to->bv_page) + to->bv_offset;
-                       vfrom = kmap_atomic(page) + to->bv_offset;
-                       memcpy(vto, vfrom, to->bv_len);
-                       kunmap_atomic(vfrom);
+                       flush_dcache_page(to->bv_page);
+                       memcpy_from_bvec(page_address(bounce_page), to);
                }
+               to->bv_page = bounce_page;
        }
 
        trace_block_bio_bounce(*bio_orig);
diff --git a/block/cmdline-parser.c b/block/cmdline-parser.c
deleted file mode 100644 (file)
index f2a1457..0000000
+++ /dev/null
@@ -1,255 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Parse command line, get partition information
- *
- * Written by Cai Zhiyong <caizhiyong@huawei.com>
- *
- */
-#include <linux/export.h>
-#include <linux/cmdline-parser.h>
-
-static int parse_subpart(struct cmdline_subpart **subpart, char *partdef)
-{
-       int ret = 0;
-       struct cmdline_subpart *new_subpart;
-
-       *subpart = NULL;
-
-       new_subpart = kzalloc(sizeof(struct cmdline_subpart), GFP_KERNEL);
-       if (!new_subpart)
-               return -ENOMEM;
-
-       if (*partdef == '-') {
-               new_subpart->size = (sector_t)(~0ULL);
-               partdef++;
-       } else {
-               new_subpart->size = (sector_t)memparse(partdef, &partdef);
-               if (new_subpart->size < (sector_t)PAGE_SIZE) {
-                       pr_warn("cmdline partition size is invalid.");
-                       ret = -EINVAL;
-                       goto fail;
-               }
-       }
-
-       if (*partdef == '@') {
-               partdef++;
-               new_subpart->from = (sector_t)memparse(partdef, &partdef);
-       } else {
-               new_subpart->from = (sector_t)(~0ULL);
-       }
-
-       if (*partdef == '(') {
-               int length;
-               char *next = strchr(++partdef, ')');
-
-               if (!next) {
-                       pr_warn("cmdline partition format is invalid.");
-                       ret = -EINVAL;
-                       goto fail;
-               }
-
-               length = min_t(int, next - partdef,
-                              sizeof(new_subpart->name) - 1);
-               strncpy(new_subpart->name, partdef, length);
-               new_subpart->name[length] = '\0';
-
-               partdef = ++next;
-       } else
-               new_subpart->name[0] = '\0';
-
-       new_subpart->flags = 0;
-
-       if (!strncmp(partdef, "ro", 2)) {
-               new_subpart->flags |= PF_RDONLY;
-               partdef += 2;
-       }
-
-       if (!strncmp(partdef, "lk", 2)) {
-               new_subpart->flags |= PF_POWERUP_LOCK;
-               partdef += 2;
-       }
-
-       *subpart = new_subpart;
-       return 0;
-fail:
-       kfree(new_subpart);
-       return ret;
-}
-
-static void free_subpart(struct cmdline_parts *parts)
-{
-       struct cmdline_subpart *subpart;
-
-       while (parts->subpart) {
-               subpart = parts->subpart;
-               parts->subpart = subpart->next_subpart;
-               kfree(subpart);
-       }
-}
-
-static int parse_parts(struct cmdline_parts **parts, const char *bdevdef)
-{
-       int ret = -EINVAL;
-       char *next;
-       int length;
-       struct cmdline_subpart **next_subpart;
-       struct cmdline_parts *newparts;
-       char buf[BDEVNAME_SIZE + 32 + 4];
-
-       *parts = NULL;
-
-       newparts = kzalloc(sizeof(struct cmdline_parts), GFP_KERNEL);
-       if (!newparts)
-               return -ENOMEM;
-
-       next = strchr(bdevdef, ':');
-       if (!next) {
-               pr_warn("cmdline partition has no block device.");
-               goto fail;
-       }
-
-       length = min_t(int, next - bdevdef, sizeof(newparts->name) - 1);
-       strncpy(newparts->name, bdevdef, length);
-       newparts->name[length] = '\0';
-       newparts->nr_subparts = 0;
-
-       next_subpart = &newparts->subpart;
-
-       while (next && *(++next)) {
-               bdevdef = next;
-               next = strchr(bdevdef, ',');
-
-               length = (!next) ? (sizeof(buf) - 1) :
-                       min_t(int, next - bdevdef, sizeof(buf) - 1);
-
-               strncpy(buf, bdevdef, length);
-               buf[length] = '\0';
-
-               ret = parse_subpart(next_subpart, buf);
-               if (ret)
-                       goto fail;
-
-               newparts->nr_subparts++;
-               next_subpart = &(*next_subpart)->next_subpart;
-       }
-
-       if (!newparts->subpart) {
-               pr_warn("cmdline partition has no valid partition.");
-               ret = -EINVAL;
-               goto fail;
-       }
-
-       *parts = newparts;
-
-       return 0;
-fail:
-       free_subpart(newparts);
-       kfree(newparts);
-       return ret;
-}
-
-void cmdline_parts_free(struct cmdline_parts **parts)
-{
-       struct cmdline_parts *next_parts;
-
-       while (*parts) {
-               next_parts = (*parts)->next_parts;
-               free_subpart(*parts);
-               kfree(*parts);
-               *parts = next_parts;
-       }
-}
-EXPORT_SYMBOL(cmdline_parts_free);
-
-int cmdline_parts_parse(struct cmdline_parts **parts, const char *cmdline)
-{
-       int ret;
-       char *buf;
-       char *pbuf;
-       char *next;
-       struct cmdline_parts **next_parts;
-
-       *parts = NULL;
-
-       next = pbuf = buf = kstrdup(cmdline, GFP_KERNEL);
-       if (!buf)
-               return -ENOMEM;
-
-       next_parts = parts;
-
-       while (next && *pbuf) {
-               next = strchr(pbuf, ';');
-               if (next)
-                       *next = '\0';
-
-               ret = parse_parts(next_parts, pbuf);
-               if (ret)
-                       goto fail;
-
-               if (next)
-                       pbuf = ++next;
-
-               next_parts = &(*next_parts)->next_parts;
-       }
-
-       if (!*parts) {
-               pr_warn("cmdline partition has no valid partition.");
-               ret = -EINVAL;
-               goto fail;
-       }
-
-       ret = 0;
-done:
-       kfree(buf);
-       return ret;
-
-fail:
-       cmdline_parts_free(parts);
-       goto done;
-}
-EXPORT_SYMBOL(cmdline_parts_parse);
-
-struct cmdline_parts *cmdline_parts_find(struct cmdline_parts *parts,
-                                        const char *bdev)
-{
-       while (parts && strncmp(bdev, parts->name, sizeof(parts->name)))
-               parts = parts->next_parts;
-       return parts;
-}
-EXPORT_SYMBOL(cmdline_parts_find);
-
-/*
- *  add_part()
- *    0 success.
- *    1 can not add so many partitions.
- */
-int cmdline_parts_set(struct cmdline_parts *parts, sector_t disk_size,
-                     int slot,
-                     int (*add_part)(int, struct cmdline_subpart *, void *),
-                     void *param)
-{
-       sector_t from = 0;
-       struct cmdline_subpart *subpart;
-
-       for (subpart = parts->subpart; subpart;
-            subpart = subpart->next_subpart, slot++) {
-               if (subpart->from == (sector_t)(~0ULL))
-                       subpart->from = from;
-               else
-                       from = subpart->from;
-
-               if (from >= disk_size)
-                       break;
-
-               if (subpart->size > (disk_size - from))
-                       subpart->size = disk_size - from;
-
-               from += subpart->size;
-
-               if (add_part(slot, subpart, param))
-                       break;
-       }
-
-       return slot;
-}
-EXPORT_SYMBOL(cmdline_parts_set);
index a75931f..8d5496e 100644 (file)
@@ -163,15 +163,31 @@ void disk_flush_events(struct gendisk *disk, unsigned int mask)
        spin_unlock_irq(&ev->lock);
 }
 
+/*
+ * Tell userland about new events.  Only the events listed in @disk->events are
+ * reported, and only if DISK_EVENT_FLAG_UEVENT is set.  Otherwise, events are
+ * processed internally but never get reported to userland.
+ */
+static void disk_event_uevent(struct gendisk *disk, unsigned int events)
+{
+       char *envp[ARRAY_SIZE(disk_uevents) + 1] = { };
+       int nr_events = 0, i;
+
+       for (i = 0; i < ARRAY_SIZE(disk_uevents); i++)
+               if (events & disk->events & (1 << i))
+                       envp[nr_events++] = disk_uevents[i];
+
+       if (nr_events)
+               kobject_uevent_env(&disk_to_dev(disk)->kobj, KOBJ_CHANGE, envp);
+}
+
 static void disk_check_events(struct disk_events *ev,
                              unsigned int *clearing_ptr)
 {
        struct gendisk *disk = ev->disk;
-       char *envp[ARRAY_SIZE(disk_uevents) + 1] = { };
        unsigned int clearing = *clearing_ptr;
        unsigned int events;
        unsigned long intv;
-       int nr_events = 0, i;
 
        /* check events */
        events = disk->fops->check_events(disk, clearing);
@@ -190,19 +206,11 @@ static void disk_check_events(struct disk_events *ev,
 
        spin_unlock_irq(&ev->lock);
 
-       /*
-        * Tell userland about new events.  Only the events listed in
-        * @disk->events are reported, and only if DISK_EVENT_FLAG_UEVENT
-        * is set. Otherwise, events are processed internally but never
-        * get reported to userland.
-        */
-       for (i = 0; i < ARRAY_SIZE(disk_uevents); i++)
-               if ((events & disk->events & (1 << i)) &&
-                   (disk->event_flags & DISK_EVENT_FLAG_UEVENT))
-                       envp[nr_events++] = disk_uevents[i];
+       if (events & DISK_EVENT_MEDIA_CHANGE)
+               inc_diskseq(disk);
 
-       if (nr_events)
-               kobject_uevent_env(&disk_to_dev(disk)->kobj, KOBJ_CHANGE, envp);
+       if (disk->event_flags & DISK_EVENT_FLAG_UEVENT)
+               disk_event_uevent(disk, events);
 }
 
 /**
@@ -281,6 +289,32 @@ bool bdev_check_media_change(struct block_device *bdev)
 }
 EXPORT_SYMBOL(bdev_check_media_change);
 
+/**
+ * disk_force_media_change - force a media change event
+ * @disk: the disk which will raise the event
+ * @events: the events to raise
+ *
+ * Generate uevents for the disk. If DISK_EVENT_MEDIA_CHANGE is present,
+ * attempt to free all dentries and inodes and invalidate all block
+ * device page cache entries in that case.
+ *
+ * Returns %true if DISK_EVENT_MEDIA_CHANGE was raised, or %false if not.
+ */
+bool disk_force_media_change(struct gendisk *disk, unsigned int events)
+{
+       disk_event_uevent(disk, events);
+
+       if (!(events & DISK_EVENT_MEDIA_CHANGE))
+               return false;
+
+       if (__invalidate_device(disk->part0, true))
+               pr_warn("VFS: busy inodes on changed media %s\n",
+                       disk->disk_name);
+       set_bit(GD_NEED_PART_SCAN, &disk->state);
+       return true;
+}
+EXPORT_SYMBOL_GPL(disk_force_media_change);
+
 /*
  * Separate this part out so that a different pointer for clearing_ptr can be
  * passed in for disk_clear_events.
@@ -410,17 +444,17 @@ module_param_cb(events_dfl_poll_msecs, &disk_events_dfl_poll_msecs_param_ops,
 /*
  * disk_{alloc|add|del|release}_events - initialize and destroy disk_events.
  */
-void disk_alloc_events(struct gendisk *disk)
+int disk_alloc_events(struct gendisk *disk)
 {
        struct disk_events *ev;
 
        if (!disk->fops->check_events || !disk->events)
-               return;
+               return 0;
 
        ev = kzalloc(sizeof(*ev), GFP_KERNEL);
        if (!ev) {
                pr_warn("%s: failed to initialize events\n", disk->disk_name);
-               return;
+               return -ENOMEM;
        }
 
        INIT_LIST_HEAD(&ev->node);
@@ -432,6 +466,7 @@ void disk_alloc_events(struct gendisk *disk)
        INIT_DELAYED_WORK(&ev->dwork, disk_events_workfn);
 
        disk->ev = ev;
+       return 0;
 }
 
 void disk_add_events(struct gendisk *disk)
index 52ada14..ff45d83 100644 (file)
@@ -336,6 +336,9 @@ enum elv_merge elv_merge(struct request_queue *q, struct request **req,
        __rq = elv_rqhash_find(q, bio->bi_iter.bi_sector);
        if (__rq && elv_bio_merge_ok(__rq, bio)) {
                *req = __rq;
+
+               if (blk_discard_mergable(__rq))
+                       return ELEVATOR_DISCARD_MERGE;
                return ELEVATOR_BACK_MERGE;
        }
 
@@ -630,6 +633,9 @@ static inline bool elv_support_iosched(struct request_queue *q)
  */
 static struct elevator_type *elevator_get_default(struct request_queue *q)
 {
+       if (q->tag_set && q->tag_set->flags & BLK_MQ_F_NO_SCHED_BY_DEFAULT)
+               return NULL;
+
        if (q->nr_hw_queues != 1 &&
                        !blk_mq_is_sbitmap_shared(q->tag_set->flags))
                return NULL;
@@ -702,7 +708,6 @@ void elevator_init_mq(struct request_queue *q)
                elevator_put(e);
        }
 }
-EXPORT_SYMBOL_GPL(elevator_init_mq); /* only for dm-rq */
 
 /*
  * switch to new_e io scheduler. be careful not to introduce deadlocks -
index af4d2ab..567549a 100644 (file)
 
 static struct kobject *block_depr;
 
+/*
+ * Unique, monotonically increasing sequential number associated with block
+ * devices instances (i.e. incremented each time a device is attached).
+ * Associating uevents with block devices in userspace is difficult and racy:
+ * the uevent netlink socket is lossy, and on slow and overloaded systems has
+ * a very high latency.
+ * Block devices do not have exclusive owners in userspace, any process can set
+ * one up (e.g. loop devices). Moreover, device names can be reused (e.g. loop0
+ * can be reused again and again).
+ * A userspace process setting up a block device and watching for its events
+ * cannot thus reliably tell whether an event relates to the device it just set
+ * up or another earlier instance with the same name.
+ * This sequential number allows userspace processes to solve this problem, and
+ * uniquely associate an uevent with the lifetime of a device.
+ */
+static atomic64_t diskseq;
+
 /* for extended dynamic devt allocation, currently only one major is used */
 #define NR_EXT_DEVT            (1 << MINORBITS)
 static DEFINE_IDA(ext_devt_ida);
@@ -60,7 +77,8 @@ bool set_capacity_and_notify(struct gendisk *disk, sector_t size)
         * initial capacity during probing.
         */
        if (size == capacity ||
-           (disk->flags & (GENHD_FL_UP | GENHD_FL_HIDDEN)) != GENHD_FL_UP)
+           !disk_live(disk) ||
+           (disk->flags & GENHD_FL_HIDDEN))
                return false;
 
        pr_info("%s: detected capacity change from %lld to %lld\n",
@@ -78,11 +96,17 @@ bool set_capacity_and_notify(struct gendisk *disk, sector_t size)
 EXPORT_SYMBOL_GPL(set_capacity_and_notify);
 
 /*
- * Format the device name of the indicated disk into the supplied buffer and
- * return a pointer to that same buffer for convenience.
+ * Format the device name of the indicated block device into the supplied buffer
+ * and return a pointer to that same buffer for convenience.
+ *
+ * Note: do not use this in new code, use the %pg specifier to sprintf and
+ * printk instead.
  */
-char *disk_name(struct gendisk *hd, int partno, char *buf)
+const char *bdevname(struct block_device *bdev, char *buf)
 {
+       struct gendisk *hd = bdev->bd_disk;
+       int partno = bdev->bd_partno;
+
        if (!partno)
                snprintf(buf, BDEVNAME_SIZE, "%s", hd->disk_name);
        else if (isdigit(hd->disk_name[strlen(hd->disk_name)-1]))
@@ -92,11 +116,6 @@ char *disk_name(struct gendisk *hd, int partno, char *buf)
 
        return buf;
 }
-
-const char *bdevname(struct block_device *bdev, char *buf)
-{
-       return disk_name(bdev->bd_disk, bdev->bd_partno, buf);
-}
 EXPORT_SYMBOL(bdevname);
 
 static void part_stat_read_all(struct block_device *part,
@@ -294,54 +313,19 @@ void unregister_blkdev(unsigned int major, const char *name)
 
 EXPORT_SYMBOL(unregister_blkdev);
 
-/**
- * blk_mangle_minor - scatter minor numbers apart
- * @minor: minor number to mangle
- *
- * Scatter consecutively allocated @minor number apart if MANGLE_DEVT
- * is enabled.  Mangling twice gives the original value.
- *
- * RETURNS:
- * Mangled value.
- *
- * CONTEXT:
- * Don't care.
- */
-static int blk_mangle_minor(int minor)
-{
-#ifdef CONFIG_DEBUG_BLOCK_EXT_DEVT
-       int i;
-
-       for (i = 0; i < MINORBITS / 2; i++) {
-               int low = minor & (1 << i);
-               int high = minor & (1 << (MINORBITS - 1 - i));
-               int distance = MINORBITS - 1 - 2 * i;
-
-               minor ^= low | high;    /* clear both bits */
-               low <<= distance;       /* swap the positions */
-               high >>= distance;
-               minor |= low | high;    /* and set */
-       }
-#endif
-       return minor;
-}
-
 int blk_alloc_ext_minor(void)
 {
        int idx;
 
        idx = ida_alloc_range(&ext_devt_ida, 0, NR_EXT_DEVT, GFP_KERNEL);
-       if (idx < 0) {
-               if (idx == -ENOSPC)
-                       return -EBUSY;
-               return idx;
-       }
-       return blk_mangle_minor(idx);
+       if (idx == -ENOSPC)
+               return -EBUSY;
+       return idx;
 }
 
 void blk_free_ext_minor(unsigned int minor)
 {
-       ida_free(&ext_devt_ida, blk_mangle_minor(minor));
+       ida_free(&ext_devt_ida, minor);
 }
 
 static char *bdevt_str(dev_t devt, char *buf)
@@ -390,78 +374,20 @@ static void disk_scan_partitions(struct gendisk *disk)
                blkdev_put(bdev, FMODE_READ);
 }
 
-static void register_disk(struct device *parent, struct gendisk *disk,
-                         const struct attribute_group **groups)
-{
-       struct device *ddev = disk_to_dev(disk);
-       int err;
-
-       ddev->parent = parent;
-
-       dev_set_name(ddev, "%s", disk->disk_name);
-
-       /* delay uevents, until we scanned partition table */
-       dev_set_uevent_suppress(ddev, 1);
-
-       if (groups) {
-               WARN_ON(ddev->groups);
-               ddev->groups = groups;
-       }
-       if (device_add(ddev))
-               return;
-       if (!sysfs_deprecated) {
-               err = sysfs_create_link(block_depr, &ddev->kobj,
-                                       kobject_name(&ddev->kobj));
-               if (err) {
-                       device_del(ddev);
-                       return;
-               }
-       }
-
-       /*
-        * avoid probable deadlock caused by allocating memory with
-        * GFP_KERNEL in runtime_resume callback of its all ancestor
-        * devices
-        */
-       pm_runtime_set_memalloc_noio(ddev, true);
-
-       disk->part0->bd_holder_dir =
-               kobject_create_and_add("holders", &ddev->kobj);
-       disk->slave_dir = kobject_create_and_add("slaves", &ddev->kobj);
-
-       if (disk->flags & GENHD_FL_HIDDEN)
-               return;
-
-       disk_scan_partitions(disk);
-
-       /* announce the disk and partitions after all partitions are created */
-       dev_set_uevent_suppress(ddev, 0);
-       disk_uevent(disk, KOBJ_ADD);
-
-       if (disk->queue->backing_dev_info->dev) {
-               err = sysfs_create_link(&ddev->kobj,
-                         &disk->queue->backing_dev_info->dev->kobj,
-                         "bdi");
-               WARN_ON(err);
-       }
-}
-
 /**
- * __device_add_disk - add disk information to kernel list
+ * device_add_disk - add disk information to kernel list
  * @parent: parent device for the disk
  * @disk: per-device partitioning information
  * @groups: Additional per-device sysfs groups
- * @register_queue: register the queue if set to true
  *
  * This function registers the partitioning information in @disk
  * with the kernel.
- *
- * FIXME: error handling
  */
-static void __device_add_disk(struct device *parent, struct gendisk *disk,
-                             const struct attribute_group **groups,
-                             bool register_queue)
+int device_add_disk(struct device *parent, struct gendisk *disk,
+                    const struct attribute_group **groups)
+
 {
+       struct device *ddev = disk_to_dev(disk);
        int ret;
 
        /*
@@ -470,8 +396,7 @@ static void __device_add_disk(struct device *parent, struct gendisk *disk,
         * elevator if one is needed, that is, for devices requesting queue
         * registration.
         */
-       if (register_queue)
-               elevator_init_mq(disk->queue);
+       elevator_init_mq(disk->queue);
 
        /*
         * If the driver provides an explicit major number it also must provide
@@ -481,7 +406,8 @@ static void __device_add_disk(struct device *parent, struct gendisk *disk,
         * and all partitions from the extended dev_t space.
         */
        if (disk->major) {
-               WARN_ON(!disk->minors);
+               if (WARN_ON(!disk->minors))
+                       return -EINVAL;
 
                if (disk->minors > DISK_MAX_PARTS) {
                        pr_err("block: can't allocate more than %d partitions\n",
@@ -489,21 +415,65 @@ static void __device_add_disk(struct device *parent, struct gendisk *disk,
                        disk->minors = DISK_MAX_PARTS;
                }
        } else {
-               WARN_ON(disk->minors);
+               if (WARN_ON(disk->minors))
+                       return -EINVAL;
 
                ret = blk_alloc_ext_minor();
-               if (ret < 0) {
-                       WARN_ON(1);
-                       return;
-               }
+               if (ret < 0)
+                       return ret;
                disk->major = BLOCK_EXT_MAJOR;
-               disk->first_minor = MINOR(ret);
+               disk->first_minor = ret;
                disk->flags |= GENHD_FL_EXT_DEVT;
        }
 
-       disk->flags |= GENHD_FL_UP;
+       ret = disk_alloc_events(disk);
+       if (ret)
+               goto out_free_ext_minor;
+
+       /* delay uevents, until we scanned partition table */
+       dev_set_uevent_suppress(ddev, 1);
+
+       ddev->parent = parent;
+       ddev->groups = groups;
+       dev_set_name(ddev, "%s", disk->disk_name);
+       if (!(disk->flags & GENHD_FL_HIDDEN))
+               ddev->devt = MKDEV(disk->major, disk->first_minor);
+       ret = device_add(ddev);
+       if (ret)
+               goto out_disk_release_events;
+       if (!sysfs_deprecated) {
+               ret = sysfs_create_link(block_depr, &ddev->kobj,
+                                       kobject_name(&ddev->kobj));
+               if (ret)
+                       goto out_device_del;
+       }
+
+       /*
+        * avoid probable deadlock caused by allocating memory with
+        * GFP_KERNEL in runtime_resume callback of its all ancestor
+        * devices
+        */
+       pm_runtime_set_memalloc_noio(ddev, true);
+
+       ret = blk_integrity_add(disk);
+       if (ret)
+               goto out_del_block_link;
+
+       disk->part0->bd_holder_dir =
+               kobject_create_and_add("holders", &ddev->kobj);
+       if (!disk->part0->bd_holder_dir)
+               goto out_del_integrity;
+       disk->slave_dir = kobject_create_and_add("slaves", &ddev->kobj);
+       if (!disk->slave_dir)
+               goto out_put_holder_dir;
+
+       ret = bd_register_pending_holders(disk);
+       if (ret < 0)
+               goto out_put_slave_dir;
 
-       disk_alloc_events(disk);
+       ret = blk_register_queue(disk);
+       if (ret)
+               goto out_put_slave_dir;
 
        if (disk->flags & GENHD_FL_HIDDEN) {
                /*
@@ -513,48 +483,56 @@ static void __device_add_disk(struct device *parent, struct gendisk *disk,
                disk->flags |= GENHD_FL_SUPPRESS_PARTITION_INFO;
                disk->flags |= GENHD_FL_NO_PART_SCAN;
        } else {
-               struct backing_dev_info *bdi = disk->queue->backing_dev_info;
-               struct device *dev = disk_to_dev(disk);
-
-               /* Register BDI before referencing it from bdev */
-               dev->devt = MKDEV(disk->major, disk->first_minor);
-               ret = bdi_register(bdi, "%u:%u",
+               ret = bdi_register(disk->bdi, "%u:%u",
                                   disk->major, disk->first_minor);
-               WARN_ON(ret);
-               bdi_set_owner(bdi, dev);
-               bdev_add(disk->part0, dev->devt);
-       }
-       register_disk(parent, disk, groups);
-       if (register_queue)
-               blk_register_queue(disk);
+               if (ret)
+                       goto out_unregister_queue;
+               bdi_set_owner(disk->bdi, ddev);
+               ret = sysfs_create_link(&ddev->kobj,
+                                       &disk->bdi->dev->kobj, "bdi");
+               if (ret)
+                       goto out_unregister_bdi;
 
-       /*
-        * Take an extra ref on queue which will be put on disk_release()
-        * so that it sticks around as long as @disk is there.
-        */
-       if (blk_get_queue(disk->queue))
-               set_bit(GD_QUEUE_REF, &disk->state);
-       else
-               WARN_ON_ONCE(1);
+               bdev_add(disk->part0, ddev->devt);
+               disk_scan_partitions(disk);
 
-       disk_add_events(disk);
-       blk_integrity_add(disk);
-}
+               /*
+                * Announce the disk and partitions after all partitions are
+                * created. (for hidden disks uevents remain suppressed forever)
+                */
+               dev_set_uevent_suppress(ddev, 0);
+               disk_uevent(disk, KOBJ_ADD);
+       }
 
-void device_add_disk(struct device *parent, struct gendisk *disk,
-                    const struct attribute_group **groups)
+       disk_update_readahead(disk);
+       disk_add_events(disk);
+       return 0;
 
-{
-       __device_add_disk(parent, disk, groups, true);
+out_unregister_bdi:
+       if (!(disk->flags & GENHD_FL_HIDDEN))
+               bdi_unregister(disk->bdi);
+out_unregister_queue:
+       blk_unregister_queue(disk);
+out_put_slave_dir:
+       kobject_put(disk->slave_dir);
+out_put_holder_dir:
+       kobject_put(disk->part0->bd_holder_dir);
+out_del_integrity:
+       blk_integrity_del(disk);
+out_del_block_link:
+       if (!sysfs_deprecated)
+               sysfs_remove_link(block_depr, dev_name(ddev));
+out_device_del:
+       device_del(ddev);
+out_disk_release_events:
+       disk_release_events(disk);
+out_free_ext_minor:
+       if (disk->major == BLOCK_EXT_MAJOR)
+               blk_free_ext_minor(disk->first_minor);
+       return WARN_ON_ONCE(ret); /* keep until all callers handle errors */
 }
 EXPORT_SYMBOL(device_add_disk);
 
-void device_add_disk_no_queue_reg(struct device *parent, struct gendisk *disk)
-{
-       __device_add_disk(parent, disk, NULL, false);
-}
-EXPORT_SYMBOL(device_add_disk_no_queue_reg);
-
 /**
  * del_gendisk - remove the gendisk
  * @disk: the struct gendisk to remove
@@ -578,26 +556,20 @@ void del_gendisk(struct gendisk *disk)
 {
        might_sleep();
 
-       if (WARN_ON_ONCE(!disk->queue))
+       if (WARN_ON_ONCE(!disk_live(disk) && !(disk->flags & GENHD_FL_HIDDEN)))
                return;
 
        blk_integrity_del(disk);
        disk_del_events(disk);
 
        mutex_lock(&disk->open_mutex);
-       disk->flags &= ~GENHD_FL_UP;
+       remove_inode_hash(disk->part0->bd_inode);
        blk_drop_partitions(disk);
        mutex_unlock(&disk->open_mutex);
 
        fsync_bdev(disk->part0);
        __invalidate_device(disk->part0, true);
 
-       /*
-        * Unhash the bdev inode for this device so that it can't be looked
-        * up any more even if openers still hold references to it.
-        */
-       remove_inode_hash(disk->part0->bd_inode);
-
        set_capacity(disk, 0);
 
        if (!(disk->flags & GENHD_FL_HIDDEN)) {
@@ -607,7 +579,7 @@ void del_gendisk(struct gendisk *disk)
                 * Unregister bdi before releasing device numbers (as they can
                 * get reused and we'd get clashes in sysfs).
                 */
-               bdi_unregister(disk->queue->backing_dev_info);
+               bdi_unregister(disk->bdi);
        }
 
        blk_unregister_queue(disk);
@@ -683,7 +655,6 @@ void __init printk_all_partitions(void)
        while ((dev = class_dev_iter_next(&iter))) {
                struct gendisk *disk = dev_to_disk(dev);
                struct block_device *part;
-               char name_buf[BDEVNAME_SIZE];
                char devt_buf[BDEVT_SIZE];
                unsigned long idx;
 
@@ -703,11 +674,10 @@ void __init printk_all_partitions(void)
                xa_for_each(&disk->part_tbl, idx, part) {
                        if (!bdev_nr_sectors(part))
                                continue;
-                       printk("%s%s %10llu %s %s",
+                       printk("%s%s %10llu %pg %s",
                               bdev_is_partition(part) ? "  " : "",
                               bdevt_str(part->bd_dev, devt_buf),
-                              bdev_nr_sectors(part) >> 1,
-                              disk_name(disk, part->bd_partno, name_buf),
+                              bdev_nr_sectors(part) >> 1, part,
                               part->bd_meta_info ?
                                        part->bd_meta_info->uuid : "");
                        if (bdev_is_partition(part))
@@ -785,7 +755,6 @@ static int show_partition(struct seq_file *seqf, void *v)
        struct gendisk *sgp = v;
        struct block_device *part;
        unsigned long idx;
-       char buf[BDEVNAME_SIZE];
 
        /* Don't show non-partitionable removeable devices or empty devices */
        if (!get_capacity(sgp) || (!disk_max_parts(sgp) &&
@@ -798,10 +767,9 @@ static int show_partition(struct seq_file *seqf, void *v)
        xa_for_each(&sgp->part_tbl, idx, part) {
                if (!bdev_nr_sectors(part))
                        continue;
-               seq_printf(seqf, "%4d  %7d %10llu %s\n",
+               seq_printf(seqf, "%4d  %7d %10llu %pg\n",
                           MAJOR(part->bd_dev), MINOR(part->bd_dev),
-                          bdev_nr_sectors(part) >> 1,
-                          disk_name(sgp, part->bd_partno, buf));
+                          bdev_nr_sectors(part) >> 1, part);
        }
        rcu_read_unlock();
        return 0;
@@ -968,6 +936,14 @@ static ssize_t disk_discard_alignment_show(struct device *dev,
        return sprintf(buf, "%d\n", queue_discard_alignment(disk->queue));
 }
 
+static ssize_t diskseq_show(struct device *dev,
+                           struct device_attribute *attr, char *buf)
+{
+       struct gendisk *disk = dev_to_disk(dev);
+
+       return sprintf(buf, "%llu\n", disk->diskseq);
+}
+
 static DEVICE_ATTR(range, 0444, disk_range_show, NULL);
 static DEVICE_ATTR(ext_range, 0444, disk_ext_range_show, NULL);
 static DEVICE_ATTR(removable, 0444, disk_removable_show, NULL);
@@ -980,6 +956,7 @@ static DEVICE_ATTR(capability, 0444, disk_capability_show, NULL);
 static DEVICE_ATTR(stat, 0444, part_stat_show, NULL);
 static DEVICE_ATTR(inflight, 0444, part_inflight_show, NULL);
 static DEVICE_ATTR(badblocks, 0644, disk_badblocks_show, disk_badblocks_store);
+static DEVICE_ATTR(diskseq, 0444, diskseq_show, NULL);
 
 #ifdef CONFIG_FAIL_MAKE_REQUEST
 ssize_t part_fail_show(struct device *dev,
@@ -1025,6 +1002,7 @@ static struct attribute *disk_attrs[] = {
        &dev_attr_events.attr,
        &dev_attr_events_async.attr,
        &dev_attr_events_poll_msecs.attr,
+       &dev_attr_diskseq.attr,
 #ifdef CONFIG_FAIL_MAKE_REQUEST
        &dev_attr_fail.attr,
 #endif
@@ -1074,18 +1052,24 @@ static void disk_release(struct device *dev)
 
        might_sleep();
 
-       if (MAJOR(dev->devt) == BLOCK_EXT_MAJOR)
-               blk_free_ext_minor(MINOR(dev->devt));
        disk_release_events(disk);
        kfree(disk->random);
        xa_destroy(&disk->part_tbl);
-       bdput(disk->part0);
-       if (test_bit(GD_QUEUE_REF, &disk->state) && disk->queue)
-               blk_put_queue(disk->queue);
-       kfree(disk);
+       disk->queue->disk = NULL;
+       blk_put_queue(disk->queue);
+       iput(disk->part0->bd_inode);    /* frees the disk */
 }
+
+static int block_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+       struct gendisk *disk = dev_to_disk(dev);
+
+       return add_uevent_var(env, "DISKSEQ=%llu", disk->diskseq);
+}
+
 struct class block_class = {
        .name           = "block",
+       .dev_uevent     = block_uevent,
 };
 
 static char *block_devnode(struct device *dev, umode_t *mode,
@@ -1117,7 +1101,6 @@ static int diskstats_show(struct seq_file *seqf, void *v)
 {
        struct gendisk *gp = v;
        struct block_device *hd;
-       char buf[BDEVNAME_SIZE];
        unsigned int inflight;
        struct disk_stats stat;
        unsigned long idx;
@@ -1140,15 +1123,14 @@ static int diskstats_show(struct seq_file *seqf, void *v)
                else
                        inflight = part_in_flight(hd);
 
-               seq_printf(seqf, "%4d %7d %s "
+               seq_printf(seqf, "%4d %7d %pg "
                           "%lu %lu %lu %u "
                           "%lu %lu %lu %u "
                           "%u %u %u "
                           "%lu %lu %lu %u "
                           "%lu %u"
                           "\n",
-                          MAJOR(hd->bd_dev), MINOR(hd->bd_dev),
-                          disk_name(gp, hd->bd_partno, buf),
+                          MAJOR(hd->bd_dev), MINOR(hd->bd_dev), hd,
                           stat.ios[STAT_READ],
                           stat.merges[STAT_READ],
                           stat.sectors[STAT_READ],
@@ -1240,17 +1222,25 @@ dev_t blk_lookup_devt(const char *name, int partno)
        return devt;
 }
 
-struct gendisk *__alloc_disk_node(int minors, int node_id)
+struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id,
+               struct lock_class_key *lkclass)
 {
        struct gendisk *disk;
 
+       if (!blk_get_queue(q))
+               return NULL;
+
        disk = kzalloc_node(sizeof(struct gendisk), GFP_KERNEL, node_id);
        if (!disk)
-               return NULL;
+               goto out_put_queue;
+
+       disk->bdi = bdi_alloc(node_id);
+       if (!disk->bdi)
+               goto out_free_disk;
 
        disk->part0 = bdev_alloc(disk, 0);
        if (!disk->part0)
-               goto out_free_disk;
+               goto out_free_bdi;
 
        disk->node_id = node_id;
        mutex_init(&disk->open_mutex);
@@ -1258,23 +1248,33 @@ struct gendisk *__alloc_disk_node(int minors, int node_id)
        if (xa_insert(&disk->part_tbl, 0, disk->part0, GFP_KERNEL))
                goto out_destroy_part_tbl;
 
-       disk->minors = minors;
        rand_initialize_disk(disk);
        disk_to_dev(disk)->class = &block_class;
        disk_to_dev(disk)->type = &disk_type;
        device_initialize(disk_to_dev(disk));
+       inc_diskseq(disk);
+       disk->queue = q;
+       q->disk = disk;
+       lockdep_init_map(&disk->lockdep_map, "(bio completion)", lkclass, 0);
+#ifdef CONFIG_BLOCK_HOLDER_DEPRECATED
+       INIT_LIST_HEAD(&disk->slave_bdevs);
+#endif
        return disk;
 
 out_destroy_part_tbl:
        xa_destroy(&disk->part_tbl);
-       bdput(disk->part0);
+       iput(disk->part0->bd_inode);
+out_free_bdi:
+       bdi_put(disk->bdi);
 out_free_disk:
        kfree(disk);
+out_put_queue:
+       blk_put_queue(q);
        return NULL;
 }
 EXPORT_SYMBOL(__alloc_disk_node);
 
-struct gendisk *__blk_alloc_disk(int node)
+struct gendisk *__blk_alloc_disk(int node, struct lock_class_key *lkclass)
 {
        struct request_queue *q;
        struct gendisk *disk;
@@ -1283,12 +1283,11 @@ struct gendisk *__blk_alloc_disk(int node)
        if (!q)
                return NULL;
 
-       disk = __alloc_disk_node(0, node);
+       disk = __alloc_disk_node(q, node, lkclass);
        if (!disk) {
                blk_cleanup_queue(q);
                return NULL;
        }
-       disk->queue = q;
        return disk;
 }
 EXPORT_SYMBOL(__blk_alloc_disk);
@@ -1363,3 +1362,8 @@ int bdev_read_only(struct block_device *bdev)
        return bdev->bd_read_only || get_disk_ro(bdev->bd_disk);
 }
 EXPORT_SYMBOL(bdev_read_only);
+
+void inc_diskseq(struct gendisk *disk)
+{
+       disk->diskseq = atomic64_inc_return(&diskseq);
+}
diff --git a/block/holder.c b/block/holder.c
new file mode 100644 (file)
index 0000000..9dc0841
--- /dev/null
@@ -0,0 +1,174 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/genhd.h>
+
+struct bd_holder_disk {
+       struct list_head        list;
+       struct block_device     *bdev;
+       int                     refcnt;
+};
+
+static struct bd_holder_disk *bd_find_holder_disk(struct block_device *bdev,
+                                                 struct gendisk *disk)
+{
+       struct bd_holder_disk *holder;
+
+       list_for_each_entry(holder, &disk->slave_bdevs, list)
+               if (holder->bdev == bdev)
+                       return holder;
+       return NULL;
+}
+
+static int add_symlink(struct kobject *from, struct kobject *to)
+{
+       return sysfs_create_link(from, to, kobject_name(to));
+}
+
+static void del_symlink(struct kobject *from, struct kobject *to)
+{
+       sysfs_remove_link(from, kobject_name(to));
+}
+
+static int __link_disk_holder(struct block_device *bdev, struct gendisk *disk)
+{
+       int ret;
+
+       ret = add_symlink(disk->slave_dir, bdev_kobj(bdev));
+       if (ret)
+               return ret;
+       ret = add_symlink(bdev->bd_holder_dir, &disk_to_dev(disk)->kobj);
+       if (ret)
+               del_symlink(disk->slave_dir, bdev_kobj(bdev));
+       return ret;
+}
+
+/**
+ * bd_link_disk_holder - create symlinks between holding disk and slave bdev
+ * @bdev: the claimed slave bdev
+ * @disk: the holding disk
+ *
+ * DON'T USE THIS UNLESS YOU'RE ALREADY USING IT.
+ *
+ * This function creates the following sysfs symlinks.
+ *
+ * - from "slaves" directory of the holder @disk to the claimed @bdev
+ * - from "holders" directory of the @bdev to the holder @disk
+ *
+ * For example, if /dev/dm-0 maps to /dev/sda and disk for dm-0 is
+ * passed to bd_link_disk_holder(), then:
+ *
+ *   /sys/block/dm-0/slaves/sda --> /sys/block/sda
+ *   /sys/block/sda/holders/dm-0 --> /sys/block/dm-0
+ *
+ * The caller must have claimed @bdev before calling this function and
+ * ensure that both @bdev and @disk are valid during the creation and
+ * lifetime of these symlinks.
+ *
+ * CONTEXT:
+ * Might sleep.
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
+int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk)
+{
+       struct bd_holder_disk *holder;
+       int ret = 0;
+
+       mutex_lock(&disk->open_mutex);
+
+       WARN_ON_ONCE(!bdev->bd_holder);
+
+       /* FIXME: remove the following once add_disk() handles errors */
+       if (WARN_ON(!bdev->bd_holder_dir))
+               goto out_unlock;
+
+       holder = bd_find_holder_disk(bdev, disk);
+       if (holder) {
+               holder->refcnt++;
+               goto out_unlock;
+       }
+
+       holder = kzalloc(sizeof(*holder), GFP_KERNEL);
+       if (!holder) {
+               ret = -ENOMEM;
+               goto out_unlock;
+       }
+
+       INIT_LIST_HEAD(&holder->list);
+       holder->bdev = bdev;
+       holder->refcnt = 1;
+       if (disk->slave_dir) {
+               ret = __link_disk_holder(bdev, disk);
+               if (ret) {
+                       kfree(holder);
+                       goto out_unlock;
+               }
+       }
+
+       list_add(&holder->list, &disk->slave_bdevs);
+       /*
+        * del_gendisk drops the initial reference to bd_holder_dir, so we need
+        * to keep our own here to allow for cleanup past that point.
+        */
+       kobject_get(bdev->bd_holder_dir);
+
+out_unlock:
+       mutex_unlock(&disk->open_mutex);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(bd_link_disk_holder);
+
+static void __unlink_disk_holder(struct block_device *bdev,
+               struct gendisk *disk)
+{
+       del_symlink(disk->slave_dir, bdev_kobj(bdev));
+       del_symlink(bdev->bd_holder_dir, &disk_to_dev(disk)->kobj);
+}
+
+/**
+ * bd_unlink_disk_holder - destroy symlinks created by bd_link_disk_holder()
+ * @bdev: the claimed slave bdev
+ * @disk: the holding disk
+ *
+ * DON'T USE THIS UNLESS YOU'RE ALREADY USING IT.
+ *
+ * CONTEXT:
+ * Might sleep.
+ */
+void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk)
+{
+       struct bd_holder_disk *holder;
+
+       mutex_lock(&disk->open_mutex);
+       holder = bd_find_holder_disk(bdev, disk);
+       if (!WARN_ON_ONCE(holder == NULL) && !--holder->refcnt) {
+               if (disk->slave_dir)
+                       __unlink_disk_holder(bdev, disk);
+               kobject_put(bdev->bd_holder_dir);
+               list_del_init(&holder->list);
+               kfree(holder);
+       }
+       mutex_unlock(&disk->open_mutex);
+}
+EXPORT_SYMBOL_GPL(bd_unlink_disk_holder);
+
+int bd_register_pending_holders(struct gendisk *disk)
+{
+       struct bd_holder_disk *holder;
+       int ret;
+
+       mutex_lock(&disk->open_mutex);
+       list_for_each_entry(holder, &disk->slave_bdevs, list) {
+               ret = __link_disk_holder(holder->bdev, disk);
+               if (ret)
+                       goto out_undo;
+       }
+       mutex_unlock(&disk->open_mutex);
+       return 0;
+
+out_undo:
+       list_for_each_entry_continue_reverse(holder, &disk->slave_bdevs, list)
+               __unlink_disk_holder(holder->bdev, disk);
+       mutex_unlock(&disk->open_mutex);
+       return ret;
+}
index 24beec9..eb0491e 100644 (file)
@@ -16,6 +16,7 @@
 static int blkpg_do_ioctl(struct block_device *bdev,
                          struct blkpg_partition __user *upart, int op)
 {
+       struct gendisk *disk = bdev->bd_disk;
        struct blkpg_partition p;
        long long start, length;
 
@@ -30,7 +31,7 @@ static int blkpg_do_ioctl(struct block_device *bdev,
                return -EINVAL;
 
        if (op == BLKPG_DEL_PARTITION)
-               return bdev_del_partition(bdev, p.pno);
+               return bdev_del_partition(disk, p.pno);
 
        start = p.start >> SECTOR_SHIFT;
        length = p.length >> SECTOR_SHIFT;
@@ -40,9 +41,9 @@ static int blkpg_do_ioctl(struct block_device *bdev,
                /* check if partition is aligned to blocksize */
                if (p.start & (bdev_logical_block_size(bdev) - 1))
                        return -EINVAL;
-               return bdev_add_partition(bdev, p.pno, start, length);
+               return bdev_add_partition(disk, p.pno, start, length);
        case BLKPG_RESIZE_PARTITION:
-               return bdev_resize_partition(bdev, p.pno, start, length);
+               return bdev_resize_partition(disk, p.pno, start, length);
        default:
                return -EINVAL;
        }
@@ -469,6 +470,8 @@ static int blkdev_common_ioctl(struct block_device *bdev, fmode_t mode,
                                BLKDEV_DISCARD_SECURE);
        case BLKZEROOUT:
                return blk_ioctl_zeroout(bdev, mode, arg);
+       case BLKGETDISKSEQ:
+               return put_u64(argp, bdev->bd_disk->diskseq);
        case BLKREPORTZONE:
                return blkdev_report_zones_ioctl(bdev, mode, cmd, arg);
        case BLKRESETZONE:
@@ -504,7 +507,7 @@ static int blkdev_common_ioctl(struct block_device *bdev, fmode_t mode,
        case BLKFRASET:
                if(!capable(CAP_SYS_ADMIN))
                        return -EACCES;
-               bdev->bd_bdi->ra_pages = (arg * 512) / PAGE_SIZE;
+               bdev->bd_disk->bdi->ra_pages = (arg * 512) / PAGE_SIZE;
                return 0;
        case BLKRRPART:
                return blkdev_reread_part(bdev, mode);
@@ -554,7 +557,8 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
        case BLKFRAGET:
                if (!argp)
                        return -EINVAL;
-               return put_long(argp, (bdev->bd_bdi->ra_pages*PAGE_SIZE) / 512);
+               return put_long(argp,
+                       (bdev->bd_disk->bdi->ra_pages * PAGE_SIZE) / 512);
        case BLKGETSIZE:
                size = i_size_read(bdev->bd_inode);
                if ((size >> 9) > ~0UL)
@@ -626,7 +630,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
                if (!argp)
                        return -EINVAL;
                return compat_put_long(argp,
-                              (bdev->bd_bdi->ra_pages * PAGE_SIZE) / 512);
+                       (bdev->bd_disk->bdi->ra_pages * PAGE_SIZE) / 512);
        case BLKGETSIZE:
                size = i_size_read(bdev->bd_inode);
                if ((size >> 9) > ~0UL)
index bee628f..0e4ff24 100644 (file)
@@ -74,9 +74,8 @@ int ioprio_check_cap(int ioprio)
                        fallthrough;
                        /* rt has prio field too */
                case IOPRIO_CLASS_BE:
-                       if (data >= IOPRIO_BE_NR || data < 0)
+                       if (data >= IOPRIO_NR_LEVELS || data < 0)
                                return -EINVAL;
-
                        break;
                case IOPRIO_CLASS_IDLE:
                        break;
@@ -171,7 +170,7 @@ static int get_task_ioprio(struct task_struct *p)
        ret = security_task_getioprio(p);
        if (ret)
                goto out;
-       ret = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, IOPRIO_NORM);
+       ret = IOPRIO_DEFAULT;
        task_lock(p);
        if (p->io_context)
                ret = p->io_context->ioprio;
@@ -183,9 +182,9 @@ out:
 int ioprio_best(unsigned short aprio, unsigned short bprio)
 {
        if (!ioprio_valid(aprio))
-               aprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);
+               aprio = IOPRIO_DEFAULT;
        if (!ioprio_valid(bprio))
-               bprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);
+               bprio = IOPRIO_DEFAULT;
 
        return min(aprio, bprio);
 }
index 81e3279..15a8be5 100644 (file)
@@ -596,13 +596,13 @@ static void kyber_insert_requests(struct blk_mq_hw_ctx *hctx,
                struct list_head *head = &kcq->rq_list[sched_domain];
 
                spin_lock(&kcq->lock);
+               trace_block_rq_insert(rq);
                if (at_head)
                        list_move(&rq->queuelist, head);
                else
                        list_move_tail(&rq->queuelist, head);
                sbitmap_set_bit(&khd->kcq_map[sched_domain],
                                rq->mq_ctx->index_hw[hctx->type]);
-               trace_block_rq_insert(rq);
                spin_unlock(&kcq->lock);
        }
 }
diff --git a/block/mq-deadline-cgroup.c b/block/mq-deadline-cgroup.c
deleted file mode 100644 (file)
index 3b4bfdd..0000000
+++ /dev/null
@@ -1,126 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-#include <linux/blk-cgroup.h>
-#include <linux/ioprio.h>
-
-#include "mq-deadline-cgroup.h"
-
-static struct blkcg_policy dd_blkcg_policy;
-
-static struct blkcg_policy_data *dd_cpd_alloc(gfp_t gfp)
-{
-       struct dd_blkcg *pd;
-
-       pd = kzalloc(sizeof(*pd), gfp);
-       if (!pd)
-               return NULL;
-       pd->stats = alloc_percpu_gfp(typeof(*pd->stats),
-                                    GFP_KERNEL | __GFP_ZERO);
-       if (!pd->stats) {
-               kfree(pd);
-               return NULL;
-       }
-       return &pd->cpd;
-}
-
-static void dd_cpd_free(struct blkcg_policy_data *cpd)
-{
-       struct dd_blkcg *dd_blkcg = container_of(cpd, typeof(*dd_blkcg), cpd);
-
-       free_percpu(dd_blkcg->stats);
-       kfree(dd_blkcg);
-}
-
-static struct dd_blkcg *dd_blkcg_from_pd(struct blkg_policy_data *pd)
-{
-       return container_of(blkcg_to_cpd(pd->blkg->blkcg, &dd_blkcg_policy),
-                           struct dd_blkcg, cpd);
-}
-
-/*
- * Convert an association between a block cgroup and a request queue into a
- * pointer to the mq-deadline information associated with a (blkcg, queue) pair.
- */
-struct dd_blkcg *dd_blkcg_from_bio(struct bio *bio)
-{
-       struct blkg_policy_data *pd;
-
-       pd = blkg_to_pd(bio->bi_blkg, &dd_blkcg_policy);
-       if (!pd)
-               return NULL;
-
-       return dd_blkcg_from_pd(pd);
-}
-
-static size_t dd_pd_stat(struct blkg_policy_data *pd, char *buf, size_t size)
-{
-       static const char *const prio_class_name[] = {
-               [IOPRIO_CLASS_NONE]     = "NONE",
-               [IOPRIO_CLASS_RT]       = "RT",
-               [IOPRIO_CLASS_BE]       = "BE",
-               [IOPRIO_CLASS_IDLE]     = "IDLE",
-       };
-       struct dd_blkcg *blkcg = dd_blkcg_from_pd(pd);
-       int res = 0;
-       u8 prio;
-
-       for (prio = 0; prio < ARRAY_SIZE(blkcg->stats->stats); prio++)
-               res += scnprintf(buf + res, size - res,
-                       " [%s] dispatched=%u inserted=%u merged=%u",
-                       prio_class_name[prio],
-                       ddcg_sum(blkcg, dispatched, prio) +
-                       ddcg_sum(blkcg, merged, prio) -
-                       ddcg_sum(blkcg, completed, prio),
-                       ddcg_sum(blkcg, inserted, prio) -
-                       ddcg_sum(blkcg, completed, prio),
-                       ddcg_sum(blkcg, merged, prio));
-
-       return res;
-}
-
-static struct blkg_policy_data *dd_pd_alloc(gfp_t gfp, struct request_queue *q,
-                                           struct blkcg *blkcg)
-{
-       struct dd_blkg *pd;
-
-       pd = kzalloc(sizeof(*pd), gfp);
-       if (!pd)
-               return NULL;
-       return &pd->pd;
-}
-
-static void dd_pd_free(struct blkg_policy_data *pd)
-{
-       struct dd_blkg *dd_blkg = container_of(pd, typeof(*dd_blkg), pd);
-
-       kfree(dd_blkg);
-}
-
-static struct blkcg_policy dd_blkcg_policy = {
-       .cpd_alloc_fn           = dd_cpd_alloc,
-       .cpd_free_fn            = dd_cpd_free,
-
-       .pd_alloc_fn            = dd_pd_alloc,
-       .pd_free_fn             = dd_pd_free,
-       .pd_stat_fn             = dd_pd_stat,
-};
-
-int dd_activate_policy(struct request_queue *q)
-{
-       return blkcg_activate_policy(q, &dd_blkcg_policy);
-}
-
-void dd_deactivate_policy(struct request_queue *q)
-{
-       blkcg_deactivate_policy(q, &dd_blkcg_policy);
-}
-
-int __init dd_blkcg_init(void)
-{
-       return blkcg_policy_register(&dd_blkcg_policy);
-}
-
-void __exit dd_blkcg_exit(void)
-{
-       blkcg_policy_unregister(&dd_blkcg_policy);
-}
diff --git a/block/mq-deadline-cgroup.h b/block/mq-deadline-cgroup.h
deleted file mode 100644 (file)
index 0143fd7..0000000
+++ /dev/null
@@ -1,114 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-
-#if !defined(_MQ_DEADLINE_CGROUP_H_)
-#define _MQ_DEADLINE_CGROUP_H_
-
-#include <linux/blk-cgroup.h>
-
-struct request_queue;
-
-/**
- * struct io_stats_per_prio - I/O statistics per I/O priority class.
- * @inserted: Number of inserted requests.
- * @merged: Number of merged requests.
- * @dispatched: Number of dispatched requests.
- * @completed: Number of I/O completions.
- */
-struct io_stats_per_prio {
-       local_t inserted;
-       local_t merged;
-       local_t dispatched;
-       local_t completed;
-};
-
-/* I/O statistics per I/O cgroup per I/O priority class (IOPRIO_CLASS_*). */
-struct blkcg_io_stats {
-       struct io_stats_per_prio stats[4];
-};
-
-/**
- * struct dd_blkcg - Per cgroup data.
- * @cpd: blkcg_policy_data structure.
- * @stats: I/O statistics.
- */
-struct dd_blkcg {
-       struct blkcg_policy_data cpd;   /* must be the first member */
-       struct blkcg_io_stats __percpu *stats;
-};
-
-/*
- * Count one event of type 'event_type' and with I/O priority class
- * 'prio_class'.
- */
-#define ddcg_count(ddcg, event_type, prio_class) do {                  \
-if (ddcg) {                                                            \
-       struct blkcg_io_stats *io_stats = get_cpu_ptr((ddcg)->stats);   \
-                                                                       \
-       BUILD_BUG_ON(!__same_type((ddcg), struct dd_blkcg *));          \
-       BUILD_BUG_ON(!__same_type((prio_class), u8));                   \
-       local_inc(&io_stats->stats[(prio_class)].event_type);           \
-       put_cpu_ptr(io_stats);                                          \
-}                                                                      \
-} while (0)
-
-/*
- * Returns the total number of ddcg_count(ddcg, event_type, prio_class) calls
- * across all CPUs. No locking or barriers since it is fine if the returned
- * sum is slightly outdated.
- */
-#define ddcg_sum(ddcg, event_type, prio) ({                            \
-       unsigned int cpu;                                               \
-       u32 sum = 0;                                                    \
-                                                                       \
-       BUILD_BUG_ON(!__same_type((ddcg), struct dd_blkcg *));          \
-       BUILD_BUG_ON(!__same_type((prio), u8));                         \
-       for_each_present_cpu(cpu)                                       \
-               sum += local_read(&per_cpu_ptr((ddcg)->stats, cpu)->    \
-                                 stats[(prio)].event_type);            \
-       sum;                                                            \
-})
-
-#ifdef CONFIG_BLK_CGROUP
-
-/**
- * struct dd_blkg - Per (cgroup, request queue) data.
- * @pd: blkg_policy_data structure.
- */
-struct dd_blkg {
-       struct blkg_policy_data pd;     /* must be the first member */
-};
-
-struct dd_blkcg *dd_blkcg_from_bio(struct bio *bio);
-int dd_activate_policy(struct request_queue *q);
-void dd_deactivate_policy(struct request_queue *q);
-int __init dd_blkcg_init(void);
-void __exit dd_blkcg_exit(void);
-
-#else /* CONFIG_BLK_CGROUP */
-
-static inline struct dd_blkcg *dd_blkcg_from_bio(struct bio *bio)
-{
-       return NULL;
-}
-
-static inline int dd_activate_policy(struct request_queue *q)
-{
-       return 0;
-}
-
-static inline void dd_deactivate_policy(struct request_queue *q)
-{
-}
-
-static inline int dd_blkcg_init(void)
-{
-       return 0;
-}
-
-static inline void dd_blkcg_exit(void)
-{
-}
-
-#endif /* CONFIG_BLK_CGROUP */
-
-#endif /* _MQ_DEADLINE_CGROUP_H_ */
diff --git a/block/mq-deadline-main.c b/block/mq-deadline-main.c
deleted file mode 100644 (file)
index 6f612e6..0000000
+++ /dev/null
@@ -1,1175 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- *  MQ Deadline i/o scheduler - adaptation of the legacy deadline scheduler,
- *  for the blk-mq scheduling framework
- *
- *  Copyright (C) 2016 Jens Axboe <axboe@kernel.dk>
- */
-#include <linux/kernel.h>
-#include <linux/fs.h>
-#include <linux/blkdev.h>
-#include <linux/blk-mq.h>
-#include <linux/elevator.h>
-#include <linux/bio.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/compiler.h>
-#include <linux/rbtree.h>
-#include <linux/sbitmap.h>
-
-#include <trace/events/block.h>
-
-#include "blk.h"
-#include "blk-mq.h"
-#include "blk-mq-debugfs.h"
-#include "blk-mq-tag.h"
-#include "blk-mq-sched.h"
-#include "mq-deadline-cgroup.h"
-
-/*
- * See Documentation/block/deadline-iosched.rst
- */
-static const int read_expire = HZ / 2;  /* max time before a read is submitted. */
-static const int write_expire = 5 * HZ; /* ditto for writes, these limits are SOFT! */
-/*
- * Time after which to dispatch lower priority requests even if higher
- * priority requests are pending.
- */
-static const int aging_expire = 10 * HZ;
-static const int writes_starved = 2;    /* max times reads can starve a write */
-static const int fifo_batch = 16;       /* # of sequential requests treated as one
-                                    by the above parameters. For throughput. */
-
-enum dd_data_dir {
-       DD_READ         = READ,
-       DD_WRITE        = WRITE,
-};
-
-enum { DD_DIR_COUNT = 2 };
-
-enum dd_prio {
-       DD_RT_PRIO      = 0,
-       DD_BE_PRIO      = 1,
-       DD_IDLE_PRIO    = 2,
-       DD_PRIO_MAX     = 2,
-};
-
-enum { DD_PRIO_COUNT = 3 };
-
-/* I/O statistics for all I/O priorities (enum dd_prio). */
-struct io_stats {
-       struct io_stats_per_prio stats[DD_PRIO_COUNT];
-};
-
-/*
- * Deadline scheduler data per I/O priority (enum dd_prio). Requests are
- * present on both sort_list[] and fifo_list[].
- */
-struct dd_per_prio {
-       struct list_head dispatch;
-       struct rb_root sort_list[DD_DIR_COUNT];
-       struct list_head fifo_list[DD_DIR_COUNT];
-       /* Next request in FIFO order. Read, write or both are NULL. */
-       struct request *next_rq[DD_DIR_COUNT];
-};
-
-struct deadline_data {
-       /*
-        * run time data
-        */
-
-       /* Request queue that owns this data structure. */
-       struct request_queue *queue;
-
-       struct dd_per_prio per_prio[DD_PRIO_COUNT];
-
-       /* Data direction of latest dispatched request. */
-       enum dd_data_dir last_dir;
-       unsigned int batching;          /* number of sequential requests made */
-       unsigned int starved;           /* times reads have starved writes */
-
-       struct io_stats __percpu *stats;
-
-       /*
-        * settings that change how the i/o scheduler behaves
-        */
-       int fifo_expire[DD_DIR_COUNT];
-       int fifo_batch;
-       int writes_starved;
-       int front_merges;
-       u32 async_depth;
-       int aging_expire;
-
-       spinlock_t lock;
-       spinlock_t zone_lock;
-};
-
-/* Count one event of type 'event_type' and with I/O priority 'prio' */
-#define dd_count(dd, event_type, prio) do {                            \
-       struct io_stats *io_stats = get_cpu_ptr((dd)->stats);           \
-                                                                       \
-       BUILD_BUG_ON(!__same_type((dd), struct deadline_data *));       \
-       BUILD_BUG_ON(!__same_type((prio), enum dd_prio));               \
-       local_inc(&io_stats->stats[(prio)].event_type);                 \
-       put_cpu_ptr(io_stats);                                          \
-} while (0)
-
-/*
- * Returns the total number of dd_count(dd, event_type, prio) calls across all
- * CPUs. No locking or barriers since it is fine if the returned sum is slightly
- * outdated.
- */
-#define dd_sum(dd, event_type, prio) ({                                        \
-       unsigned int cpu;                                               \
-       u32 sum = 0;                                                    \
-                                                                       \
-       BUILD_BUG_ON(!__same_type((dd), struct deadline_data *));       \
-       BUILD_BUG_ON(!__same_type((prio), enum dd_prio));               \
-       for_each_present_cpu(cpu)                                       \
-               sum += local_read(&per_cpu_ptr((dd)->stats, cpu)->      \
-                                 stats[(prio)].event_type);            \
-       sum;                                                            \
-})
-
-/* Maps an I/O priority class to a deadline scheduler priority. */
-static const enum dd_prio ioprio_class_to_prio[] = {
-       [IOPRIO_CLASS_NONE]     = DD_BE_PRIO,
-       [IOPRIO_CLASS_RT]       = DD_RT_PRIO,
-       [IOPRIO_CLASS_BE]       = DD_BE_PRIO,
-       [IOPRIO_CLASS_IDLE]     = DD_IDLE_PRIO,
-};
-
-static inline struct rb_root *
-deadline_rb_root(struct dd_per_prio *per_prio, struct request *rq)
-{
-       return &per_prio->sort_list[rq_data_dir(rq)];
-}
-
-/*
- * Returns the I/O priority class (IOPRIO_CLASS_*) that has been assigned to a
- * request.
- */
-static u8 dd_rq_ioclass(struct request *rq)
-{
-       return IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
-}
-
-/*
- * get the request after `rq' in sector-sorted order
- */
-static inline struct request *
-deadline_latter_request(struct request *rq)
-{
-       struct rb_node *node = rb_next(&rq->rb_node);
-
-       if (node)
-               return rb_entry_rq(node);
-
-       return NULL;
-}
-
-static void
-deadline_add_rq_rb(struct dd_per_prio *per_prio, struct request *rq)
-{
-       struct rb_root *root = deadline_rb_root(per_prio, rq);
-
-       elv_rb_add(root, rq);
-}
-
-static inline void
-deadline_del_rq_rb(struct dd_per_prio *per_prio, struct request *rq)
-{
-       const enum dd_data_dir data_dir = rq_data_dir(rq);
-
-       if (per_prio->next_rq[data_dir] == rq)
-               per_prio->next_rq[data_dir] = deadline_latter_request(rq);
-
-       elv_rb_del(deadline_rb_root(per_prio, rq), rq);
-}
-
-/*
- * remove rq from rbtree and fifo.
- */
-static void deadline_remove_request(struct request_queue *q,
-                                   struct dd_per_prio *per_prio,
-                                   struct request *rq)
-{
-       list_del_init(&rq->queuelist);
-
-       /*
-        * We might not be on the rbtree, if we are doing an insert merge
-        */
-       if (!RB_EMPTY_NODE(&rq->rb_node))
-               deadline_del_rq_rb(per_prio, rq);
-
-       elv_rqhash_del(q, rq);
-       if (q->last_merge == rq)
-               q->last_merge = NULL;
-}
-
-static void dd_request_merged(struct request_queue *q, struct request *req,
-                             enum elv_merge type)
-{
-       struct deadline_data *dd = q->elevator->elevator_data;
-       const u8 ioprio_class = dd_rq_ioclass(req);
-       const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
-       struct dd_per_prio *per_prio = &dd->per_prio[prio];
-
-       /*
-        * if the merge was a front merge, we need to reposition request
-        */
-       if (type == ELEVATOR_FRONT_MERGE) {
-               elv_rb_del(deadline_rb_root(per_prio, req), req);
-               deadline_add_rq_rb(per_prio, req);
-       }
-}
-
-/*
- * Callback function that is invoked after @next has been merged into @req.
- */
-static void dd_merged_requests(struct request_queue *q, struct request *req,
-                              struct request *next)
-{
-       struct deadline_data *dd = q->elevator->elevator_data;
-       const u8 ioprio_class = dd_rq_ioclass(next);
-       const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
-       struct dd_blkcg *blkcg = next->elv.priv[0];
-
-       dd_count(dd, merged, prio);
-       ddcg_count(blkcg, merged, ioprio_class);
-
-       /*
-        * if next expires before rq, assign its expire time to rq
-        * and move into next position (next will be deleted) in fifo
-        */
-       if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
-               if (time_before((unsigned long)next->fifo_time,
-                               (unsigned long)req->fifo_time)) {
-                       list_move(&req->queuelist, &next->queuelist);
-                       req->fifo_time = next->fifo_time;
-               }
-       }
-
-       /*
-        * kill knowledge of next, this one is a goner
-        */
-       deadline_remove_request(q, &dd->per_prio[prio], next);
-}
-
-/*
- * move an entry to dispatch queue
- */
-static void
-deadline_move_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
-                     struct request *rq)
-{
-       const enum dd_data_dir data_dir = rq_data_dir(rq);
-
-       per_prio->next_rq[data_dir] = deadline_latter_request(rq);
-
-       /*
-        * take it off the sort and fifo list
-        */
-       deadline_remove_request(rq->q, per_prio, rq);
-}
-
-/* Number of requests queued for a given priority level. */
-static u32 dd_queued(struct deadline_data *dd, enum dd_prio prio)
-{
-       return dd_sum(dd, inserted, prio) - dd_sum(dd, completed, prio);
-}
-
-/*
- * deadline_check_fifo returns 0 if there are no expired requests on the fifo,
- * 1 otherwise. Requires !list_empty(&dd->fifo_list[data_dir])
- */
-static inline int deadline_check_fifo(struct dd_per_prio *per_prio,
-                                     enum dd_data_dir data_dir)
-{
-       struct request *rq = rq_entry_fifo(per_prio->fifo_list[data_dir].next);
-
-       /*
-        * rq is expired!
-        */
-       if (time_after_eq(jiffies, (unsigned long)rq->fifo_time))
-               return 1;
-
-       return 0;
-}
-
-/*
- * For the specified data direction, return the next request to
- * dispatch using arrival ordered lists.
- */
-static struct request *
-deadline_fifo_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
-                     enum dd_data_dir data_dir)
-{
-       struct request *rq;
-       unsigned long flags;
-
-       if (list_empty(&per_prio->fifo_list[data_dir]))
-               return NULL;
-
-       rq = rq_entry_fifo(per_prio->fifo_list[data_dir].next);
-       if (data_dir == DD_READ || !blk_queue_is_zoned(rq->q))
-               return rq;
-
-       /*
-        * Look for a write request that can be dispatched, that is one with
-        * an unlocked target zone.
-        */
-       spin_lock_irqsave(&dd->zone_lock, flags);
-       list_for_each_entry(rq, &per_prio->fifo_list[DD_WRITE], queuelist) {
-               if (blk_req_can_dispatch_to_zone(rq))
-                       goto out;
-       }
-       rq = NULL;
-out:
-       spin_unlock_irqrestore(&dd->zone_lock, flags);
-
-       return rq;
-}
-
-/*
- * For the specified data direction, return the next request to
- * dispatch using sector position sorted lists.
- */
-static struct request *
-deadline_next_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
-                     enum dd_data_dir data_dir)
-{
-       struct request *rq;
-       unsigned long flags;
-
-       rq = per_prio->next_rq[data_dir];
-       if (!rq)
-               return NULL;
-
-       if (data_dir == DD_READ || !blk_queue_is_zoned(rq->q))
-               return rq;
-
-       /*
-        * Look for a write request that can be dispatched, that is one with
-        * an unlocked target zone.
-        */
-       spin_lock_irqsave(&dd->zone_lock, flags);
-       while (rq) {
-               if (blk_req_can_dispatch_to_zone(rq))
-                       break;
-               rq = deadline_latter_request(rq);
-       }
-       spin_unlock_irqrestore(&dd->zone_lock, flags);
-
-       return rq;
-}
-
-/*
- * deadline_dispatch_requests selects the best request according to
- * read/write expire, fifo_batch, etc and with a start time <= @latest.
- */
-static struct request *__dd_dispatch_request(struct deadline_data *dd,
-                                            struct dd_per_prio *per_prio,
-                                            u64 latest_start_ns)
-{
-       struct request *rq, *next_rq;
-       enum dd_data_dir data_dir;
-       struct dd_blkcg *blkcg;
-       enum dd_prio prio;
-       u8 ioprio_class;
-
-       lockdep_assert_held(&dd->lock);
-
-       if (!list_empty(&per_prio->dispatch)) {
-               rq = list_first_entry(&per_prio->dispatch, struct request,
-                                     queuelist);
-               if (rq->start_time_ns > latest_start_ns)
-                       return NULL;
-               list_del_init(&rq->queuelist);
-               goto done;
-       }
-
-       /*
-        * batches are currently reads XOR writes
-        */
-       rq = deadline_next_request(dd, per_prio, dd->last_dir);
-       if (rq && dd->batching < dd->fifo_batch)
-               /* we have a next request are still entitled to batch */
-               goto dispatch_request;
-
-       /*
-        * at this point we are not running a batch. select the appropriate
-        * data direction (read / write)
-        */
-
-       if (!list_empty(&per_prio->fifo_list[DD_READ])) {
-               BUG_ON(RB_EMPTY_ROOT(&per_prio->sort_list[DD_READ]));
-
-               if (deadline_fifo_request(dd, per_prio, DD_WRITE) &&
-                   (dd->starved++ >= dd->writes_starved))
-                       goto dispatch_writes;
-
-               data_dir = DD_READ;
-
-               goto dispatch_find_request;
-       }
-
-       /*
-        * there are either no reads or writes have been starved
-        */
-
-       if (!list_empty(&per_prio->fifo_list[DD_WRITE])) {
-dispatch_writes:
-               BUG_ON(RB_EMPTY_ROOT(&per_prio->sort_list[DD_WRITE]));
-
-               dd->starved = 0;
-
-               data_dir = DD_WRITE;
-
-               goto dispatch_find_request;
-       }
-
-       return NULL;
-
-dispatch_find_request:
-       /*
-        * we are not running a batch, find best request for selected data_dir
-        */
-       next_rq = deadline_next_request(dd, per_prio, data_dir);
-       if (deadline_check_fifo(per_prio, data_dir) || !next_rq) {
-               /*
-                * A deadline has expired, the last request was in the other
-                * direction, or we have run out of higher-sectored requests.
-                * Start again from the request with the earliest expiry time.
-                */
-               rq = deadline_fifo_request(dd, per_prio, data_dir);
-       } else {
-               /*
-                * The last req was the same dir and we have a next request in
-                * sort order. No expired requests so continue on from here.
-                */
-               rq = next_rq;
-       }
-
-       /*
-        * For a zoned block device, if we only have writes queued and none of
-        * them can be dispatched, rq will be NULL.
-        */
-       if (!rq)
-               return NULL;
-
-       dd->last_dir = data_dir;
-       dd->batching = 0;
-
-dispatch_request:
-       if (rq->start_time_ns > latest_start_ns)
-               return NULL;
-       /*
-        * rq is the selected appropriate request.
-        */
-       dd->batching++;
-       deadline_move_request(dd, per_prio, rq);
-done:
-       ioprio_class = dd_rq_ioclass(rq);
-       prio = ioprio_class_to_prio[ioprio_class];
-       dd_count(dd, dispatched, prio);
-       blkcg = rq->elv.priv[0];
-       ddcg_count(blkcg, dispatched, ioprio_class);
-       /*
-        * If the request needs its target zone locked, do it.
-        */
-       blk_req_zone_write_lock(rq);
-       rq->rq_flags |= RQF_STARTED;
-       return rq;
-}
-
-/*
- * Called from blk_mq_run_hw_queue() -> __blk_mq_sched_dispatch_requests().
- *
- * One confusing aspect here is that we get called for a specific
- * hardware queue, but we may return a request that is for a
- * different hardware queue. This is because mq-deadline has shared
- * state for all hardware queues, in terms of sorting, FIFOs, etc.
- */
-static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
-{
-       struct deadline_data *dd = hctx->queue->elevator->elevator_data;
-       const u64 now_ns = ktime_get_ns();
-       struct request *rq = NULL;
-       enum dd_prio prio;
-
-       spin_lock(&dd->lock);
-       /*
-        * Start with dispatching requests whose deadline expired more than
-        * aging_expire jiffies ago.
-        */
-       for (prio = DD_BE_PRIO; prio <= DD_PRIO_MAX; prio++) {
-               rq = __dd_dispatch_request(dd, &dd->per_prio[prio], now_ns -
-                                          jiffies_to_nsecs(dd->aging_expire));
-               if (rq)
-                       goto unlock;
-       }
-       /*
-        * Next, dispatch requests in priority order. Ignore lower priority
-        * requests if any higher priority requests are pending.
-        */
-       for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
-               rq = __dd_dispatch_request(dd, &dd->per_prio[prio], now_ns);
-               if (rq || dd_queued(dd, prio))
-                       break;
-       }
-
-unlock:
-       spin_unlock(&dd->lock);
-
-       return rq;
-}
-
-/*
- * Called by __blk_mq_alloc_request(). The shallow_depth value set by this
- * function is used by __blk_mq_get_tag().
- */
-static void dd_limit_depth(unsigned int op, struct blk_mq_alloc_data *data)
-{
-       struct deadline_data *dd = data->q->elevator->elevator_data;
-
-       /* Do not throttle synchronous reads. */
-       if (op_is_sync(op) && !op_is_write(op))
-               return;
-
-       /*
-        * Throttle asynchronous requests and writes such that these requests
-        * do not block the allocation of synchronous requests.
-        */
-       data->shallow_depth = dd->async_depth;
-}
-
-/* Called by blk_mq_update_nr_requests(). */
-static void dd_depth_updated(struct blk_mq_hw_ctx *hctx)
-{
-       struct request_queue *q = hctx->queue;
-       struct deadline_data *dd = q->elevator->elevator_data;
-       struct blk_mq_tags *tags = hctx->sched_tags;
-
-       dd->async_depth = max(1UL, 3 * q->nr_requests / 4);
-
-       sbitmap_queue_min_shallow_depth(tags->bitmap_tags, dd->async_depth);
-}
-
-/* Called by blk_mq_init_hctx() and blk_mq_init_sched(). */
-static int dd_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
-{
-       dd_depth_updated(hctx);
-       return 0;
-}
-
-static void dd_exit_sched(struct elevator_queue *e)
-{
-       struct deadline_data *dd = e->elevator_data;
-       enum dd_prio prio;
-
-       dd_deactivate_policy(dd->queue);
-
-       for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
-               struct dd_per_prio *per_prio = &dd->per_prio[prio];
-
-               WARN_ON_ONCE(!list_empty(&per_prio->fifo_list[DD_READ]));
-               WARN_ON_ONCE(!list_empty(&per_prio->fifo_list[DD_WRITE]));
-       }
-
-       free_percpu(dd->stats);
-
-       kfree(dd);
-}
-
-/*
- * Initialize elevator private data (deadline_data) and associate with blkcg.
- */
-static int dd_init_sched(struct request_queue *q, struct elevator_type *e)
-{
-       struct deadline_data *dd;
-       struct elevator_queue *eq;
-       enum dd_prio prio;
-       int ret = -ENOMEM;
-
-       /*
-        * Initialization would be very tricky if the queue is not frozen,
-        * hence the warning statement below.
-        */
-       WARN_ON_ONCE(!percpu_ref_is_zero(&q->q_usage_counter));
-
-       eq = elevator_alloc(q, e);
-       if (!eq)
-               return ret;
-
-       dd = kzalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
-       if (!dd)
-               goto put_eq;
-
-       eq->elevator_data = dd;
-
-       dd->stats = alloc_percpu_gfp(typeof(*dd->stats),
-                                    GFP_KERNEL | __GFP_ZERO);
-       if (!dd->stats)
-               goto free_dd;
-
-       dd->queue = q;
-
-       for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
-               struct dd_per_prio *per_prio = &dd->per_prio[prio];
-
-               INIT_LIST_HEAD(&per_prio->dispatch);
-               INIT_LIST_HEAD(&per_prio->fifo_list[DD_READ]);
-               INIT_LIST_HEAD(&per_prio->fifo_list[DD_WRITE]);
-               per_prio->sort_list[DD_READ] = RB_ROOT;
-               per_prio->sort_list[DD_WRITE] = RB_ROOT;
-       }
-       dd->fifo_expire[DD_READ] = read_expire;
-       dd->fifo_expire[DD_WRITE] = write_expire;
-       dd->writes_starved = writes_starved;
-       dd->front_merges = 1;
-       dd->last_dir = DD_WRITE;
-       dd->fifo_batch = fifo_batch;
-       dd->aging_expire = aging_expire;
-       spin_lock_init(&dd->lock);
-       spin_lock_init(&dd->zone_lock);
-
-       ret = dd_activate_policy(q);
-       if (ret)
-               goto free_stats;
-
-       ret = 0;
-       q->elevator = eq;
-       return 0;
-
-free_stats:
-       free_percpu(dd->stats);
-
-free_dd:
-       kfree(dd);
-
-put_eq:
-       kobject_put(&eq->kobj);
-       return ret;
-}
-
-/*
- * Try to merge @bio into an existing request. If @bio has been merged into
- * an existing request, store the pointer to that request into *@rq.
- */
-static int dd_request_merge(struct request_queue *q, struct request **rq,
-                           struct bio *bio)
-{
-       struct deadline_data *dd = q->elevator->elevator_data;
-       const u8 ioprio_class = IOPRIO_PRIO_CLASS(bio->bi_ioprio);
-       const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
-       struct dd_per_prio *per_prio = &dd->per_prio[prio];
-       sector_t sector = bio_end_sector(bio);
-       struct request *__rq;
-
-       if (!dd->front_merges)
-               return ELEVATOR_NO_MERGE;
-
-       __rq = elv_rb_find(&per_prio->sort_list[bio_data_dir(bio)], sector);
-       if (__rq) {
-               BUG_ON(sector != blk_rq_pos(__rq));
-
-               if (elv_bio_merge_ok(__rq, bio)) {
-                       *rq = __rq;
-                       return ELEVATOR_FRONT_MERGE;
-               }
-       }
-
-       return ELEVATOR_NO_MERGE;
-}
-
-/*
- * Attempt to merge a bio into an existing request. This function is called
- * before @bio is associated with a request.
- */
-static bool dd_bio_merge(struct request_queue *q, struct bio *bio,
-               unsigned int nr_segs)
-{
-       struct deadline_data *dd = q->elevator->elevator_data;
-       struct request *free = NULL;
-       bool ret;
-
-       spin_lock(&dd->lock);
-       ret = blk_mq_sched_try_merge(q, bio, nr_segs, &free);
-       spin_unlock(&dd->lock);
-
-       if (free)
-               blk_mq_free_request(free);
-
-       return ret;
-}
-
-/*
- * add rq to rbtree and fifo
- */
-static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
-                             bool at_head)
-{
-       struct request_queue *q = hctx->queue;
-       struct deadline_data *dd = q->elevator->elevator_data;
-       const enum dd_data_dir data_dir = rq_data_dir(rq);
-       u16 ioprio = req_get_ioprio(rq);
-       u8 ioprio_class = IOPRIO_PRIO_CLASS(ioprio);
-       struct dd_per_prio *per_prio;
-       enum dd_prio prio;
-       struct dd_blkcg *blkcg;
-       LIST_HEAD(free);
-
-       lockdep_assert_held(&dd->lock);
-
-       /*
-        * This may be a requeue of a write request that has locked its
-        * target zone. If it is the case, this releases the zone lock.
-        */
-       blk_req_zone_write_unlock(rq);
-
-       /*
-        * If a block cgroup has been associated with the submitter and if an
-        * I/O priority has been set in the associated block cgroup, use the
-        * lowest of the cgroup priority and the request priority for the
-        * request. If no priority has been set in the request, use the cgroup
-        * priority.
-        */
-       prio = ioprio_class_to_prio[ioprio_class];
-       dd_count(dd, inserted, prio);
-       blkcg = dd_blkcg_from_bio(rq->bio);
-       ddcg_count(blkcg, inserted, ioprio_class);
-       rq->elv.priv[0] = blkcg;
-
-       if (blk_mq_sched_try_insert_merge(q, rq, &free)) {
-               blk_mq_free_requests(&free);
-               return;
-       }
-
-       trace_block_rq_insert(rq);
-
-       per_prio = &dd->per_prio[prio];
-       if (at_head) {
-               list_add(&rq->queuelist, &per_prio->dispatch);
-       } else {
-               deadline_add_rq_rb(per_prio, rq);
-
-               if (rq_mergeable(rq)) {
-                       elv_rqhash_add(q, rq);
-                       if (!q->last_merge)
-                               q->last_merge = rq;
-               }
-
-               /*
-                * set expire time and add to fifo list
-                */
-               rq->fifo_time = jiffies + dd->fifo_expire[data_dir];
-               list_add_tail(&rq->queuelist, &per_prio->fifo_list[data_dir]);
-       }
-}
-
-/*
- * Called from blk_mq_sched_insert_request() or blk_mq_sched_insert_requests().
- */
-static void dd_insert_requests(struct blk_mq_hw_ctx *hctx,
-                              struct list_head *list, bool at_head)
-{
-       struct request_queue *q = hctx->queue;
-       struct deadline_data *dd = q->elevator->elevator_data;
-
-       spin_lock(&dd->lock);
-       while (!list_empty(list)) {
-               struct request *rq;
-
-               rq = list_first_entry(list, struct request, queuelist);
-               list_del_init(&rq->queuelist);
-               dd_insert_request(hctx, rq, at_head);
-       }
-       spin_unlock(&dd->lock);
-}
-
-/* Callback from inside blk_mq_rq_ctx_init(). */
-static void dd_prepare_request(struct request *rq)
-{
-       rq->elv.priv[0] = NULL;
-}
-
-/*
- * Callback from inside blk_mq_free_request().
- *
- * For zoned block devices, write unlock the target zone of
- * completed write requests. Do this while holding the zone lock
- * spinlock so that the zone is never unlocked while deadline_fifo_request()
- * or deadline_next_request() are executing. This function is called for
- * all requests, whether or not these requests complete successfully.
- *
- * For a zoned block device, __dd_dispatch_request() may have stopped
- * dispatching requests if all the queued requests are write requests directed
- * at zones that are already locked due to on-going write requests. To ensure
- * write request dispatch progress in this case, mark the queue as needing a
- * restart to ensure that the queue is run again after completion of the
- * request and zones being unlocked.
- */
-static void dd_finish_request(struct request *rq)
-{
-       struct request_queue *q = rq->q;
-       struct deadline_data *dd = q->elevator->elevator_data;
-       struct dd_blkcg *blkcg = rq->elv.priv[0];
-       const u8 ioprio_class = dd_rq_ioclass(rq);
-       const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
-       struct dd_per_prio *per_prio = &dd->per_prio[prio];
-
-       dd_count(dd, completed, prio);
-       ddcg_count(blkcg, completed, ioprio_class);
-
-       if (blk_queue_is_zoned(q)) {
-               unsigned long flags;
-
-               spin_lock_irqsave(&dd->zone_lock, flags);
-               blk_req_zone_write_unlock(rq);
-               if (!list_empty(&per_prio->fifo_list[DD_WRITE]))
-                       blk_mq_sched_mark_restart_hctx(rq->mq_hctx);
-               spin_unlock_irqrestore(&dd->zone_lock, flags);
-       }
-}
-
-static bool dd_has_work_for_prio(struct dd_per_prio *per_prio)
-{
-       return !list_empty_careful(&per_prio->dispatch) ||
-               !list_empty_careful(&per_prio->fifo_list[DD_READ]) ||
-               !list_empty_careful(&per_prio->fifo_list[DD_WRITE]);
-}
-
-static bool dd_has_work(struct blk_mq_hw_ctx *hctx)
-{
-       struct deadline_data *dd = hctx->queue->elevator->elevator_data;
-       enum dd_prio prio;
-
-       for (prio = 0; prio <= DD_PRIO_MAX; prio++)
-               if (dd_has_work_for_prio(&dd->per_prio[prio]))
-                       return true;
-
-       return false;
-}
-
-/*
- * sysfs parts below
- */
-#define SHOW_INT(__FUNC, __VAR)                                                \
-static ssize_t __FUNC(struct elevator_queue *e, char *page)            \
-{                                                                      \
-       struct deadline_data *dd = e->elevator_data;                    \
-                                                                       \
-       return sysfs_emit(page, "%d\n", __VAR);                         \
-}
-#define SHOW_JIFFIES(__FUNC, __VAR) SHOW_INT(__FUNC, jiffies_to_msecs(__VAR))
-SHOW_JIFFIES(deadline_read_expire_show, dd->fifo_expire[DD_READ]);
-SHOW_JIFFIES(deadline_write_expire_show, dd->fifo_expire[DD_WRITE]);
-SHOW_JIFFIES(deadline_aging_expire_show, dd->aging_expire);
-SHOW_INT(deadline_writes_starved_show, dd->writes_starved);
-SHOW_INT(deadline_front_merges_show, dd->front_merges);
-SHOW_INT(deadline_async_depth_show, dd->front_merges);
-SHOW_INT(deadline_fifo_batch_show, dd->fifo_batch);
-#undef SHOW_INT
-#undef SHOW_JIFFIES
-
-#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)                        \
-static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)        \
-{                                                                      \
-       struct deadline_data *dd = e->elevator_data;                    \
-       int __data, __ret;                                              \
-                                                                       \
-       __ret = kstrtoint(page, 0, &__data);                            \
-       if (__ret < 0)                                                  \
-               return __ret;                                           \
-       if (__data < (MIN))                                             \
-               __data = (MIN);                                         \
-       else if (__data > (MAX))                                        \
-               __data = (MAX);                                         \
-       *(__PTR) = __CONV(__data);                                      \
-       return count;                                                   \
-}
-#define STORE_INT(__FUNC, __PTR, MIN, MAX)                             \
-       STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, )
-#define STORE_JIFFIES(__FUNC, __PTR, MIN, MAX)                         \
-       STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, msecs_to_jiffies)
-STORE_JIFFIES(deadline_read_expire_store, &dd->fifo_expire[DD_READ], 0, INT_MAX);
-STORE_JIFFIES(deadline_write_expire_store, &dd->fifo_expire[DD_WRITE], 0, INT_MAX);
-STORE_JIFFIES(deadline_aging_expire_store, &dd->aging_expire, 0, INT_MAX);
-STORE_INT(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX);
-STORE_INT(deadline_front_merges_store, &dd->front_merges, 0, 1);
-STORE_INT(deadline_async_depth_store, &dd->front_merges, 1, INT_MAX);
-STORE_INT(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX);
-#undef STORE_FUNCTION
-#undef STORE_INT
-#undef STORE_JIFFIES
-
-#define DD_ATTR(name) \
-       __ATTR(name, 0644, deadline_##name##_show, deadline_##name##_store)
-
-static struct elv_fs_entry deadline_attrs[] = {
-       DD_ATTR(read_expire),
-       DD_ATTR(write_expire),
-       DD_ATTR(writes_starved),
-       DD_ATTR(front_merges),
-       DD_ATTR(async_depth),
-       DD_ATTR(fifo_batch),
-       DD_ATTR(aging_expire),
-       __ATTR_NULL
-};
-
-#ifdef CONFIG_BLK_DEBUG_FS
-#define DEADLINE_DEBUGFS_DDIR_ATTRS(prio, data_dir, name)              \
-static void *deadline_##name##_fifo_start(struct seq_file *m,          \
-                                         loff_t *pos)                  \
-       __acquires(&dd->lock)                                           \
-{                                                                      \
-       struct request_queue *q = m->private;                           \
-       struct deadline_data *dd = q->elevator->elevator_data;          \
-       struct dd_per_prio *per_prio = &dd->per_prio[prio];             \
-                                                                       \
-       spin_lock(&dd->lock);                                           \
-       return seq_list_start(&per_prio->fifo_list[data_dir], *pos);    \
-}                                                                      \
-                                                                       \
-static void *deadline_##name##_fifo_next(struct seq_file *m, void *v,  \
-                                        loff_t *pos)                   \
-{                                                                      \
-       struct request_queue *q = m->private;                           \
-       struct deadline_data *dd = q->elevator->elevator_data;          \
-       struct dd_per_prio *per_prio = &dd->per_prio[prio];             \
-                                                                       \
-       return seq_list_next(v, &per_prio->fifo_list[data_dir], pos);   \
-}                                                                      \
-                                                                       \
-static void deadline_##name##_fifo_stop(struct seq_file *m, void *v)   \
-       __releases(&dd->lock)                                           \
-{                                                                      \
-       struct request_queue *q = m->private;                           \
-       struct deadline_data *dd = q->elevator->elevator_data;          \
-                                                                       \
-       spin_unlock(&dd->lock);                                         \
-}                                                                      \
-                                                                       \
-static const struct seq_operations deadline_##name##_fifo_seq_ops = {  \
-       .start  = deadline_##name##_fifo_start,                         \
-       .next   = deadline_##name##_fifo_next,                          \
-       .stop   = deadline_##name##_fifo_stop,                          \
-       .show   = blk_mq_debugfs_rq_show,                               \
-};                                                                     \
-                                                                       \
-static int deadline_##name##_next_rq_show(void *data,                  \
-                                         struct seq_file *m)           \
-{                                                                      \
-       struct request_queue *q = data;                                 \
-       struct deadline_data *dd = q->elevator->elevator_data;          \
-       struct dd_per_prio *per_prio = &dd->per_prio[prio];             \
-       struct request *rq = per_prio->next_rq[data_dir];               \
-                                                                       \
-       if (rq)                                                         \
-               __blk_mq_debugfs_rq_show(m, rq);                        \
-       return 0;                                                       \
-}
-
-DEADLINE_DEBUGFS_DDIR_ATTRS(DD_RT_PRIO, DD_READ, read0);
-DEADLINE_DEBUGFS_DDIR_ATTRS(DD_RT_PRIO, DD_WRITE, write0);
-DEADLINE_DEBUGFS_DDIR_ATTRS(DD_BE_PRIO, DD_READ, read1);
-DEADLINE_DEBUGFS_DDIR_ATTRS(DD_BE_PRIO, DD_WRITE, write1);
-DEADLINE_DEBUGFS_DDIR_ATTRS(DD_IDLE_PRIO, DD_READ, read2);
-DEADLINE_DEBUGFS_DDIR_ATTRS(DD_IDLE_PRIO, DD_WRITE, write2);
-#undef DEADLINE_DEBUGFS_DDIR_ATTRS
-
-static int deadline_batching_show(void *data, struct seq_file *m)
-{
-       struct request_queue *q = data;
-       struct deadline_data *dd = q->elevator->elevator_data;
-
-       seq_printf(m, "%u\n", dd->batching);
-       return 0;
-}
-
-static int deadline_starved_show(void *data, struct seq_file *m)
-{
-       struct request_queue *q = data;
-       struct deadline_data *dd = q->elevator->elevator_data;
-
-       seq_printf(m, "%u\n", dd->starved);
-       return 0;
-}
-
-static int dd_async_depth_show(void *data, struct seq_file *m)
-{
-       struct request_queue *q = data;
-       struct deadline_data *dd = q->elevator->elevator_data;
-
-       seq_printf(m, "%u\n", dd->async_depth);
-       return 0;
-}
-
-static int dd_queued_show(void *data, struct seq_file *m)
-{
-       struct request_queue *q = data;
-       struct deadline_data *dd = q->elevator->elevator_data;
-
-       seq_printf(m, "%u %u %u\n", dd_queued(dd, DD_RT_PRIO),
-                  dd_queued(dd, DD_BE_PRIO),
-                  dd_queued(dd, DD_IDLE_PRIO));
-       return 0;
-}
-
-/* Number of requests owned by the block driver for a given priority. */
-static u32 dd_owned_by_driver(struct deadline_data *dd, enum dd_prio prio)
-{
-       return dd_sum(dd, dispatched, prio) + dd_sum(dd, merged, prio)
-               - dd_sum(dd, completed, prio);
-}
-
-static int dd_owned_by_driver_show(void *data, struct seq_file *m)
-{
-       struct request_queue *q = data;
-       struct deadline_data *dd = q->elevator->elevator_data;
-
-       seq_printf(m, "%u %u %u\n", dd_owned_by_driver(dd, DD_RT_PRIO),
-                  dd_owned_by_driver(dd, DD_BE_PRIO),
-                  dd_owned_by_driver(dd, DD_IDLE_PRIO));
-       return 0;
-}
-
-#define DEADLINE_DISPATCH_ATTR(prio)                                   \
-static void *deadline_dispatch##prio##_start(struct seq_file *m,       \
-                                            loff_t *pos)               \
-       __acquires(&dd->lock)                                           \
-{                                                                      \
-       struct request_queue *q = m->private;                           \
-       struct deadline_data *dd = q->elevator->elevator_data;          \
-       struct dd_per_prio *per_prio = &dd->per_prio[prio];             \
-                                                                       \
-       spin_lock(&dd->lock);                                           \
-       return seq_list_start(&per_prio->dispatch, *pos);               \
-}                                                                      \
-                                                                       \
-static void *deadline_dispatch##prio##_next(struct seq_file *m,                \
-                                           void *v, loff_t *pos)       \
-{                                                                      \
-       struct request_queue *q = m->private;                           \
-       struct deadline_data *dd = q->elevator->elevator_data;          \
-       struct dd_per_prio *per_prio = &dd->per_prio[prio];             \
-                                                                       \
-       return seq_list_next(v, &per_prio->dispatch, pos);              \
-}                                                                      \
-                                                                       \
-static void deadline_dispatch##prio##_stop(struct seq_file *m, void *v)        \
-       __releases(&dd->lock)                                           \
-{                                                                      \
-       struct request_queue *q = m->private;                           \
-       struct deadline_data *dd = q->elevator->elevator_data;          \
-                                                                       \
-       spin_unlock(&dd->lock);                                         \
-}                                                                      \
-                                                                       \
-static const struct seq_operations deadline_dispatch##prio##_seq_ops = { \
-       .start  = deadline_dispatch##prio##_start,                      \
-       .next   = deadline_dispatch##prio##_next,                       \
-       .stop   = deadline_dispatch##prio##_stop,                       \
-       .show   = blk_mq_debugfs_rq_show,                               \
-}
-
-DEADLINE_DISPATCH_ATTR(0);
-DEADLINE_DISPATCH_ATTR(1);
-DEADLINE_DISPATCH_ATTR(2);
-#undef DEADLINE_DISPATCH_ATTR
-
-#define DEADLINE_QUEUE_DDIR_ATTRS(name)                                        \
-       {#name "_fifo_list", 0400,                                      \
-                       .seq_ops = &deadline_##name##_fifo_seq_ops}
-#define DEADLINE_NEXT_RQ_ATTR(name)                                    \
-       {#name "_next_rq", 0400, deadline_##name##_next_rq_show}
-static const struct blk_mq_debugfs_attr deadline_queue_debugfs_attrs[] = {
-       DEADLINE_QUEUE_DDIR_ATTRS(read0),
-       DEADLINE_QUEUE_DDIR_ATTRS(write0),
-       DEADLINE_QUEUE_DDIR_ATTRS(read1),
-       DEADLINE_QUEUE_DDIR_ATTRS(write1),
-       DEADLINE_QUEUE_DDIR_ATTRS(read2),
-       DEADLINE_QUEUE_DDIR_ATTRS(write2),
-       DEADLINE_NEXT_RQ_ATTR(read0),
-       DEADLINE_NEXT_RQ_ATTR(write0),
-       DEADLINE_NEXT_RQ_ATTR(read1),
-       DEADLINE_NEXT_RQ_ATTR(write1),
-       DEADLINE_NEXT_RQ_ATTR(read2),
-       DEADLINE_NEXT_RQ_ATTR(write2),
-       {"batching", 0400, deadline_batching_show},
-       {"starved", 0400, deadline_starved_show},
-       {"async_depth", 0400, dd_async_depth_show},
-       {"dispatch0", 0400, .seq_ops = &deadline_dispatch0_seq_ops},
-       {"dispatch1", 0400, .seq_ops = &deadline_dispatch1_seq_ops},
-       {"dispatch2", 0400, .seq_ops = &deadline_dispatch2_seq_ops},
-       {"owned_by_driver", 0400, dd_owned_by_driver_show},
-       {"queued", 0400, dd_queued_show},
-       {},
-};
-#undef DEADLINE_QUEUE_DDIR_ATTRS
-#endif
-
-static struct elevator_type mq_deadline = {
-       .ops = {
-               .depth_updated          = dd_depth_updated,
-               .limit_depth            = dd_limit_depth,
-               .insert_requests        = dd_insert_requests,
-               .dispatch_request       = dd_dispatch_request,
-               .prepare_request        = dd_prepare_request,
-               .finish_request         = dd_finish_request,
-               .next_request           = elv_rb_latter_request,
-               .former_request         = elv_rb_former_request,
-               .bio_merge              = dd_bio_merge,
-               .request_merge          = dd_request_merge,
-               .requests_merged        = dd_merged_requests,
-               .request_merged         = dd_request_merged,
-               .has_work               = dd_has_work,
-               .init_sched             = dd_init_sched,
-               .exit_sched             = dd_exit_sched,
-               .init_hctx              = dd_init_hctx,
-       },
-
-#ifdef CONFIG_BLK_DEBUG_FS
-       .queue_debugfs_attrs = deadline_queue_debugfs_attrs,
-#endif
-       .elevator_attrs = deadline_attrs,
-       .elevator_name = "mq-deadline",
-       .elevator_alias = "deadline",
-       .elevator_features = ELEVATOR_F_ZBD_SEQ_WRITE,
-       .elevator_owner = THIS_MODULE,
-};
-MODULE_ALIAS("mq-deadline-iosched");
-
-static int __init deadline_init(void)
-{
-       int ret;
-
-       ret = elv_register(&mq_deadline);
-       if (ret)
-               goto out;
-       ret = dd_blkcg_init();
-       if (ret)
-               goto unreg;
-
-out:
-       return ret;
-
-unreg:
-       elv_unregister(&mq_deadline);
-       goto out;
-}
-
-static void __exit deadline_exit(void)
-{
-       dd_blkcg_exit();
-       elv_unregister(&mq_deadline);
-}
-
-module_init(deadline_init);
-module_exit(deadline_exit);
-
-MODULE_AUTHOR("Jens Axboe, Damien Le Moal and Bart Van Assche");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("MQ deadline IO scheduler");
diff --git a/block/mq-deadline.c b/block/mq-deadline.c
new file mode 100644 (file)
index 0000000..3c3693c
--- /dev/null
@@ -0,0 +1,1106 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *  MQ Deadline i/o scheduler - adaptation of the legacy deadline scheduler,
+ *  for the blk-mq scheduling framework
+ *
+ *  Copyright (C) 2016 Jens Axboe <axboe@kernel.dk>
+ */
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
+#include <linux/elevator.h>
+#include <linux/bio.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/compiler.h>
+#include <linux/rbtree.h>
+#include <linux/sbitmap.h>
+
+#include <trace/events/block.h>
+
+#include "blk.h"
+#include "blk-mq.h"
+#include "blk-mq-debugfs.h"
+#include "blk-mq-tag.h"
+#include "blk-mq-sched.h"
+
+/*
+ * See Documentation/block/deadline-iosched.rst
+ */
+static const int read_expire = HZ / 2;  /* max time before a read is submitted. */
+static const int write_expire = 5 * HZ; /* ditto for writes, these limits are SOFT! */
+static const int writes_starved = 2;    /* max times reads can starve a write */
+static const int fifo_batch = 16;       /* # of sequential requests treated as one
+                                    by the above parameters. For throughput. */
+
+enum dd_data_dir {
+       DD_READ         = READ,
+       DD_WRITE        = WRITE,
+};
+
+enum { DD_DIR_COUNT = 2 };
+
+enum dd_prio {
+       DD_RT_PRIO      = 0,
+       DD_BE_PRIO      = 1,
+       DD_IDLE_PRIO    = 2,
+       DD_PRIO_MAX     = 2,
+};
+
+enum { DD_PRIO_COUNT = 3 };
+
+/* I/O statistics per I/O priority. */
+struct io_stats_per_prio {
+       local_t inserted;
+       local_t merged;
+       local_t dispatched;
+       local_t completed;
+};
+
+/* I/O statistics for all I/O priorities (enum dd_prio). */
+struct io_stats {
+       struct io_stats_per_prio stats[DD_PRIO_COUNT];
+};
+
+/*
+ * Deadline scheduler data per I/O priority (enum dd_prio). Requests are
+ * present on both sort_list[] and fifo_list[].
+ */
+struct dd_per_prio {
+       struct list_head dispatch;
+       struct rb_root sort_list[DD_DIR_COUNT];
+       struct list_head fifo_list[DD_DIR_COUNT];
+       /* Next request in FIFO order. Read, write or both are NULL. */
+       struct request *next_rq[DD_DIR_COUNT];
+};
+
+struct deadline_data {
+       /*
+        * run time data
+        */
+
+       struct dd_per_prio per_prio[DD_PRIO_COUNT];
+
+       /* Data direction of latest dispatched request. */
+       enum dd_data_dir last_dir;
+       unsigned int batching;          /* number of sequential requests made */
+       unsigned int starved;           /* times reads have starved writes */
+
+       struct io_stats __percpu *stats;
+
+       /*
+        * settings that change how the i/o scheduler behaves
+        */
+       int fifo_expire[DD_DIR_COUNT];
+       int fifo_batch;
+       int writes_starved;
+       int front_merges;
+       u32 async_depth;
+
+       spinlock_t lock;
+       spinlock_t zone_lock;
+};
+
+/* Count one event of type 'event_type' and with I/O priority 'prio' */
+#define dd_count(dd, event_type, prio) do {                            \
+       struct io_stats *io_stats = get_cpu_ptr((dd)->stats);           \
+                                                                       \
+       BUILD_BUG_ON(!__same_type((dd), struct deadline_data *));       \
+       BUILD_BUG_ON(!__same_type((prio), enum dd_prio));               \
+       local_inc(&io_stats->stats[(prio)].event_type);                 \
+       put_cpu_ptr(io_stats);                                          \
+} while (0)
+
+/*
+ * Returns the total number of dd_count(dd, event_type, prio) calls across all
+ * CPUs. No locking or barriers since it is fine if the returned sum is slightly
+ * outdated.
+ */
+#define dd_sum(dd, event_type, prio) ({                                        \
+       unsigned int cpu;                                               \
+       u32 sum = 0;                                                    \
+                                                                       \
+       BUILD_BUG_ON(!__same_type((dd), struct deadline_data *));       \
+       BUILD_BUG_ON(!__same_type((prio), enum dd_prio));               \
+       for_each_present_cpu(cpu)                                       \
+               sum += local_read(&per_cpu_ptr((dd)->stats, cpu)->      \
+                                 stats[(prio)].event_type);            \
+       sum;                                                            \
+})
+
+/* Maps an I/O priority class to a deadline scheduler priority. */
+static const enum dd_prio ioprio_class_to_prio[] = {
+       [IOPRIO_CLASS_NONE]     = DD_BE_PRIO,
+       [IOPRIO_CLASS_RT]       = DD_RT_PRIO,
+       [IOPRIO_CLASS_BE]       = DD_BE_PRIO,
+       [IOPRIO_CLASS_IDLE]     = DD_IDLE_PRIO,
+};
+
+static inline struct rb_root *
+deadline_rb_root(struct dd_per_prio *per_prio, struct request *rq)
+{
+       return &per_prio->sort_list[rq_data_dir(rq)];
+}
+
+/*
+ * Returns the I/O priority class (IOPRIO_CLASS_*) that has been assigned to a
+ * request.
+ */
+static u8 dd_rq_ioclass(struct request *rq)
+{
+       return IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
+}
+
+/*
+ * get the request after `rq' in sector-sorted order
+ */
+static inline struct request *
+deadline_latter_request(struct request *rq)
+{
+       struct rb_node *node = rb_next(&rq->rb_node);
+
+       if (node)
+               return rb_entry_rq(node);
+
+       return NULL;
+}
+
+static void
+deadline_add_rq_rb(struct dd_per_prio *per_prio, struct request *rq)
+{
+       struct rb_root *root = deadline_rb_root(per_prio, rq);
+
+       elv_rb_add(root, rq);
+}
+
+static inline void
+deadline_del_rq_rb(struct dd_per_prio *per_prio, struct request *rq)
+{
+       const enum dd_data_dir data_dir = rq_data_dir(rq);
+
+       if (per_prio->next_rq[data_dir] == rq)
+               per_prio->next_rq[data_dir] = deadline_latter_request(rq);
+
+       elv_rb_del(deadline_rb_root(per_prio, rq), rq);
+}
+
+/*
+ * remove rq from rbtree and fifo.
+ */
+static void deadline_remove_request(struct request_queue *q,
+                                   struct dd_per_prio *per_prio,
+                                   struct request *rq)
+{
+       list_del_init(&rq->queuelist);
+
+       /*
+        * We might not be on the rbtree, if we are doing an insert merge
+        */
+       if (!RB_EMPTY_NODE(&rq->rb_node))
+               deadline_del_rq_rb(per_prio, rq);
+
+       elv_rqhash_del(q, rq);
+       if (q->last_merge == rq)
+               q->last_merge = NULL;
+}
+
+static void dd_request_merged(struct request_queue *q, struct request *req,
+                             enum elv_merge type)
+{
+       struct deadline_data *dd = q->elevator->elevator_data;
+       const u8 ioprio_class = dd_rq_ioclass(req);
+       const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
+       struct dd_per_prio *per_prio = &dd->per_prio[prio];
+
+       /*
+        * if the merge was a front merge, we need to reposition request
+        */
+       if (type == ELEVATOR_FRONT_MERGE) {
+               elv_rb_del(deadline_rb_root(per_prio, req), req);
+               deadline_add_rq_rb(per_prio, req);
+       }
+}
+
+/*
+ * Callback function that is invoked after @next has been merged into @req.
+ */
+static void dd_merged_requests(struct request_queue *q, struct request *req,
+                              struct request *next)
+{
+       struct deadline_data *dd = q->elevator->elevator_data;
+       const u8 ioprio_class = dd_rq_ioclass(next);
+       const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
+
+       dd_count(dd, merged, prio);
+
+       /*
+        * if next expires before rq, assign its expire time to rq
+        * and move into next position (next will be deleted) in fifo
+        */
+       if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
+               if (time_before((unsigned long)next->fifo_time,
+                               (unsigned long)req->fifo_time)) {
+                       list_move(&req->queuelist, &next->queuelist);
+                       req->fifo_time = next->fifo_time;
+               }
+       }
+
+       /*
+        * kill knowledge of next, this one is a goner
+        */
+       deadline_remove_request(q, &dd->per_prio[prio], next);
+}
+
+/*
+ * move an entry to dispatch queue
+ */
+static void
+deadline_move_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
+                     struct request *rq)
+{
+       const enum dd_data_dir data_dir = rq_data_dir(rq);
+
+       per_prio->next_rq[data_dir] = deadline_latter_request(rq);
+
+       /*
+        * take it off the sort and fifo list
+        */
+       deadline_remove_request(rq->q, per_prio, rq);
+}
+
+/* Number of requests queued for a given priority level. */
+static u32 dd_queued(struct deadline_data *dd, enum dd_prio prio)
+{
+       return dd_sum(dd, inserted, prio) - dd_sum(dd, completed, prio);
+}
+
+/*
+ * deadline_check_fifo returns 0 if there are no expired requests on the fifo,
+ * 1 otherwise. Requires !list_empty(&per_prio->fifo_list[data_dir])
+ */
+static inline int deadline_check_fifo(struct dd_per_prio *per_prio,
+                                     enum dd_data_dir data_dir)
+{
+       struct request *rq = rq_entry_fifo(per_prio->fifo_list[data_dir].next);
+
+       /*
+        * rq is expired!
+        */
+       if (time_after_eq(jiffies, (unsigned long)rq->fifo_time))
+               return 1;
+
+       return 0;
+}
+
+/*
+ * For the specified data direction, return the next request to
+ * dispatch using arrival ordered lists.
+ */
+static struct request *
+deadline_fifo_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
+                     enum dd_data_dir data_dir)
+{
+       struct request *rq;
+       unsigned long flags;
+
+       if (list_empty(&per_prio->fifo_list[data_dir]))
+               return NULL;
+
+       rq = rq_entry_fifo(per_prio->fifo_list[data_dir].next);
+       if (data_dir == DD_READ || !blk_queue_is_zoned(rq->q))
+               return rq;
+
+       /*
+        * Look for a write request that can be dispatched, that is one with
+        * an unlocked target zone.
+        */
+       spin_lock_irqsave(&dd->zone_lock, flags);
+       list_for_each_entry(rq, &per_prio->fifo_list[DD_WRITE], queuelist) {
+               if (blk_req_can_dispatch_to_zone(rq))
+                       goto out;
+       }
+       rq = NULL;
+out:
+       spin_unlock_irqrestore(&dd->zone_lock, flags);
+
+       return rq;
+}
+
+/*
+ * For the specified data direction, return the next request to
+ * dispatch using sector position sorted lists.
+ */
+static struct request *
+deadline_next_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
+                     enum dd_data_dir data_dir)
+{
+       struct request *rq;
+       unsigned long flags;
+
+       rq = per_prio->next_rq[data_dir];
+       if (!rq)
+               return NULL;
+
+       if (data_dir == DD_READ || !blk_queue_is_zoned(rq->q))
+               return rq;
+
+       /*
+        * Look for a write request that can be dispatched, that is one with
+        * an unlocked target zone.
+        */
+       spin_lock_irqsave(&dd->zone_lock, flags);
+       while (rq) {
+               if (blk_req_can_dispatch_to_zone(rq))
+                       break;
+               rq = deadline_latter_request(rq);
+       }
+       spin_unlock_irqrestore(&dd->zone_lock, flags);
+
+       return rq;
+}
+
+/*
+ * deadline_dispatch_requests selects the best request according to
+ * read/write expire, fifo_batch, etc
+ */
+static struct request *__dd_dispatch_request(struct deadline_data *dd,
+                                            struct dd_per_prio *per_prio)
+{
+       struct request *rq, *next_rq;
+       enum dd_data_dir data_dir;
+       enum dd_prio prio;
+       u8 ioprio_class;
+
+       lockdep_assert_held(&dd->lock);
+
+       if (!list_empty(&per_prio->dispatch)) {
+               rq = list_first_entry(&per_prio->dispatch, struct request,
+                                     queuelist);
+               list_del_init(&rq->queuelist);
+               goto done;
+       }
+
+       /*
+        * batches are currently reads XOR writes
+        */
+       rq = deadline_next_request(dd, per_prio, dd->last_dir);
+       if (rq && dd->batching < dd->fifo_batch)
+               /* we have a next request and are still entitled to batch */
+               goto dispatch_request;
+
+       /*
+        * at this point we are not running a batch. select the appropriate
+        * data direction (read / write)
+        */
+
+       if (!list_empty(&per_prio->fifo_list[DD_READ])) {
+               BUG_ON(RB_EMPTY_ROOT(&per_prio->sort_list[DD_READ]));
+
+               if (deadline_fifo_request(dd, per_prio, DD_WRITE) &&
+                   (dd->starved++ >= dd->writes_starved))
+                       goto dispatch_writes;
+
+               data_dir = DD_READ;
+
+               goto dispatch_find_request;
+       }
+
+       /*
+        * there are either no reads or writes have been starved
+        */
+
+       if (!list_empty(&per_prio->fifo_list[DD_WRITE])) {
+dispatch_writes:
+               BUG_ON(RB_EMPTY_ROOT(&per_prio->sort_list[DD_WRITE]));
+
+               dd->starved = 0;
+
+               data_dir = DD_WRITE;
+
+               goto dispatch_find_request;
+       }
+
+       return NULL;
+
+dispatch_find_request:
+       /*
+        * we are not running a batch, find best request for selected data_dir
+        */
+       next_rq = deadline_next_request(dd, per_prio, data_dir);
+       if (deadline_check_fifo(per_prio, data_dir) || !next_rq) {
+               /*
+                * A deadline has expired, the last request was in the other
+                * direction, or we have run out of higher-sectored requests.
+                * Start again from the request with the earliest expiry time.
+                */
+               rq = deadline_fifo_request(dd, per_prio, data_dir);
+       } else {
+               /*
+                * The last req was the same dir and we have a next request in
+                * sort order. No expired requests so continue on from here.
+                */
+               rq = next_rq;
+       }
+
+       /*
+        * For a zoned block device, if we only have writes queued and none of
+        * them can be dispatched, rq will be NULL.
+        */
+       if (!rq)
+               return NULL;
+
+       dd->last_dir = data_dir;
+       dd->batching = 0;
+
+dispatch_request:
+       /*
+        * rq is the selected appropriate request.
+        */
+       dd->batching++;
+       deadline_move_request(dd, per_prio, rq);
+done:
+       ioprio_class = dd_rq_ioclass(rq);
+       prio = ioprio_class_to_prio[ioprio_class];
+       dd_count(dd, dispatched, prio);
+       /*
+        * If the request needs its target zone locked, do it.
+        */
+       blk_req_zone_write_lock(rq);
+       rq->rq_flags |= RQF_STARTED;
+       return rq;
+}
+
+/*
+ * Called from blk_mq_run_hw_queue() -> __blk_mq_sched_dispatch_requests().
+ *
+ * One confusing aspect here is that we get called for a specific
+ * hardware queue, but we may return a request that is for a
+ * different hardware queue. This is because mq-deadline has shared
+ * state for all hardware queues, in terms of sorting, FIFOs, etc.
+ */
+static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
+{
+       struct deadline_data *dd = hctx->queue->elevator->elevator_data;
+       struct request *rq;
+       enum dd_prio prio;
+
+       spin_lock(&dd->lock);
+       for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
+               rq = __dd_dispatch_request(dd, &dd->per_prio[prio]);
+               if (rq)
+                       break;
+       }
+       spin_unlock(&dd->lock);
+
+       return rq;
+}
+
+/*
+ * Called by __blk_mq_alloc_request(). The shallow_depth value set by this
+ * function is used by __blk_mq_get_tag().
+ */
+static void dd_limit_depth(unsigned int op, struct blk_mq_alloc_data *data)
+{
+       struct deadline_data *dd = data->q->elevator->elevator_data;
+
+       /* Do not throttle synchronous reads. */
+       if (op_is_sync(op) && !op_is_write(op))
+               return;
+
+       /*
+        * Throttle asynchronous requests and writes such that these requests
+        * do not block the allocation of synchronous requests.
+        */
+       data->shallow_depth = dd->async_depth;
+}
+
+/* Called by blk_mq_update_nr_requests(). */
+static void dd_depth_updated(struct blk_mq_hw_ctx *hctx)
+{
+       struct request_queue *q = hctx->queue;
+       struct deadline_data *dd = q->elevator->elevator_data;
+       struct blk_mq_tags *tags = hctx->sched_tags;
+
+       dd->async_depth = max(1UL, 3 * q->nr_requests / 4);
+
+       sbitmap_queue_min_shallow_depth(tags->bitmap_tags, dd->async_depth);
+}
+
+/* Called by blk_mq_init_hctx() and blk_mq_init_sched(). */
+static int dd_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
+{
+       dd_depth_updated(hctx);
+       return 0;
+}
+
+static void dd_exit_sched(struct elevator_queue *e)
+{
+       struct deadline_data *dd = e->elevator_data;
+       enum dd_prio prio;
+
+       for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
+               struct dd_per_prio *per_prio = &dd->per_prio[prio];
+
+               WARN_ON_ONCE(!list_empty(&per_prio->fifo_list[DD_READ]));
+               WARN_ON_ONCE(!list_empty(&per_prio->fifo_list[DD_WRITE]));
+       }
+
+       free_percpu(dd->stats);
+
+       kfree(dd);
+}
+
+/*
+ * initialize elevator private data (deadline_data).
+ */
+static int dd_init_sched(struct request_queue *q, struct elevator_type *e)
+{
+       struct deadline_data *dd;
+       struct elevator_queue *eq;
+       enum dd_prio prio;
+       int ret = -ENOMEM;
+
+       eq = elevator_alloc(q, e);
+       if (!eq)
+               return ret;
+
+       dd = kzalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
+       if (!dd)
+               goto put_eq;
+
+       eq->elevator_data = dd;
+
+       dd->stats = alloc_percpu_gfp(typeof(*dd->stats),
+                                    GFP_KERNEL | __GFP_ZERO);
+       if (!dd->stats)
+               goto free_dd;
+
+       for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
+               struct dd_per_prio *per_prio = &dd->per_prio[prio];
+
+               INIT_LIST_HEAD(&per_prio->dispatch);
+               INIT_LIST_HEAD(&per_prio->fifo_list[DD_READ]);
+               INIT_LIST_HEAD(&per_prio->fifo_list[DD_WRITE]);
+               per_prio->sort_list[DD_READ] = RB_ROOT;
+               per_prio->sort_list[DD_WRITE] = RB_ROOT;
+       }
+       dd->fifo_expire[DD_READ] = read_expire;
+       dd->fifo_expire[DD_WRITE] = write_expire;
+       dd->writes_starved = writes_starved;
+       dd->front_merges = 1;
+       dd->last_dir = DD_WRITE;
+       dd->fifo_batch = fifo_batch;
+       spin_lock_init(&dd->lock);
+       spin_lock_init(&dd->zone_lock);
+
+       q->elevator = eq;
+       return 0;
+
+free_dd:
+       kfree(dd);
+
+put_eq:
+       kobject_put(&eq->kobj);
+       return ret;
+}
+
+/*
+ * Try to merge @bio into an existing request. If @bio has been merged into
+ * an existing request, store the pointer to that request into *@rq.
+ */
+static int dd_request_merge(struct request_queue *q, struct request **rq,
+                           struct bio *bio)
+{
+       struct deadline_data *dd = q->elevator->elevator_data;
+       const u8 ioprio_class = IOPRIO_PRIO_CLASS(bio->bi_ioprio);
+       const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
+       struct dd_per_prio *per_prio = &dd->per_prio[prio];
+       sector_t sector = bio_end_sector(bio);
+       struct request *__rq;
+
+       if (!dd->front_merges)
+               return ELEVATOR_NO_MERGE;
+
+       __rq = elv_rb_find(&per_prio->sort_list[bio_data_dir(bio)], sector);
+       if (__rq) {
+               BUG_ON(sector != blk_rq_pos(__rq));
+
+               if (elv_bio_merge_ok(__rq, bio)) {
+                       *rq = __rq;
+                       if (blk_discard_mergable(__rq))
+                               return ELEVATOR_DISCARD_MERGE;
+                       return ELEVATOR_FRONT_MERGE;
+               }
+       }
+
+       return ELEVATOR_NO_MERGE;
+}
+
+/*
+ * Attempt to merge a bio into an existing request. This function is called
+ * before @bio is associated with a request.
+ */
+static bool dd_bio_merge(struct request_queue *q, struct bio *bio,
+               unsigned int nr_segs)
+{
+       struct deadline_data *dd = q->elevator->elevator_data;
+       struct request *free = NULL;
+       bool ret;
+
+       spin_lock(&dd->lock);
+       ret = blk_mq_sched_try_merge(q, bio, nr_segs, &free);
+       spin_unlock(&dd->lock);
+
+       if (free)
+               blk_mq_free_request(free);
+
+       return ret;
+}
+
+/*
+ * add rq to rbtree and fifo
+ */
+static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
+                             bool at_head)
+{
+       struct request_queue *q = hctx->queue;
+       struct deadline_data *dd = q->elevator->elevator_data;
+       const enum dd_data_dir data_dir = rq_data_dir(rq);
+       u16 ioprio = req_get_ioprio(rq);
+       u8 ioprio_class = IOPRIO_PRIO_CLASS(ioprio);
+       struct dd_per_prio *per_prio;
+       enum dd_prio prio;
+       LIST_HEAD(free);
+
+       lockdep_assert_held(&dd->lock);
+
+       /*
+        * This may be a requeue of a write request that has locked its
+        * target zone. If it is the case, this releases the zone lock.
+        */
+       blk_req_zone_write_unlock(rq);
+
+       prio = ioprio_class_to_prio[ioprio_class];
+       dd_count(dd, inserted, prio);
+       rq->elv.priv[0] = (void *)(uintptr_t)1;
+
+       if (blk_mq_sched_try_insert_merge(q, rq, &free)) {
+               blk_mq_free_requests(&free);
+               return;
+       }
+
+       trace_block_rq_insert(rq);
+
+       per_prio = &dd->per_prio[prio];
+       if (at_head) {
+               list_add(&rq->queuelist, &per_prio->dispatch);
+       } else {
+               deadline_add_rq_rb(per_prio, rq);
+
+               if (rq_mergeable(rq)) {
+                       elv_rqhash_add(q, rq);
+                       if (!q->last_merge)
+                               q->last_merge = rq;
+               }
+
+               /*
+                * set expire time and add to fifo list
+                */
+               rq->fifo_time = jiffies + dd->fifo_expire[data_dir];
+               list_add_tail(&rq->queuelist, &per_prio->fifo_list[data_dir]);
+       }
+}
+
+/*
+ * Called from blk_mq_sched_insert_request() or blk_mq_sched_insert_requests().
+ */
+static void dd_insert_requests(struct blk_mq_hw_ctx *hctx,
+                              struct list_head *list, bool at_head)
+{
+       struct request_queue *q = hctx->queue;
+       struct deadline_data *dd = q->elevator->elevator_data;
+
+       spin_lock(&dd->lock);
+       while (!list_empty(list)) {
+               struct request *rq;
+
+               rq = list_first_entry(list, struct request, queuelist);
+               list_del_init(&rq->queuelist);
+               dd_insert_request(hctx, rq, at_head);
+       }
+       spin_unlock(&dd->lock);
+}
+
+/* Callback from inside blk_mq_rq_ctx_init(). */
+static void dd_prepare_request(struct request *rq)
+{
+       rq->elv.priv[0] = NULL;
+}
+
+/*
+ * Callback from inside blk_mq_free_request().
+ *
+ * For zoned block devices, write unlock the target zone of
+ * completed write requests. Do this while holding the zone lock
+ * spinlock so that the zone is never unlocked while deadline_fifo_request()
+ * or deadline_next_request() are executing. This function is called for
+ * all requests, whether or not these requests complete successfully.
+ *
+ * For a zoned block device, __dd_dispatch_request() may have stopped
+ * dispatching requests if all the queued requests are write requests directed
+ * at zones that are already locked due to on-going write requests. To ensure
+ * write request dispatch progress in this case, mark the queue as needing a
+ * restart to ensure that the queue is run again after completion of the
+ * request and zones being unlocked.
+ */
+static void dd_finish_request(struct request *rq)
+{
+       struct request_queue *q = rq->q;
+       struct deadline_data *dd = q->elevator->elevator_data;
+       const u8 ioprio_class = dd_rq_ioclass(rq);
+       const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
+       struct dd_per_prio *per_prio = &dd->per_prio[prio];
+
+       /*
+        * The block layer core may call dd_finish_request() without having
+        * called dd_insert_requests(). Hence only update statistics for
+        * requests for which dd_insert_requests() has been called. See also
+        * blk_mq_request_bypass_insert().
+        */
+       if (rq->elv.priv[0])
+               dd_count(dd, completed, prio);
+
+       if (blk_queue_is_zoned(q)) {
+               unsigned long flags;
+
+               spin_lock_irqsave(&dd->zone_lock, flags);
+               blk_req_zone_write_unlock(rq);
+               if (!list_empty(&per_prio->fifo_list[DD_WRITE]))
+                       blk_mq_sched_mark_restart_hctx(rq->mq_hctx);
+               spin_unlock_irqrestore(&dd->zone_lock, flags);
+       }
+}
+
+static bool dd_has_work_for_prio(struct dd_per_prio *per_prio)
+{
+       return !list_empty_careful(&per_prio->dispatch) ||
+               !list_empty_careful(&per_prio->fifo_list[DD_READ]) ||
+               !list_empty_careful(&per_prio->fifo_list[DD_WRITE]);
+}
+
+static bool dd_has_work(struct blk_mq_hw_ctx *hctx)
+{
+       struct deadline_data *dd = hctx->queue->elevator->elevator_data;
+       enum dd_prio prio;
+
+       for (prio = 0; prio <= DD_PRIO_MAX; prio++)
+               if (dd_has_work_for_prio(&dd->per_prio[prio]))
+                       return true;
+
+       return false;
+}
+
+/*
+ * sysfs parts below
+ */
+#define SHOW_INT(__FUNC, __VAR)                                                \
+static ssize_t __FUNC(struct elevator_queue *e, char *page)            \
+{                                                                      \
+       struct deadline_data *dd = e->elevator_data;                    \
+                                                                       \
+       return sysfs_emit(page, "%d\n", __VAR);                         \
+}
+#define SHOW_JIFFIES(__FUNC, __VAR) SHOW_INT(__FUNC, jiffies_to_msecs(__VAR))
+SHOW_JIFFIES(deadline_read_expire_show, dd->fifo_expire[DD_READ]);
+SHOW_JIFFIES(deadline_write_expire_show, dd->fifo_expire[DD_WRITE]);
+SHOW_INT(deadline_writes_starved_show, dd->writes_starved);
+SHOW_INT(deadline_front_merges_show, dd->front_merges);
+SHOW_INT(deadline_async_depth_show, dd->async_depth);
+SHOW_INT(deadline_fifo_batch_show, dd->fifo_batch);
+#undef SHOW_INT
+#undef SHOW_JIFFIES
+
+#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)                        \
+static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)        \
+{                                                                      \
+       struct deadline_data *dd = e->elevator_data;                    \
+       int __data, __ret;                                              \
+                                                                       \
+       __ret = kstrtoint(page, 0, &__data);                            \
+       if (__ret < 0)                                                  \
+               return __ret;                                           \
+       if (__data < (MIN))                                             \
+               __data = (MIN);                                         \
+       else if (__data > (MAX))                                        \
+               __data = (MAX);                                         \
+       *(__PTR) = __CONV(__data);                                      \
+       return count;                                                   \
+}
+#define STORE_INT(__FUNC, __PTR, MIN, MAX)                             \
+       STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, )
+#define STORE_JIFFIES(__FUNC, __PTR, MIN, MAX)                         \
+       STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, msecs_to_jiffies)
+STORE_JIFFIES(deadline_read_expire_store, &dd->fifo_expire[DD_READ], 0, INT_MAX);
+STORE_JIFFIES(deadline_write_expire_store, &dd->fifo_expire[DD_WRITE], 0, INT_MAX);
+STORE_INT(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX);
+STORE_INT(deadline_front_merges_store, &dd->front_merges, 0, 1);
+STORE_INT(deadline_async_depth_store, &dd->async_depth, 1, INT_MAX);
+STORE_INT(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX);
+#undef STORE_FUNCTION
+#undef STORE_INT
+#undef STORE_JIFFIES
+
+#define DD_ATTR(name) \
+       __ATTR(name, 0644, deadline_##name##_show, deadline_##name##_store)
+
+static struct elv_fs_entry deadline_attrs[] = {
+       DD_ATTR(read_expire),
+       DD_ATTR(write_expire),
+       DD_ATTR(writes_starved),
+       DD_ATTR(front_merges),
+       DD_ATTR(async_depth),
+       DD_ATTR(fifo_batch),
+       __ATTR_NULL
+};
+
+#ifdef CONFIG_BLK_DEBUG_FS
+#define DEADLINE_DEBUGFS_DDIR_ATTRS(prio, data_dir, name)              \
+static void *deadline_##name##_fifo_start(struct seq_file *m,          \
+                                         loff_t *pos)                  \
+       __acquires(&dd->lock)                                           \
+{                                                                      \
+       struct request_queue *q = m->private;                           \
+       struct deadline_data *dd = q->elevator->elevator_data;          \
+       struct dd_per_prio *per_prio = &dd->per_prio[prio];             \
+                                                                       \
+       spin_lock(&dd->lock);                                           \
+       return seq_list_start(&per_prio->fifo_list[data_dir], *pos);    \
+}                                                                      \
+                                                                       \
+static void *deadline_##name##_fifo_next(struct seq_file *m, void *v,  \
+                                        loff_t *pos)                   \
+{                                                                      \
+       struct request_queue *q = m->private;                           \
+       struct deadline_data *dd = q->elevator->elevator_data;          \
+       struct dd_per_prio *per_prio = &dd->per_prio[prio];             \
+                                                                       \
+       return seq_list_next(v, &per_prio->fifo_list[data_dir], pos);   \
+}                                                                      \
+                                                                       \
+static void deadline_##name##_fifo_stop(struct seq_file *m, void *v)   \
+       __releases(&dd->lock)                                           \
+{                                                                      \
+       struct request_queue *q = m->private;                           \
+       struct deadline_data *dd = q->elevator->elevator_data;          \
+                                                                       \
+       spin_unlock(&dd->lock);                                         \
+}                                                                      \
+                                                                       \
+static const struct seq_operations deadline_##name##_fifo_seq_ops = {  \
+       .start  = deadline_##name##_fifo_start,                         \
+       .next   = deadline_##name##_fifo_next,                          \
+       .stop   = deadline_##name##_fifo_stop,                          \
+       .show   = blk_mq_debugfs_rq_show,                               \
+};                                                                     \
+                                                                       \
+static int deadline_##name##_next_rq_show(void *data,                  \
+                                         struct seq_file *m)           \
+{                                                                      \
+       struct request_queue *q = data;                                 \
+       struct deadline_data *dd = q->elevator->elevator_data;          \
+       struct dd_per_prio *per_prio = &dd->per_prio[prio];             \
+       struct request *rq = per_prio->next_rq[data_dir];               \
+                                                                       \
+       if (rq)                                                         \
+               __blk_mq_debugfs_rq_show(m, rq);                        \
+       return 0;                                                       \
+}
+
+DEADLINE_DEBUGFS_DDIR_ATTRS(DD_RT_PRIO, DD_READ, read0);
+DEADLINE_DEBUGFS_DDIR_ATTRS(DD_RT_PRIO, DD_WRITE, write0);
+DEADLINE_DEBUGFS_DDIR_ATTRS(DD_BE_PRIO, DD_READ, read1);
+DEADLINE_DEBUGFS_DDIR_ATTRS(DD_BE_PRIO, DD_WRITE, write1);
+DEADLINE_DEBUGFS_DDIR_ATTRS(DD_IDLE_PRIO, DD_READ, read2);
+DEADLINE_DEBUGFS_DDIR_ATTRS(DD_IDLE_PRIO, DD_WRITE, write2);
+#undef DEADLINE_DEBUGFS_DDIR_ATTRS
+
+/* debugfs: report dd->batching, the request count of the current batch. */
+static int deadline_batching_show(void *data, struct seq_file *m)
+{
+       struct request_queue *q = data;
+       struct deadline_data *dd = q->elevator->elevator_data;
+
+       seq_printf(m, "%u\n", dd->batching);
+       return 0;
+}
+
+/* debugfs: report dd->starved, the scheduler's starvation counter. */
+static int deadline_starved_show(void *data, struct seq_file *m)
+{
+       struct request_queue *q = data;
+       struct deadline_data *dd = q->elevator->elevator_data;
+
+       seq_printf(m, "%u\n", dd->starved);
+       return 0;
+}
+
+/* debugfs: report dd->async_depth, the tag depth used for async requests. */
+static int dd_async_depth_show(void *data, struct seq_file *m)
+{
+       struct request_queue *q = data;
+       struct deadline_data *dd = q->elevator->elevator_data;
+
+       seq_printf(m, "%u\n", dd->async_depth);
+       return 0;
+}
+
+/*
+ * debugfs: report dd_queued() for the RT, BE and IDLE priority classes,
+ * space-separated on a single line.
+ */
+static int dd_queued_show(void *data, struct seq_file *m)
+{
+       struct request_queue *q = data;
+       struct deadline_data *dd = q->elevator->elevator_data;
+
+       seq_printf(m, "%u %u %u\n", dd_queued(dd, DD_RT_PRIO),
+                  dd_queued(dd, DD_BE_PRIO),
+                  dd_queued(dd, DD_IDLE_PRIO));
+       return 0;
+}
+
+/*
+ * Number of requests owned by the block driver for a given priority:
+ * everything handed to the driver (dispatched + merged) minus what the
+ * driver has already completed.
+ */
+static u32 dd_owned_by_driver(struct deadline_data *dd, enum dd_prio prio)
+{
+       return dd_sum(dd, dispatched, prio) + dd_sum(dd, merged, prio)
+               - dd_sum(dd, completed, prio);
+}
+
+/*
+ * debugfs: report dd_owned_by_driver() for the RT, BE and IDLE priority
+ * classes, space-separated on a single line.
+ */
+static int dd_owned_by_driver_show(void *data, struct seq_file *m)
+{
+       struct request_queue *q = data;
+       struct deadline_data *dd = q->elevator->elevator_data;
+
+       seq_printf(m, "%u %u %u\n", dd_owned_by_driver(dd, DD_RT_PRIO),
+                  dd_owned_by_driver(dd, DD_BE_PRIO),
+                  dd_owned_by_driver(dd, DD_IDLE_PRIO));
+       return 0;
+}
+
+/*
+ * Generate seq_file start/next/stop callbacks that iterate the dispatch
+ * list of one priority class while holding dd->lock, plus the
+ * seq_operations exposing it through blk_mq_debugfs_rq_show.
+ */
+#define DEADLINE_DISPATCH_ATTR(prio)                                   \
+static void *deadline_dispatch##prio##_start(struct seq_file *m,       \
+                                            loff_t *pos)               \
+       __acquires(&dd->lock)                                           \
+{                                                                      \
+       struct request_queue *q = m->private;                           \
+       struct deadline_data *dd = q->elevator->elevator_data;          \
+       struct dd_per_prio *per_prio = &dd->per_prio[prio];             \
+                                                                       \
+       spin_lock(&dd->lock);                                           \
+       return seq_list_start(&per_prio->dispatch, *pos);               \
+}                                                                      \
+                                                                       \
+static void *deadline_dispatch##prio##_next(struct seq_file *m,                \
+                                           void *v, loff_t *pos)       \
+{                                                                      \
+       struct request_queue *q = m->private;                           \
+       struct deadline_data *dd = q->elevator->elevator_data;          \
+       struct dd_per_prio *per_prio = &dd->per_prio[prio];             \
+                                                                       \
+       return seq_list_next(v, &per_prio->dispatch, pos);              \
+}                                                                      \
+                                                                       \
+static void deadline_dispatch##prio##_stop(struct seq_file *m, void *v)        \
+       __releases(&dd->lock)                                           \
+{                                                                      \
+       struct request_queue *q = m->private;                           \
+       struct deadline_data *dd = q->elevator->elevator_data;          \
+                                                                       \
+       spin_unlock(&dd->lock);                                         \
+}                                                                      \
+                                                                       \
+static const struct seq_operations deadline_dispatch##prio##_seq_ops = { \
+       .start  = deadline_dispatch##prio##_start,                      \
+       .next   = deadline_dispatch##prio##_next,                       \
+       .stop   = deadline_dispatch##prio##_stop,                       \
+       .show   = blk_mq_debugfs_rq_show,                               \
+}
+
+DEADLINE_DISPATCH_ATTR(0);
+DEADLINE_DISPATCH_ATTR(1);
+DEADLINE_DISPATCH_ATTR(2);
+#undef DEADLINE_DISPATCH_ATTR
+
+/* Name the per-(priority, direction) fifo_list and next_rq debugfs entries. */
+#define DEADLINE_QUEUE_DDIR_ATTRS(name)                                        \
+       {#name "_fifo_list", 0400,                                      \
+                       .seq_ops = &deadline_##name##_fifo_seq_ops}
+#define DEADLINE_NEXT_RQ_ATTR(name)                                    \
+       {#name "_next_rq", 0400, deadline_##name##_next_rq_show}
+/* All mq-deadline debugfs attributes, each read-only (mode 0400). */
+static const struct blk_mq_debugfs_attr deadline_queue_debugfs_attrs[] = {
+       DEADLINE_QUEUE_DDIR_ATTRS(read0),
+       DEADLINE_QUEUE_DDIR_ATTRS(write0),
+       DEADLINE_QUEUE_DDIR_ATTRS(read1),
+       DEADLINE_QUEUE_DDIR_ATTRS(write1),
+       DEADLINE_QUEUE_DDIR_ATTRS(read2),
+       DEADLINE_QUEUE_DDIR_ATTRS(write2),
+       DEADLINE_NEXT_RQ_ATTR(read0),
+       DEADLINE_NEXT_RQ_ATTR(write0),
+       DEADLINE_NEXT_RQ_ATTR(read1),
+       DEADLINE_NEXT_RQ_ATTR(write1),
+       DEADLINE_NEXT_RQ_ATTR(read2),
+       DEADLINE_NEXT_RQ_ATTR(write2),
+       {"batching", 0400, deadline_batching_show},
+       {"starved", 0400, deadline_starved_show},
+       {"async_depth", 0400, dd_async_depth_show},
+       {"dispatch0", 0400, .seq_ops = &deadline_dispatch0_seq_ops},
+       {"dispatch1", 0400, .seq_ops = &deadline_dispatch1_seq_ops},
+       {"dispatch2", 0400, .seq_ops = &deadline_dispatch2_seq_ops},
+       {"owned_by_driver", 0400, dd_owned_by_driver_show},
+       {"queued", 0400, dd_queued_show},
+       {},     /* sentinel terminating the attribute list */
+};
+#undef DEADLINE_QUEUE_DDIR_ATTRS
+#endif
+
+/*
+ * mq-deadline elevator registration: scheduler callbacks, sysfs and
+ * (when CONFIG_BLK_DEBUG_FS) debugfs attributes, plus the legacy
+ * "deadline" alias.
+ */
+static struct elevator_type mq_deadline = {
+       .ops = {
+               .depth_updated          = dd_depth_updated,
+               .limit_depth            = dd_limit_depth,
+               .insert_requests        = dd_insert_requests,
+               .dispatch_request       = dd_dispatch_request,
+               .prepare_request        = dd_prepare_request,
+               .finish_request         = dd_finish_request,
+               .next_request           = elv_rb_latter_request,
+               .former_request         = elv_rb_former_request,
+               .bio_merge              = dd_bio_merge,
+               .request_merge          = dd_request_merge,
+               .requests_merged        = dd_merged_requests,
+               .request_merged         = dd_request_merged,
+               .has_work               = dd_has_work,
+               .init_sched             = dd_init_sched,
+               .exit_sched             = dd_exit_sched,
+               .init_hctx              = dd_init_hctx,
+       },
+
+#ifdef CONFIG_BLK_DEBUG_FS
+       .queue_debugfs_attrs = deadline_queue_debugfs_attrs,
+#endif
+       .elevator_attrs = deadline_attrs,
+       .elevator_name = "mq-deadline",
+       .elevator_alias = "deadline",
+       .elevator_features = ELEVATOR_F_ZBD_SEQ_WRITE,
+       .elevator_owner = THIS_MODULE,
+};
+MODULE_ALIAS("mq-deadline-iosched");
+
+/* Register the mq-deadline elevator with the block layer on module load. */
+static int __init deadline_init(void)
+{
+       return elv_register(&mq_deadline);
+}
+
+/* Unregister the elevator on module unload. */
+static void __exit deadline_exit(void)
+{
+       elv_unregister(&mq_deadline);
+}
+
+module_init(deadline_init);
+module_exit(deadline_exit);
+
+MODULE_AUTHOR("Jens Axboe, Damien Le Moal and Bart Van Assche");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("MQ deadline IO scheduler");
index 6e2a649..278593b 100644 (file)
@@ -264,7 +264,6 @@ config SYSV68_PARTITION
 
 config CMDLINE_PARTITION
        bool "Command line partition support" if PARTITION_ADVANCED
-       select BLK_CMDLINE_PARSER
        help
          Say Y here if you want to read the partition table from bootargs.
          The format for the command line is just like mtdparts.
index c64c57b..2c381c6 100644 (file)
@@ -275,7 +275,7 @@ int adfspart_check_ADFS(struct parsed_partitions *state)
        /*
         * Work out start of non-adfs partition.
         */
-       nr_sects = (state->bdev->bd_inode->i_size >> 9) - start_sect;
+       nr_sects = get_capacity(state->disk) - start_sect;
 
        if (start_sect) {
                switch (id) {
@@ -540,7 +540,7 @@ int adfspart_check_EESOX(struct parsed_partitions *state)
        if (i != 0) {
                sector_t size;
 
-               size = get_capacity(state->bdev->bd_disk);
+               size = get_capacity(state->disk);
                put_partition(state, slot++, start, size - start);
                strlcat(state->pp_buf, "\n", PAGE_SIZE);
        }
index c7b4fd1..85f4b96 100644 (file)
@@ -66,22 +66,6 @@ struct pvd {
 
 #define LVM_MAXLVS 256
 
-/**
- * last_lba(): return number of last logical block of device
- * @bdev: block device
- *
- * Description: Returns last LBA value on success, 0 on error.
- * This is stored (by sd and ide-geometry) in
- *  the part[0] entry for this disk, and is the number of
- *  physical sectors available on the disk.
- */
-static u64 last_lba(struct block_device *bdev)
-{
-       if (!bdev || !bdev->bd_inode)
-               return 0;
-       return (bdev->bd_inode->i_size >> 9) - 1ULL;
-}
-
 /**
  * read_lba(): Read bytes from disk, starting at given LBA
  * @state
@@ -89,7 +73,7 @@ static u64 last_lba(struct block_device *bdev)
  * @buffer
  * @count
  *
- * Description:  Reads @count bytes from @state->bdev into @buffer.
+ * Description:  Reads @count bytes from @state->disk into @buffer.
  * Returns number of bytes read on success, 0 on error.
  */
 static size_t read_lba(struct parsed_partitions *state, u64 lba, u8 *buffer,
@@ -97,7 +81,7 @@ static size_t read_lba(struct parsed_partitions *state, u64 lba, u8 *buffer,
 {
        size_t totalreadcount = 0;
 
-       if (!buffer || lba + count / 512 > last_lba(state->bdev))
+       if (!buffer || lba + count / 512 > get_capacity(state->disk) - 1ULL)
                return 0;
 
        while (count) {
index 9526491..5c8624e 100644 (file)
@@ -34,7 +34,6 @@ int amiga_partition(struct parsed_partitions *state)
        int start_sect, nr_sects, blk, part, res = 0;
        int blksize = 1;        /* Multiplier for disk block size */
        int slot = 1;
-       char b[BDEVNAME_SIZE];
 
        for (blk = 0; ; blk++, put_dev_sector(sect)) {
                if (blk == RDB_ALLOCATION_LIMIT)
@@ -42,7 +41,7 @@ int amiga_partition(struct parsed_partitions *state)
                data = read_part_sector(state, blk, &sect);
                if (!data) {
                        pr_err("Dev %s: unable to read RDB block %d\n",
-                              bdevname(state->bdev, b), blk);
+                              state->disk->disk_name, blk);
                        res = -1;
                        goto rdb_done;
                }
@@ -64,7 +63,7 @@ int amiga_partition(struct parsed_partitions *state)
                }
 
                pr_err("Dev %s: RDB in block %d has bad checksum\n",
-                      bdevname(state->bdev, b), blk);
+                      state->disk->disk_name, blk);
        }
 
        /* blksize is blocks per 512 byte standard block */
@@ -84,7 +83,7 @@ int amiga_partition(struct parsed_partitions *state)
                data = read_part_sector(state, blk, &sect);
                if (!data) {
                        pr_err("Dev %s: unable to read partition block %d\n",
-                              bdevname(state->bdev, b), blk);
+                              state->disk->disk_name, blk);
                        res = -1;
                        goto rdb_done;
                }
index 2305840..da59941 100644 (file)
@@ -47,7 +47,7 @@ int atari_partition(struct parsed_partitions *state)
         * ATARI partition scheme supports 512 lba only.  If this is not
         * the case, bail early to avoid miscalculating hd_size.
         */
-       if (bdev_logical_block_size(state->bdev) != 512)
+       if (queue_logical_block_size(state->disk->queue) != 512)
                return 0;
 
        rs = read_part_sector(state, 0, &sect);
@@ -55,7 +55,7 @@ int atari_partition(struct parsed_partitions *state)
                return -1;
 
        /* Verify this is an Atari rootsector: */
-       hd_size = state->bdev->bd_inode->i_size >> 9;
+       hd_size = get_capacity(state->disk);
        if (!VALID_PARTITION(&rs->part[0], hd_size) &&
            !VALID_PARTITION(&rs->part[1], hd_size) &&
            !VALID_PARTITION(&rs->part[2], hd_size) &&
index c577e9e..d5b28e3 100644 (file)
@@ -9,7 +9,7 @@
  * description.
  */
 struct parsed_partitions {
-       struct block_device *bdev;
+       struct gendisk *disk;
        char name[BDEVNAME_SIZE];
        struct {
                sector_t from;
index 8f545c3..1af610f 100644 (file)
  * For further information, see "Documentation/block/cmdline-partition.rst"
  *
  */
+#include <linux/blkdev.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include "check.h"
 
-#include <linux/cmdline-parser.h>
 
-#include "check.h"
+/* partition flags */
+#define PF_RDONLY                   0x01 /* Device is read only */
+#define PF_POWERUP_LOCK             0x02 /* Always locked after reset */
+
+/* One "<size>[@<from>](<name>)[ro][lk]" entry parsed from the command line. */
+struct cmdline_subpart {
+       char name[BDEVNAME_SIZE]; /* partition name, such as 'rootfs' */
+       sector_t from;  /* ~0ULL = unset; resolved by cmdline_parts_set() */
+       sector_t size;  /* ~0ULL = "rest of device"; clamped later */
+       int flags;      /* PF_* bits */
+       struct cmdline_subpart *next_subpart;
+};
+
+/* Per-block-device list of subpartitions parsed from the command line. */
+struct cmdline_parts {
+       char name[BDEVNAME_SIZE]; /* block device, such as 'mmcblk0' */
+       unsigned int nr_subparts;
+       struct cmdline_subpart *subpart;
+       struct cmdline_parts *next_parts;
+};
+
+/*
+ * parse_subpart - parse one "<size>[@<offset>](<name>)[ro][lk]" definition
+ * into a newly allocated struct cmdline_subpart.
+ *
+ * A size of "-" and a missing "@<offset>" are both stored as the ~0ULL
+ * sentinel and resolved later by cmdline_parts_set().  Sizes and offsets
+ * go through memparse(), so K/M/G-style suffixes are accepted.
+ *
+ * Returns 0 and stores the result in *subpart on success; on failure
+ * *subpart is left NULL and a negative errno is returned.
+ */
+static int parse_subpart(struct cmdline_subpart **subpart, char *partdef)
+{
+       int ret = 0;
+       struct cmdline_subpart *new_subpart;
+
+       *subpart = NULL;
+
+       new_subpart = kzalloc(sizeof(struct cmdline_subpart), GFP_KERNEL);
+       if (!new_subpart)
+               return -ENOMEM;
+
+       if (*partdef == '-') {
+               /* "-" means "rest of the device"; clamped when applied. */
+               new_subpart->size = (sector_t)(~0ULL);
+               partdef++;
+       } else {
+               new_subpart->size = (sector_t)memparse(partdef, &partdef);
+               if (new_subpart->size < (sector_t)PAGE_SIZE) {
+                       pr_warn("cmdline partition size is invalid.");
+                       ret = -EINVAL;
+                       goto fail;
+               }
+       }
+
+       if (*partdef == '@') {
+               partdef++;
+               new_subpart->from = (sector_t)memparse(partdef, &partdef);
+       } else {
+               /* No explicit offset: placed right after the previous part. */
+               new_subpart->from = (sector_t)(~0ULL);
+       }
+
+       if (*partdef == '(') {
+               int length;
+               char *next = strchr(++partdef, ')');
+
+               if (!next) {
+                       pr_warn("cmdline partition format is invalid.");
+                       ret = -EINVAL;
+                       goto fail;
+               }
+
+               /* Copy the name, truncated to fit, and NUL-terminate it. */
+               length = min_t(int, next - partdef,
+                              sizeof(new_subpart->name) - 1);
+               strncpy(new_subpart->name, partdef, length);
+               new_subpart->name[length] = '\0';
+
+               partdef = ++next;
+       } else
+               new_subpart->name[0] = '\0';
+
+       new_subpart->flags = 0;
+
+       if (!strncmp(partdef, "ro", 2)) {
+               new_subpart->flags |= PF_RDONLY;
+               partdef += 2;
+       }
+
+       if (!strncmp(partdef, "lk", 2)) {
+               new_subpart->flags |= PF_POWERUP_LOCK;
+               partdef += 2;
+       }
+
+       *subpart = new_subpart;
+       return 0;
+fail:
+       kfree(new_subpart);
+       return ret;
+}
+
+/* Free every cmdline_subpart hanging off @parts, leaving the list empty. */
+static void free_subpart(struct cmdline_parts *parts)
+{
+       struct cmdline_subpart *subpart;
+
+       while (parts->subpart) {
+               subpart = parts->subpart;
+               parts->subpart = subpart->next_subpart;
+               kfree(subpart);
+       }
+}
+
+/*
+ * parse_parts - parse one "<bdevname>:<subpart>[,<subpart>...]" definition
+ * into a newly allocated struct cmdline_parts with its subpart list.
+ *
+ * Returns 0 and stores the result in *parts on success; on failure all
+ * partially built state is freed, *parts stays NULL and a negative errno
+ * is returned.
+ */
+static int parse_parts(struct cmdline_parts **parts, const char *bdevdef)
+{
+       int ret = -EINVAL;
+       char *next;
+       int length;
+       struct cmdline_subpart **next_subpart;
+       struct cmdline_parts *newparts;
+       char buf[BDEVNAME_SIZE + 32 + 4];
+
+       *parts = NULL;
+
+       newparts = kzalloc(sizeof(struct cmdline_parts), GFP_KERNEL);
+       if (!newparts)
+               return -ENOMEM;
+
+       /* The device name is everything before the first ':'. */
+       next = strchr(bdevdef, ':');
+       if (!next) {
+               pr_warn("cmdline partition has no block device.");
+               goto fail;
+       }
+
+       length = min_t(int, next - bdevdef, sizeof(newparts->name) - 1);
+       strncpy(newparts->name, bdevdef, length);
+       newparts->name[length] = '\0';
+       newparts->nr_subparts = 0;
+
+       next_subpart = &newparts->subpart;
+
+       /* Walk the ','-separated subpart definitions after the ':'. */
+       while (next && *(++next)) {
+               bdevdef = next;
+               next = strchr(bdevdef, ',');
+
+               length = (!next) ? (sizeof(buf) - 1) :
+                       min_t(int, next - bdevdef, sizeof(buf) - 1);
+
+               /* Copy the current token into buf, NUL-terminated. */
+               strncpy(buf, bdevdef, length);
+               buf[length] = '\0';
+
+               ret = parse_subpart(next_subpart, buf);
+               if (ret)
+                       goto fail;
+
+               newparts->nr_subparts++;
+               next_subpart = &(*next_subpart)->next_subpart;
+       }
+
+       if (!newparts->subpart) {
+               pr_warn("cmdline partition has no valid partition.");
+               ret = -EINVAL;
+               goto fail;
+       }
+
+       *parts = newparts;
+
+       return 0;
+fail:
+       free_subpart(newparts);
+       kfree(newparts);
+       return ret;
+}
+
+/* Free the whole cmdline_parts list, including each entry's subparts. */
+static void cmdline_parts_free(struct cmdline_parts **parts)
+{
+       struct cmdline_parts *next_parts;
+
+       while (*parts) {
+               next_parts = (*parts)->next_parts;
+               free_subpart(*parts);
+               kfree(*parts);
+               *parts = next_parts;
+       }
+}
+
+/*
+ * cmdline_parts_parse - parse the full command-line partition string, a
+ * ';'-separated list of per-device definitions, into a linked list of
+ * struct cmdline_parts.
+ *
+ * Works on a kstrdup()'d copy so @cmdline itself is never modified.
+ * Returns 0 on success; on failure the partially built list is freed,
+ * *parts stays NULL and a negative errno is returned.
+ */
+static int cmdline_parts_parse(struct cmdline_parts **parts,
+               const char *cmdline)
+{
+       int ret;
+       char *buf;
+       char *pbuf;
+       char *next;
+       struct cmdline_parts **next_parts;
+
+       *parts = NULL;
+
+       next = pbuf = buf = kstrdup(cmdline, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       next_parts = parts;
+
+       while (next && *pbuf) {
+               /* Split off the next ';'-terminated device definition. */
+               next = strchr(pbuf, ';');
+               if (next)
+                       *next = '\0';
+
+               ret = parse_parts(next_parts, pbuf);
+               if (ret)
+                       goto fail;
+
+               if (next)
+                       pbuf = ++next;
+
+               next_parts = &(*next_parts)->next_parts;
+       }
+
+       if (!*parts) {
+               pr_warn("cmdline partition has no valid partition.");
+               ret = -EINVAL;
+               goto fail;
+       }
+
+       ret = 0;
+done:
+       kfree(buf);
+       return ret;
+
+fail:
+       cmdline_parts_free(parts);
+       goto done;
+}
+
+/* Return the parsed entry whose device name matches @bdev, or NULL. */
+static struct cmdline_parts *cmdline_parts_find(struct cmdline_parts *parts,
+                                        const char *bdev)
+{
+       while (parts && strncmp(bdev, parts->name, sizeof(parts->name)))
+               parts = parts->next_parts;
+       return parts;
+}
 
 static char *cmdline;
 static struct cmdline_parts *bdev_parts;
 
-static int add_part(int slot, struct cmdline_subpart *subpart, void *param)
+static int add_part(int slot, struct cmdline_subpart *subpart,
+               struct parsed_partitions *state)
 {
        int label_min;
        struct partition_meta_info *info;
        char tmp[sizeof(info->volname) + 4];
-       struct parsed_partitions *state = (struct parsed_partitions *)param;
 
        if (slot >= state->limit)
                return 1;
@@ -50,6 +278,35 @@ static int add_part(int slot, struct cmdline_subpart *subpart, void *param)
        return 0;
 }
 
+/*
+ * cmdline_parts_set - resolve the parsed subparts against the real disk
+ * size and register each one with the scanner via add_part().
+ *
+ * An unset 'from' (~0ULL sentinel) places a subpart immediately after the
+ * previous one; sizes are clamped to the remaining capacity.  @disk_size
+ * is in bytes (the caller passes get_capacity() << 9), matching the
+ * byte-based values memparse() produced.
+ *
+ * Returns the next unused slot number.
+ */
+static int cmdline_parts_set(struct cmdline_parts *parts, sector_t disk_size,
+               struct parsed_partitions *state)
+{
+       sector_t from = 0;
+       struct cmdline_subpart *subpart;
+       int slot = 1;
+
+       for (subpart = parts->subpart; subpart;
+            subpart = subpart->next_subpart, slot++) {
+               if (subpart->from == (sector_t)(~0ULL))
+                       subpart->from = from;
+               else
+                       from = subpart->from;
+
+               /* Stop once a subpart would start past the end of the disk. */
+               if (from >= disk_size)
+                       break;
+
+               if (subpart->size > (disk_size - from))
+                       subpart->size = disk_size - from;
+
+               from += subpart->size;
+
+               if (add_part(slot, subpart, state))
+                       break;
+       }
+
+       return slot;
+}
+
 static int __init cmdline_parts_setup(char *s)
 {
        cmdline = s;
@@ -123,7 +380,6 @@ static void cmdline_parts_verifier(int slot, struct parsed_partitions *state)
 int cmdline_partition(struct parsed_partitions *state)
 {
        sector_t disk_size;
-       char bdev[BDEVNAME_SIZE];
        struct cmdline_parts *parts;
 
        if (cmdline) {
@@ -140,14 +396,13 @@ int cmdline_partition(struct parsed_partitions *state)
        if (!bdev_parts)
                return 0;
 
-       bdevname(state->bdev, bdev);
-       parts = cmdline_parts_find(bdev_parts, bdev);
+       parts = cmdline_parts_find(bdev_parts, state->disk->disk_name);
        if (!parts)
                return 0;
 
-       disk_size = get_capacity(state->bdev->bd_disk) << 9;
+       disk_size = get_capacity(state->disk) << 9;
 
-       cmdline_parts_set(parts, disk_size, 1, add_part, (void *)state);
+       cmdline_parts_set(parts, disk_size, state);
        cmdline_parts_verifier(1, state);
 
        strlcat(state->pp_buf, "\n", PAGE_SIZE);
index 4230d4f..58c4c36 100644 (file)
@@ -135,8 +135,8 @@ static struct parsed_partitions *check_partition(struct gendisk *hd)
        }
        state->pp_buf[0] = '\0';
 
-       state->bdev = hd->part0;
-       disk_name(hd, 0, state->name);
+       state->disk = hd;
+       snprintf(state->name, BDEVNAME_SIZE, "%s", hd->disk_name);
        snprintf(state->pp_buf, PAGE_SIZE, " %s:", state->name);
        if (isdigit(state->name[strlen(state->name)-1]))
                sprintf(state->name, "p");
@@ -259,9 +259,8 @@ static const struct attribute_group *part_attr_groups[] = {
 
 static void part_release(struct device *dev)
 {
-       if (MAJOR(dev->devt) == BLOCK_EXT_MAJOR)
-               blk_free_ext_minor(MINOR(dev->devt));
-       bdput(dev_to_bdev(dev));
+       put_disk(dev_to_bdev(dev)->bd_disk);
+       iput(dev_to_bdev(dev)->bd_inode);
 }
 
 static int part_uevent(struct device *dev, struct kobj_uevent_env *env)
@@ -281,12 +280,10 @@ struct device_type part_type = {
        .uevent         = part_uevent,
 };
 
-/*
- * Must be called either with open_mutex held, before a disk can be opened or
- * after all disk users are gone.
- */
 static void delete_partition(struct block_device *part)
 {
+       lockdep_assert_held(&part->bd_disk->open_mutex);
+
        fsync_bdev(part);
        __invalidate_device(part, true);
 
@@ -351,20 +348,17 @@ static struct block_device *add_partition(struct gendisk *disk, int partno,
        if (xa_load(&disk->part_tbl, partno))
                return ERR_PTR(-EBUSY);
 
+       /* ensure we always have a reference to the whole disk */
+       get_device(disk_to_dev(disk));
+
+       err = -ENOMEM;
        bdev = bdev_alloc(disk, partno);
        if (!bdev)
-               return ERR_PTR(-ENOMEM);
+               goto out_put_disk;
 
        bdev->bd_start_sect = start;
        bdev_set_nr_sectors(bdev, len);
 
-       if (info) {
-               err = -ENOMEM;
-               bdev->bd_meta_info = kmemdup(info, sizeof(*info), GFP_KERNEL);
-               if (!bdev->bd_meta_info)
-                       goto out_bdput;
-       }
-
        pdev = &bdev->bd_device;
        dname = dev_name(ddev);
        if (isdigit(dname[strlen(dname) - 1]))
@@ -388,6 +382,13 @@ static struct block_device *add_partition(struct gendisk *disk, int partno,
        }
        pdev->devt = devt;
 
+       if (info) {
+               err = -ENOMEM;
+               bdev->bd_meta_info = kmemdup(info, sizeof(*info), GFP_KERNEL);
+               if (!bdev->bd_meta_info)
+                       goto out_put;
+       }
+
        /* delay uevent until 'holders' subdir is created */
        dev_set_uevent_suppress(pdev, 1);
        err = device_add(pdev);
@@ -417,14 +418,13 @@ static struct block_device *add_partition(struct gendisk *disk, int partno,
                kobject_uevent(&pdev->kobj, KOBJ_ADD);
        return bdev;
 
-out_bdput:
-       bdput(bdev);
-       return ERR_PTR(err);
 out_del:
        kobject_put(bdev->bd_holder_dir);
        device_del(pdev);
 out_put:
        put_device(pdev);
+out_put_disk:
+       put_disk(disk);
        return ERR_PTR(err);
 }
 
@@ -449,15 +449,14 @@ static bool partition_overlaps(struct gendisk *disk, sector_t start,
        return overlap;
 }
 
-int bdev_add_partition(struct block_device *bdev, int partno,
-               sector_t start, sector_t length)
+int bdev_add_partition(struct gendisk *disk, int partno, sector_t start,
+               sector_t length)
 {
        struct block_device *part;
-       struct gendisk *disk = bdev->bd_disk;
        int ret;
 
        mutex_lock(&disk->open_mutex);
-       if (!(disk->flags & GENHD_FL_UP)) {
+       if (!disk_live(disk)) {
                ret = -ENXIO;
                goto out;
        }
@@ -475,13 +474,13 @@ out:
        return ret;
 }
 
-int bdev_del_partition(struct block_device *bdev, int partno)
+int bdev_del_partition(struct gendisk *disk, int partno)
 {
        struct block_device *part = NULL;
        int ret = -ENXIO;
 
-       mutex_lock(&bdev->bd_disk->open_mutex);
-       part = xa_load(&bdev->bd_disk->part_tbl, partno);
+       mutex_lock(&disk->open_mutex);
+       part = xa_load(&disk->part_tbl, partno);
        if (!part)
                goto out_unlock;
 
@@ -492,18 +491,18 @@ int bdev_del_partition(struct block_device *bdev, int partno)
        delete_partition(part);
        ret = 0;
 out_unlock:
-       mutex_unlock(&bdev->bd_disk->open_mutex);
+       mutex_unlock(&disk->open_mutex);
        return ret;
 }
 
-int bdev_resize_partition(struct block_device *bdev, int partno,
-               sector_t start, sector_t length)
+int bdev_resize_partition(struct gendisk *disk, int partno, sector_t start,
+               sector_t length)
 {
        struct block_device *part = NULL;
        int ret = -ENXIO;
 
-       mutex_lock(&bdev->bd_disk->open_mutex);
-       part = xa_load(&bdev->bd_disk->part_tbl, partno);
+       mutex_lock(&disk->open_mutex);
+       part = xa_load(&disk->part_tbl, partno);
        if (!part)
                goto out_unlock;
 
@@ -512,14 +511,14 @@ int bdev_resize_partition(struct block_device *bdev, int partno,
                goto out_unlock;
 
        ret = -EBUSY;
-       if (partition_overlaps(bdev->bd_disk, start, length, partno))
+       if (partition_overlaps(disk, start, length, partno))
                goto out_unlock;
 
        bdev_set_nr_sectors(part, length);
 
        ret = 0;
 out_unlock:
-       mutex_unlock(&bdev->bd_disk->open_mutex);
+       mutex_unlock(&disk->open_mutex);
        return ret;
 }
 
@@ -667,7 +666,7 @@ int bdev_disk_changed(struct gendisk *disk, bool invalidate)
 
        lockdep_assert_held(&disk->open_mutex);
 
-       if (!(disk->flags & GENHD_FL_UP))
+       if (!disk_live(disk))
                return -ENXIO;
 
 rescan:
@@ -715,10 +714,10 @@ EXPORT_SYMBOL_GPL(bdev_disk_changed);
 
 void *read_part_sector(struct parsed_partitions *state, sector_t n, Sector *p)
 {
-       struct address_space *mapping = state->bdev->bd_inode->i_mapping;
+       struct address_space *mapping = state->disk->part0->bd_inode->i_mapping;
        struct page *page;
 
-       if (n >= get_capacity(state->bdev->bd_disk)) {
+       if (n >= get_capacity(state->disk)) {
                state->access_beyond_eod = true;
                return NULL;
        }
index e271679..7ca5c4c 100644 (file)
@@ -124,19 +124,17 @@ efi_crc32(const void *buf, unsigned long len)
 
 /**
  * last_lba(): return number of last logical block of device
- * @bdev: block device
+ * @disk: block device
  * 
  * Description: Returns last LBA value on success, 0 on error.
  * This is stored (by sd and ide-geometry) in
  *  the part[0] entry for this disk, and is the number of
  *  physical sectors available on the disk.
  */
-static u64 last_lba(struct block_device *bdev)
+static u64 last_lba(struct gendisk *disk)
 {
-       if (!bdev || !bdev->bd_inode)
-               return 0;
-       return div_u64(bdev->bd_inode->i_size,
-                      bdev_logical_block_size(bdev)) - 1ULL;
+       return div_u64(disk->part0->bd_inode->i_size,
+                      queue_logical_block_size(disk->queue)) - 1ULL;
 }
 
 static inline int pmbr_part_valid(gpt_mbr_record *part)
@@ -231,17 +229,17 @@ done:
  * @buffer: destination buffer
  * @count: bytes to read
  *
- * Description: Reads @count bytes from @state->bdev into @buffer.
+ * Description: Reads @count bytes from @state->disk into @buffer.
  * Returns number of bytes read on success, 0 on error.
  */
 static size_t read_lba(struct parsed_partitions *state,
                       u64 lba, u8 *buffer, size_t count)
 {
        size_t totalreadcount = 0;
-       struct block_device *bdev = state->bdev;
-       sector_t n = lba * (bdev_logical_block_size(bdev) / 512);
+       sector_t n = lba *
+               (queue_logical_block_size(state->disk->queue) / 512);
 
-       if (!buffer || lba > last_lba(bdev))
+       if (!buffer || lba > last_lba(state->disk))
                 return 0;
 
        while (count) {
@@ -302,14 +300,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
  * @lba: the Logical Block Address of the partition table
  * 
  * Description: returns GPT header on success, NULL on error.   Allocates
- * and fills a GPT header starting at @ from @state->bdev.
+ * and fills a GPT header starting at @ from @state->disk.
  * Note: remember to free gpt when finished with it.
  */
 static gpt_header *alloc_read_gpt_header(struct parsed_partitions *state,
                                         u64 lba)
 {
        gpt_header *gpt;
-       unsigned ssz = bdev_logical_block_size(state->bdev);
+       unsigned ssz = queue_logical_block_size(state->disk->queue);
 
        gpt = kmalloc(ssz, GFP_KERNEL);
        if (!gpt)
@@ -356,10 +354,10 @@ static int is_gpt_valid(struct parsed_partitions *state, u64 lba,
 
        /* Check the GUID Partition Table header size is too big */
        if (le32_to_cpu((*gpt)->header_size) >
-                       bdev_logical_block_size(state->bdev)) {
+                       queue_logical_block_size(state->disk->queue)) {
                pr_debug("GUID Partition Table Header size is too large: %u > %u\n",
                        le32_to_cpu((*gpt)->header_size),
-                       bdev_logical_block_size(state->bdev));
+                       queue_logical_block_size(state->disk->queue));
                goto fail;
        }
 
@@ -395,7 +393,7 @@ static int is_gpt_valid(struct parsed_partitions *state, u64 lba,
        /* Check the first_usable_lba and last_usable_lba are
         * within the disk.
         */
-       lastlba = last_lba(state->bdev);
+       lastlba = last_lba(state->disk);
        if (le64_to_cpu((*gpt)->first_usable_lba) > lastlba) {
                pr_debug("GPT: first_usable_lba incorrect: %lld > %lld\n",
                         (unsigned long long)le64_to_cpu((*gpt)->first_usable_lba),
@@ -587,13 +585,15 @@ static int find_valid_gpt(struct parsed_partitions *state, gpt_header **gpt,
        gpt_header *pgpt = NULL, *agpt = NULL;
        gpt_entry *pptes = NULL, *aptes = NULL;
        legacy_mbr *legacymbr;
-       sector_t total_sectors = i_size_read(state->bdev->bd_inode) >> 9;
+       struct gendisk *disk = state->disk;
+       const struct block_device_operations *fops = disk->fops;
+       sector_t total_sectors = get_capacity(state->disk);
        u64 lastlba;
 
        if (!ptes)
                return 0;
 
-       lastlba = last_lba(state->bdev);
+       lastlba = last_lba(state->disk);
         if (!force_gpt) {
                /* This will be added to the EFI Spec. per Intel after v1.02. */
                legacymbr = kzalloc(sizeof(*legacymbr), GFP_KERNEL);
@@ -621,6 +621,16 @@ static int find_valid_gpt(struct parsed_partitions *state, gpt_header **gpt,
         if (!good_agpt && force_gpt)
                 good_agpt = is_gpt_valid(state, lastlba, &agpt, &aptes);
 
+       if (!good_agpt && force_gpt && fops->alternative_gpt_sector) {
+               sector_t agpt_sector;
+               int err;
+
+               err = fops->alternative_gpt_sector(disk, &agpt_sector);
+               if (!err)
+                       good_agpt = is_gpt_valid(state, agpt_sector,
+                                                &agpt, &aptes);
+       }
+
         /* The obviously unsuccessful case */
         if (!good_pgpt && !good_agpt)
                 goto fail;
@@ -705,7 +715,7 @@ int efi_partition(struct parsed_partitions *state)
        gpt_header *gpt = NULL;
        gpt_entry *ptes = NULL;
        u32 i;
-       unsigned ssz = bdev_logical_block_size(state->bdev) / 512;
+       unsigned ssz = queue_logical_block_size(state->disk->queue) / 512;
 
        if (!find_valid_gpt(state, &gpt, &ptes) || !gpt || !ptes) {
                kfree(gpt);
@@ -722,7 +732,7 @@ int efi_partition(struct parsed_partitions *state)
                u64 size = le64_to_cpu(ptes[i].ending_lba) -
                           le64_to_cpu(ptes[i].starting_lba) + 1ULL;
 
-               if (!is_pte_valid(&ptes[i], last_lba(state->bdev)))
+               if (!is_pte_valid(&ptes[i], last_lba(state->disk)))
                        continue;
 
                put_partition(state, i+1, start * ssz, size * ssz);
index 4b044e6..9bca396 100644 (file)
@@ -290,8 +290,8 @@ static int find_cms1_partitions(struct parsed_partitions *state,
 int ibm_partition(struct parsed_partitions *state)
 {
        int (*fn)(struct gendisk *disk, dasd_information2_t *info);
-       struct block_device *bdev = state->bdev;
-       struct gendisk *disk = bdev->bd_disk;
+       struct gendisk *disk = state->disk;
+       struct block_device *bdev = disk->part0;
        int blocksize, res;
        loff_t i_size, offset, size;
        dasd_information2_t *info;
index cc86534..27f6c7d 100644 (file)
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-or-later
-/**
+/*
  * ldm - Support for Windows Logical Disk Manager (Dynamic Disks)
  *
  * Copyright (C) 2001,2002 Richard Russon <ldm@flatcap.org>
@@ -304,7 +304,7 @@ static bool ldm_validate_privheads(struct parsed_partitions *state,
                }
        }
 
-       num_sects = state->bdev->bd_inode->i_size >> 9;
+       num_sects = get_capacity(state->disk);
 
        if ((ph[0]->config_start > num_sects) ||
           ((ph[0]->config_start + ph[0]->config_size) > num_sects)) {
@@ -339,11 +339,11 @@ out:
 /**
  * ldm_validate_tocblocks - Validate the table of contents and its backups
  * @state: Partition check state including device holding the LDM Database
- * @base:  Offset, into @state->bdev, of the database
+ * @base:  Offset, into @state->disk, of the database
  * @ldb:   Cache of the database structures
  *
  * Find and compare the four tables of contents of the LDM Database stored on
- * @state->bdev and return the parsed information into @toc1.
+ * @state->disk and return the parsed information into @toc1.
  *
  * The offsets and sizes of the configs are range-checked against a privhead.
  *
@@ -486,8 +486,8 @@ out:
  *       only likely to happen if the underlying device is strange.  If that IS
  *       the case we should return zero to let someone else try.
  *
- * Return:  'true'   @state->bdev is a dynamic disk
- *          'false'  @state->bdev is not a dynamic disk, or an error occurred
+ * Return:  'true'   @state->disk is a dynamic disk
+ *          'false'  @state->disk is not a dynamic disk, or an error occurred
  */
 static bool ldm_validate_partition_table(struct parsed_partitions *state)
 {
@@ -1340,7 +1340,7 @@ static bool ldm_frag_commit (struct list_head *frags, struct ldmdb *ldb)
 /**
  * ldm_get_vblks - Read the on-disk database of VBLKs into memory
  * @state: Partition check state including device holding the LDM Database
- * @base:  Offset, into @state->bdev, of the database
+ * @base:  Offset, into @state->disk, of the database
  * @ldb:   Cache of the database structures
  *
  * To use the information from the VBLKs, they need to be read from the disk,
@@ -1432,10 +1432,10 @@ static void ldm_free_vblks (struct list_head *lh)
  * example, if the device is hda, we would have: hda1: LDM database, hda2, hda3,
  * and so on: the actual data containing partitions.
  *
- * Return:  1 Success, @state->bdev is a dynamic disk and we handled it
- *          0 Success, @state->bdev is not a dynamic disk
+ * Return:  1 Success, @state->disk is a dynamic disk and we handled it
+ *          0 Success, @state->disk is not a dynamic disk
  *         -1 An error occurred before enough information had been read
- *            Or @state->bdev is a dynamic disk, but it may be corrupted
+ *            Or @state->disk is a dynamic disk, but it may be corrupted
  */
 int ldm_partition(struct parsed_partitions *state)
 {
index b609533..7b521df 100644 (file)
@@ -133,7 +133,7 @@ int mac_partition(struct parsed_partitions *state)
        }
 #ifdef CONFIG_PPC_PMAC
        if (found_root_goodness)
-               note_bootable_part(state->bdev->bd_dev, found_root,
+               note_bootable_part(state->disk->part0->bd_dev, found_root,
                                   found_root_goodness);
 #endif
 
index f510259..b5d5c22 100644 (file)
@@ -135,11 +135,12 @@ static void parse_extended(struct parsed_partitions *state,
        Sector sect;
        unsigned char *data;
        sector_t this_sector, this_size;
-       sector_t sector_size = bdev_logical_block_size(state->bdev) / 512;
+       sector_t sector_size;
        int loopct = 0;         /* number of links followed
                                   without finding a data partition */
        int i;
 
+       sector_size = queue_logical_block_size(state->disk->queue) / 512;
        this_sector = first_sector;
        this_size = first_size;
 
@@ -579,7 +580,7 @@ static struct {
 
 int msdos_partition(struct parsed_partitions *state)
 {
-       sector_t sector_size = bdev_logical_block_size(state->bdev) / 512;
+       sector_t sector_size;
        Sector sect;
        unsigned char *data;
        struct msdos_partition *p;
@@ -587,6 +588,7 @@ int msdos_partition(struct parsed_partitions *state)
        int slot;
        u32 disksig;
 
+       sector_size = queue_logical_block_size(state->disk->queue) / 512;
        data = read_part_sector(state, 0, &sect);
        if (!data)
                return -1;
index 4273f1b..9cc6b8c 100644 (file)
@@ -43,7 +43,6 @@ int sgi_partition(struct parsed_partitions *state)
        Sector sect;
        struct sgi_disklabel *label;
        struct sgi_partition *p;
-       char b[BDEVNAME_SIZE];
 
        label = read_part_sector(state, 0, &sect);
        if (!label)
@@ -52,7 +51,7 @@ int sgi_partition(struct parsed_partitions *state)
        magic = label->magic_mushroom;
        if(be32_to_cpu(magic) != SGI_LABEL_MAGIC) {
                /*printk("Dev %s SGI disklabel: bad magic %08x\n",
-                      bdevname(bdev, b), be32_to_cpu(magic));*/
+                      state->disk->disk_name, be32_to_cpu(magic));*/
                put_dev_sector(sect);
                return 0;
        }
@@ -63,7 +62,7 @@ int sgi_partition(struct parsed_partitions *state)
        }
        if(csum) {
                printk(KERN_WARNING "Dev %s SGI disklabel: csum bad, label corrupted\n",
-                      bdevname(state->bdev, b));
+                      state->disk->disk_name);
                put_dev_sector(sect);
                return 0;
        }
index 47dc53e..ddf9e6d 100644 (file)
@@ -65,7 +65,6 @@ int sun_partition(struct parsed_partitions *state)
        } * label;
        struct sun_partition *p;
        unsigned long spc;
-       char b[BDEVNAME_SIZE];
        int use_vtoc;
        int nparts;
 
@@ -76,7 +75,7 @@ int sun_partition(struct parsed_partitions *state)
        p = label->partitions;
        if (be16_to_cpu(label->magic) != SUN_LABEL_MAGIC) {
 /*             printk(KERN_INFO "Dev %s Sun disklabel: bad magic %04x\n",
-                      bdevname(bdev, b), be16_to_cpu(label->magic)); */
+                      state->disk->disk_name, be16_to_cpu(label->magic)); */
                put_dev_sector(sect);
                return 0;
        }
@@ -86,7 +85,7 @@ int sun_partition(struct parsed_partitions *state)
                csum ^= *ush--;
        if (csum) {
                printk("Dev %s Sun disklabel: Csum bad, label corrupted\n",
-                      bdevname(state->bdev, b));
+                      state->disk->disk_name);
                put_dev_sector(sect);
                return 0;
        }
index d910534..00c203b 100644 (file)
@@ -147,11 +147,10 @@ static void t10_pi_type1_prepare(struct request *rq)
                        break;
 
                bip_for_each_vec(iv, bip, iter) {
-                       void *p, *pmap;
                        unsigned int j;
+                       void *p;
 
-                       pmap = kmap_atomic(iv.bv_page);
-                       p = pmap + iv.bv_offset;
+                       p = bvec_kmap_local(&iv);
                        for (j = 0; j < iv.bv_len; j += tuple_sz) {
                                struct t10_pi_tuple *pi = p;
 
@@ -161,8 +160,7 @@ static void t10_pi_type1_prepare(struct request *rq)
                                ref_tag++;
                                p += tuple_sz;
                        }
-
-                       kunmap_atomic(pmap);
+                       kunmap_local(p);
                }
 
                bip->bip_flags |= BIP_MAPPED_INTEGRITY;
@@ -195,11 +193,10 @@ static void t10_pi_type1_complete(struct request *rq, unsigned int nr_bytes)
                struct bvec_iter iter;
 
                bip_for_each_vec(iv, bip, iter) {
-                       void *p, *pmap;
                        unsigned int j;
+                       void *p;
 
-                       pmap = kmap_atomic(iv.bv_page);
-                       p = pmap + iv.bv_offset;
+                       p = bvec_kmap_local(&iv);
                        for (j = 0; j < iv.bv_len && intervals; j += tuple_sz) {
                                struct t10_pi_tuple *pi = p;
 
@@ -210,8 +207,7 @@ static void t10_pi_type1_complete(struct request *rq, unsigned int nr_bytes)
                                intervals--;
                                p += tuple_sz;
                        }
-
-                       kunmap_atomic(pmap);
+                       kunmap_local(p);
                }
        }
 }
index f4e6111..ae7f2e8 100644 (file)
@@ -15,6 +15,32 @@ config MODULE_SIG_KEY
          then the kernel will automatically generate the private key and
          certificate as described in Documentation/admin-guide/module-signing.rst
 
+choice
+       prompt "Type of module signing key to be generated"
+       default MODULE_SIG_KEY_TYPE_RSA
+       help
+        The type of module signing key to generate. This option
+        does not apply if a PKCS#11 URI is used.
+
+config MODULE_SIG_KEY_TYPE_RSA
+       bool "RSA"
+       depends on MODULE_SIG || (IMA_APPRAISE_MODSIG && MODULES)
+       help
+        Use an RSA key for module signing.
+
+config MODULE_SIG_KEY_TYPE_ECDSA
+       bool "ECDSA"
+       select CRYPTO_ECDSA
+       depends on MODULE_SIG || (IMA_APPRAISE_MODSIG && MODULES)
+       help
+        Use an elliptic curve key (NIST P384) for module signing. Consider
+        using a strong hash like sha256 or sha384 for hashing modules.
+
+        Note: Remove all ECDSA signing keys, e.g. certs/signing_key.pem,
+        when falling back to building Linux 5.14 and older kernels.
+
+endchoice
+
 config SYSTEM_TRUSTED_KEYRING
        bool "Provide system-wide ring of trusted keys"
        depends on KEYS
index 359239a..2794337 100644 (file)
@@ -57,11 +57,31 @@ endif
 redirect_openssl       = 2>&1
 quiet_redirect_openssl = 2>&1
 silent_redirect_openssl = 2>/dev/null
+openssl_available       = $(shell openssl help 2>/dev/null && echo yes)
 
 # We do it this way rather than having a boolean option for enabling an
 # external private key, because 'make randconfig' might enable such a
 # boolean option and we unfortunately can't make it depend on !RANDCONFIG.
 ifeq ($(CONFIG_MODULE_SIG_KEY),"certs/signing_key.pem")
+
+ifeq ($(openssl_available),yes)
+X509TEXT=$(shell openssl x509 -in "certs/signing_key.pem" -text 2>/dev/null)
+endif
+
+# Support user changing key type
+ifdef CONFIG_MODULE_SIG_KEY_TYPE_ECDSA
+keytype_openssl = -newkey ec -pkeyopt ec_paramgen_curve:secp384r1
+ifeq ($(openssl_available),yes)
+$(if $(findstring id-ecPublicKey,$(X509TEXT)),,$(shell rm -f "certs/signing_key.pem"))
+endif
+endif # CONFIG_MODULE_SIG_KEY_TYPE_ECDSA
+
+ifdef CONFIG_MODULE_SIG_KEY_TYPE_RSA
+ifeq ($(openssl_available),yes)
+$(if $(findstring rsaEncryption,$(X509TEXT)),,$(shell rm -f "certs/signing_key.pem"))
+endif
+endif # CONFIG_MODULE_SIG_KEY_TYPE_RSA
+
 $(obj)/signing_key.pem: $(obj)/x509.genkey
        @$(kecho) "###"
        @$(kecho) "### Now generating an X.509 key pair to be used for signing modules."
@@ -75,6 +95,7 @@ $(obj)/signing_key.pem: $(obj)/x509.genkey
                -batch -x509 -config $(obj)/x509.genkey \
                -outform PEM -out $(obj)/signing_key.pem \
                -keyout $(obj)/signing_key.pem \
+               $(keytype_openssl) \
                $($(quiet)redirect_openssl)
        @$(kecho) "###"
        @$(kecho) "### Key pair generated."
index ca3b02d..536df4b 100644 (file)
@@ -1547,6 +1547,7 @@ config CRYPTO_SERPENT_AVX2_X86_64
 config CRYPTO_SM4
        tristate "SM4 cipher algorithm"
        select CRYPTO_ALGAPI
+       select CRYPTO_LIB_SM4
        help
          SM4 cipher algorithms (OSCCA GB/T 32907-2016).
 
@@ -1569,6 +1570,49 @@ config CRYPTO_SM4
 
          If unsure, say N.
 
+config CRYPTO_SM4_AESNI_AVX_X86_64
+       tristate "SM4 cipher algorithm (x86_64/AES-NI/AVX)"
+       depends on X86 && 64BIT
+       select CRYPTO_SKCIPHER
+       select CRYPTO_SIMD
+       select CRYPTO_ALGAPI
+       select CRYPTO_LIB_SM4
+       help
+         SM4 cipher algorithms (OSCCA GB/T 32907-2016) (x86_64/AES-NI/AVX).
+
+         SM4 (GBT.32907-2016) is a cryptographic standard issued by the
+         Organization of State Commercial Administration of China (OSCCA)
+         as an authorized cryptographic algorithm for use within China.
+
+         This is SM4 optimized implementation using AES-NI/AVX/x86_64
+         instruction set for block cipher. Through two affine transforms,
+         we can use the AES S-Box to simulate the SM4 S-Box to achieve the
+         effect of instruction acceleration.
+
+         If unsure, say N.
+
+config CRYPTO_SM4_AESNI_AVX2_X86_64
+       tristate "SM4 cipher algorithm (x86_64/AES-NI/AVX2)"
+       depends on X86 && 64BIT
+       select CRYPTO_SKCIPHER
+       select CRYPTO_SIMD
+       select CRYPTO_ALGAPI
+       select CRYPTO_LIB_SM4
+       select CRYPTO_SM4_AESNI_AVX_X86_64
+       help
+         SM4 cipher algorithms (OSCCA GB/T 32907-2016) (x86_64/AES-NI/AVX2).
+
+         SM4 (GBT.32907-2016) is a cryptographic standard issued by the
+         Organization of State Commercial Administration of China (OSCCA)
+         as an authorized cryptographic algorithm for use within China.
+
+         This is SM4 optimized implementation using AES-NI/AVX2/x86_64
+         instruction set for block cipher. Through two affine transforms,
+         we can use the AES S-Box to simulate the SM4 S-Box to achieve the
+         effect of instruction acceleration.
+
+         If unsure, say N.
+
 config CRYPTO_TEA
        tristate "TEA, XTEA and XETA cipher algorithms"
        depends on CRYPTO_USER_API_ENABLE_OBSOLETE
@@ -1768,7 +1812,7 @@ config CRYPTO_DRBG_HMAC
        bool
        default y
        select CRYPTO_HMAC
-       select CRYPTO_SHA256
+       select CRYPTO_SHA512
 
 config CRYPTO_DRBG_HASH
        bool "Enable Hash DRBG"
index 10526d4..c633f15 100644 (file)
@@ -74,7 +74,6 @@ obj-$(CONFIG_CRYPTO_NULL2) += crypto_null.o
 obj-$(CONFIG_CRYPTO_MD4) += md4.o
 obj-$(CONFIG_CRYPTO_MD5) += md5.o
 obj-$(CONFIG_CRYPTO_RMD160) += rmd160.o
-obj-$(CONFIG_CRYPTO_RMD320) += rmd320.o
 obj-$(CONFIG_CRYPTO_SHA1) += sha1_generic.o
 obj-$(CONFIG_CRYPTO_SHA256) += sha256_generic.o
 obj-$(CONFIG_CRYPTO_SHA512) += sha512_generic.o
index 967329e..6592279 100644 (file)
@@ -269,6 +269,14 @@ int pkcs7_sig_note_pkey_algo(void *context, size_t hdrlen,
                ctx->sinfo->sig->pkey_algo = "rsa";
                ctx->sinfo->sig->encoding = "pkcs1";
                break;
+       case OID_id_ecdsa_with_sha1:
+       case OID_id_ecdsa_with_sha224:
+       case OID_id_ecdsa_with_sha256:
+       case OID_id_ecdsa_with_sha384:
+       case OID_id_ecdsa_with_sha512:
+               ctx->sinfo->sig->pkey_algo = "ecdsa";
+               ctx->sinfo->sig->encoding = "x962";
+               break;
        default:
                printk("Unsupported pkey algo: %u\n", ctx->last_oid);
                return -ENOPKG;
index a006132..1350e8e 100644 (file)
@@ -27,6 +27,7 @@
 #define _CRYPTO_ECC_H
 
 #include <crypto/ecc_curve.h>
+#include <asm/unaligned.h>
 
 /* One digit is u64 qword. */
 #define ECC_CURVE_NIST_P192_DIGITS  3
  * @out:      Output array
  * @ndigits:  Number of digits to copy
  */
-static inline void ecc_swap_digits(const u64 *in, u64 *out, unsigned int ndigits)
+static inline void ecc_swap_digits(const void *in, u64 *out, unsigned int ndigits)
 {
        const __be64 *src = (__force __be64 *)in;
        int i;
 
        for (i = 0; i < ndigits; i++)
-               out[i] = be64_to_cpu(src[ndigits - 1 - i]);
+               out[i] = get_unaligned_be64(&src[ndigits - 1 - i]);
 }
 
 /**
index c72d72a..be70e76 100644 (file)
@@ -143,9 +143,6 @@ sha512_transform(u64 *state, const u8 *input)
 
        state[0] += a; state[1] += b; state[2] += c; state[3] += d;
        state[4] += e; state[5] += f; state[6] += g; state[7] += h;
-
-       /* erase our data */
-       a = b = c = d = e = f = g = h = t1 = t2 = 0;
 }
 
 static void sha512_generic_block_fn(struct sha512_state *sst, u8 const *src,
index a153762..4182111 100644 (file)
@@ -431,7 +431,7 @@ static int skcipher_copy_iv(struct skcipher_walk *walk)
 
 static int skcipher_walk_first(struct skcipher_walk *walk)
 {
-       if (WARN_ON_ONCE(in_irq()))
+       if (WARN_ON_ONCE(in_hardirq()))
                return -EDEADLK;
 
        walk->buffer = NULL;
index 016dbc5..4a6480a 100644 (file)
 #include <asm/byteorder.h>
 #include <asm/unaligned.h>
 
-static const u32 fk[4] = {
-       0xa3b1bac6, 0x56aa3350, 0x677d9197, 0xb27022dc
-};
-
-static const u8 sbox[256] = {
-       0xd6, 0x90, 0xe9, 0xfe, 0xcc, 0xe1, 0x3d, 0xb7,
-       0x16, 0xb6, 0x14, 0xc2, 0x28, 0xfb, 0x2c, 0x05,
-       0x2b, 0x67, 0x9a, 0x76, 0x2a, 0xbe, 0x04, 0xc3,
-       0xaa, 0x44, 0x13, 0x26, 0x49, 0x86, 0x06, 0x99,
-       0x9c, 0x42, 0x50, 0xf4, 0x91, 0xef, 0x98, 0x7a,
-       0x33, 0x54, 0x0b, 0x43, 0xed, 0xcf, 0xac, 0x62,
-       0xe4, 0xb3, 0x1c, 0xa9, 0xc9, 0x08, 0xe8, 0x95,
-       0x80, 0xdf, 0x94, 0xfa, 0x75, 0x8f, 0x3f, 0xa6,
-       0x47, 0x07, 0xa7, 0xfc, 0xf3, 0x73, 0x17, 0xba,
-       0x83, 0x59, 0x3c, 0x19, 0xe6, 0x85, 0x4f, 0xa8,
-       0x68, 0x6b, 0x81, 0xb2, 0x71, 0x64, 0xda, 0x8b,
-       0xf8, 0xeb, 0x0f, 0x4b, 0x70, 0x56, 0x9d, 0x35,
-       0x1e, 0x24, 0x0e, 0x5e, 0x63, 0x58, 0xd1, 0xa2,
-       0x25, 0x22, 0x7c, 0x3b, 0x01, 0x21, 0x78, 0x87,
-       0xd4, 0x00, 0x46, 0x57, 0x9f, 0xd3, 0x27, 0x52,
-       0x4c, 0x36, 0x02, 0xe7, 0xa0, 0xc4, 0xc8, 0x9e,
-       0xea, 0xbf, 0x8a, 0xd2, 0x40, 0xc7, 0x38, 0xb5,
-       0xa3, 0xf7, 0xf2, 0xce, 0xf9, 0x61, 0x15, 0xa1,
-       0xe0, 0xae, 0x5d, 0xa4, 0x9b, 0x34, 0x1a, 0x55,
-       0xad, 0x93, 0x32, 0x30, 0xf5, 0x8c, 0xb1, 0xe3,
-       0x1d, 0xf6, 0xe2, 0x2e, 0x82, 0x66, 0xca, 0x60,
-       0xc0, 0x29, 0x23, 0xab, 0x0d, 0x53, 0x4e, 0x6f,
-       0xd5, 0xdb, 0x37, 0x45, 0xde, 0xfd, 0x8e, 0x2f,
-       0x03, 0xff, 0x6a, 0x72, 0x6d, 0x6c, 0x5b, 0x51,
-       0x8d, 0x1b, 0xaf, 0x92, 0xbb, 0xdd, 0xbc, 0x7f,
-       0x11, 0xd9, 0x5c, 0x41, 0x1f, 0x10, 0x5a, 0xd8,
-       0x0a, 0xc1, 0x31, 0x88, 0xa5, 0xcd, 0x7b, 0xbd,
-       0x2d, 0x74, 0xd0, 0x12, 0xb8, 0xe5, 0xb4, 0xb0,
-       0x89, 0x69, 0x97, 0x4a, 0x0c, 0x96, 0x77, 0x7e,
-       0x65, 0xb9, 0xf1, 0x09, 0xc5, 0x6e, 0xc6, 0x84,
-       0x18, 0xf0, 0x7d, 0xec, 0x3a, 0xdc, 0x4d, 0x20,
-       0x79, 0xee, 0x5f, 0x3e, 0xd7, 0xcb, 0x39, 0x48
-};
-
-static const u32 ck[] = {
-       0x00070e15, 0x1c232a31, 0x383f464d, 0x545b6269,
-       0x70777e85, 0x8c939aa1, 0xa8afb6bd, 0xc4cbd2d9,
-       0xe0e7eef5, 0xfc030a11, 0x181f262d, 0x343b4249,
-       0x50575e65, 0x6c737a81, 0x888f969d, 0xa4abb2b9,
-       0xc0c7ced5, 0xdce3eaf1, 0xf8ff060d, 0x141b2229,
-       0x30373e45, 0x4c535a61, 0x686f767d, 0x848b9299,
-       0xa0a7aeb5, 0xbcc3cad1, 0xd8dfe6ed, 0xf4fb0209,
-       0x10171e25, 0x2c333a41, 0x484f565d, 0x646b7279
-};
-
-static u32 sm4_t_non_lin_sub(u32 x)
-{
-       int i;
-       u8 *b = (u8 *)&x;
-
-       for (i = 0; i < 4; ++i)
-               b[i] = sbox[b[i]];
-
-       return x;
-}
-
-static u32 sm4_key_lin_sub(u32 x)
-{
-       return x ^ rol32(x, 13) ^ rol32(x, 23);
-
-}
-
-static u32 sm4_enc_lin_sub(u32 x)
-{
-       return x ^ rol32(x, 2) ^ rol32(x, 10) ^ rol32(x, 18) ^ rol32(x, 24);
-}
-
-static u32 sm4_key_sub(u32 x)
-{
-       return sm4_key_lin_sub(sm4_t_non_lin_sub(x));
-}
-
-static u32 sm4_enc_sub(u32 x)
-{
-       return sm4_enc_lin_sub(sm4_t_non_lin_sub(x));
-}
-
-static u32 sm4_round(const u32 *x, const u32 rk)
-{
-       return x[0] ^ sm4_enc_sub(x[1] ^ x[2] ^ x[3] ^ rk);
-}
-
-
 /**
- * crypto_sm4_expand_key - Expands the SM4 key as described in GB/T 32907-2016
- * @ctx:       The location where the computed key will be stored.
- * @in_key:    The supplied key.
- * @key_len:   The length of the supplied key.
- *
- * Returns 0 on success. The function fails only if an invalid key size (or
- * pointer) is supplied.
- */
-int crypto_sm4_expand_key(struct crypto_sm4_ctx *ctx, const u8 *in_key,
-                         unsigned int key_len)
-{
-       u32 rk[4], t;
-       const u32 *key = (u32 *)in_key;
-       int i;
-
-       if (key_len != SM4_KEY_SIZE)
-               return -EINVAL;
-
-       for (i = 0; i < 4; ++i)
-               rk[i] = get_unaligned_be32(&key[i]) ^ fk[i];
-
-       for (i = 0; i < 32; ++i) {
-               t = rk[0] ^ sm4_key_sub(rk[1] ^ rk[2] ^ rk[3] ^ ck[i]);
-               ctx->rkey_enc[i] = t;
-               rk[0] = rk[1];
-               rk[1] = rk[2];
-               rk[2] = rk[3];
-               rk[3] = t;
-       }
-
-       for (i = 0; i < 32; ++i)
-               ctx->rkey_dec[i] = ctx->rkey_enc[31 - i];
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(crypto_sm4_expand_key);
-
-/**
- * crypto_sm4_set_key - Set the SM4 key.
+ * sm4_setkey - Set the SM4 key.
  * @tfm:       The %crypto_tfm that is used in the context.
  * @in_key:    The input key.
  * @key_len:   The size of the key.
  *
- * This function uses crypto_sm4_expand_key() to expand the key.
- * &crypto_sm4_ctx _must_ be the private data embedded in @tfm which is
+ * This function uses sm4_expandkey() to expand the key.
+ * &sm4_ctx _must_ be the private data embedded in @tfm which is
  * retrieved with crypto_tfm_ctx().
  *
  * Return: 0 on success; -EINVAL on failure (only happens for bad key lengths)
  */
-int crypto_sm4_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+static int sm4_setkey(struct crypto_tfm *tfm, const u8 *in_key,
                       unsigned int key_len)
 {
-       struct crypto_sm4_ctx *ctx = crypto_tfm_ctx(tfm);
-
-       return crypto_sm4_expand_key(ctx, in_key, key_len);
-}
-EXPORT_SYMBOL_GPL(crypto_sm4_set_key);
-
-static void sm4_do_crypt(const u32 *rk, u32 *out, const u32 *in)
-{
-       u32 x[4], i, t;
-
-       for (i = 0; i < 4; ++i)
-               x[i] = get_unaligned_be32(&in[i]);
-
-       for (i = 0; i < 32; ++i) {
-               t = sm4_round(x, rk[i]);
-               x[0] = x[1];
-               x[1] = x[2];
-               x[2] = x[3];
-               x[3] = t;
-       }
+       struct sm4_ctx *ctx = crypto_tfm_ctx(tfm);
 
-       for (i = 0; i < 4; ++i)
-               put_unaligned_be32(x[3 - i], &out[i]);
+       return sm4_expandkey(ctx, in_key, key_len);
 }
 
 /* encrypt a block of text */
 
-void crypto_sm4_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+static void sm4_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 {
-       const struct crypto_sm4_ctx *ctx = crypto_tfm_ctx(tfm);
+       const struct sm4_ctx *ctx = crypto_tfm_ctx(tfm);
 
-       sm4_do_crypt(ctx->rkey_enc, (u32 *)out, (u32 *)in);
+       sm4_crypt_block(ctx->rkey_enc, out, in);
 }
-EXPORT_SYMBOL_GPL(crypto_sm4_encrypt);
 
 /* decrypt a block of text */
 
-void crypto_sm4_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+static void sm4_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 {
-       const struct crypto_sm4_ctx *ctx = crypto_tfm_ctx(tfm);
+       const struct sm4_ctx *ctx = crypto_tfm_ctx(tfm);
 
-       sm4_do_crypt(ctx->rkey_dec, (u32 *)out, (u32 *)in);
+       sm4_crypt_block(ctx->rkey_dec, out, in);
 }
-EXPORT_SYMBOL_GPL(crypto_sm4_decrypt);
 
 static struct crypto_alg sm4_alg = {
        .cra_name               =       "sm4",
@@ -208,15 +60,15 @@ static struct crypto_alg sm4_alg = {
        .cra_priority           =       100,
        .cra_flags              =       CRYPTO_ALG_TYPE_CIPHER,
        .cra_blocksize          =       SM4_BLOCK_SIZE,
-       .cra_ctxsize            =       sizeof(struct crypto_sm4_ctx),
+       .cra_ctxsize            =       sizeof(struct sm4_ctx),
        .cra_module             =       THIS_MODULE,
        .cra_u                  =       {
                .cipher = {
                        .cia_min_keysize        =       SM4_KEY_SIZE,
                        .cia_max_keysize        =       SM4_KEY_SIZE,
-                       .cia_setkey             =       crypto_sm4_set_key,
-                       .cia_encrypt            =       crypto_sm4_encrypt,
-                       .cia_decrypt            =       crypto_sm4_decrypt
+                       .cia_setkey             =       sm4_setkey,
+                       .cia_encrypt            =       sm4_encrypt,
+                       .cia_decrypt            =       sm4_decrypt
                }
        }
 };
index f8d06da..82b0400 100644 (file)
@@ -77,7 +77,7 @@ static const char *check[] = {
        NULL
 };
 
-static const int block_sizes[] = { 16, 64, 256, 1024, 1420, 4096, 0 };
+static const int block_sizes[] = { 16, 64, 128, 256, 1024, 1420, 4096, 0 };
 static const int aead_sizes[] = { 16, 64, 256, 512, 1024, 1420, 4096, 8192, 0 };
 
 #define XBUFSIZE 8
@@ -290,6 +290,11 @@ static void test_mb_aead_speed(const char *algo, int enc, int secs,
        }
 
        ret = crypto_aead_setauthsize(tfm, authsize);
+       if (ret) {
+               pr_err("alg: aead: Failed to setauthsize for %s: %d\n", algo,
+                      ret);
+               goto out_free_tfm;
+       }
 
        for (i = 0; i < num_mb; ++i)
                if (testmgr_alloc_buf(data[i].xbuf)) {
@@ -315,7 +320,7 @@ static void test_mb_aead_speed(const char *algo, int enc, int secs,
        for (i = 0; i < num_mb; ++i) {
                data[i].req = aead_request_alloc(tfm, GFP_KERNEL);
                if (!data[i].req) {
-                       pr_err("alg: skcipher: Failed to allocate request for %s\n",
+                       pr_err("alg: aead: Failed to allocate request for %s\n",
                               algo);
                        while (i--)
                                aead_request_free(data[i].req);
@@ -567,13 +572,19 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
        sgout = &sg[9];
 
        tfm = crypto_alloc_aead(algo, 0, 0);
-
        if (IS_ERR(tfm)) {
                pr_err("alg: aead: Failed to load transform for %s: %ld\n", algo,
                       PTR_ERR(tfm));
                goto out_notfm;
        }
 
+       ret = crypto_aead_setauthsize(tfm, authsize);
+       if (ret) {
+               pr_err("alg: aead: Failed to setauthsize for %s: %d\n", algo,
+                      ret);
+               goto out_noreq;
+       }
+
        crypto_init_wait(&wait);
        printk(KERN_INFO "\ntesting speed of %s (%s) %s\n", algo,
                        get_driver_name(crypto_aead, tfm), e);
@@ -611,8 +622,13 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
                                        break;
                                }
                        }
+
                        ret = crypto_aead_setkey(tfm, key, *keysize);
-                       ret = crypto_aead_setauthsize(tfm, authsize);
+                       if (ret) {
+                               pr_err("setkey() failed flags=%x: %d\n",
+                                       crypto_aead_get_flags(tfm), ret);
+                               goto out;
+                       }
 
                        iv_len = crypto_aead_ivsize(tfm);
                        if (iv_len)
@@ -622,15 +638,8 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
                        printk(KERN_INFO "test %u (%d bit key, %d byte blocks): ",
                                        i, *keysize * 8, bs);
 
-
                        memset(tvmem[0], 0xff, PAGE_SIZE);
 
-                       if (ret) {
-                               pr_err("setkey() failed flags=%x\n",
-                                               crypto_aead_get_flags(tfm));
-                               goto out;
-                       }
-
                        sg_init_aead(sg, xbuf, bs + (enc ? 0 : authsize),
                                     assoc, aad_size);
 
@@ -1907,6 +1916,14 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
                ret += tcrypt_test("streebog512");
                break;
 
+       case 55:
+               ret += tcrypt_test("gcm(sm4)");
+               break;
+
+       case 56:
+               ret += tcrypt_test("ccm(sm4)");
+               break;
+
        case 100:
                ret += tcrypt_test("hmac(md5)");
                break;
@@ -1998,6 +2015,15 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
        case 157:
                ret += tcrypt_test("authenc(hmac(sha1),ecb(cipher_null))");
                break;
+
+       case 158:
+               ret += tcrypt_test("cbcmac(sm4)");
+               break;
+
+       case 159:
+               ret += tcrypt_test("cmac(sm4)");
+               break;
+
        case 181:
                ret += tcrypt_test("authenc(hmac(sha1),cbc(des))");
                break;
@@ -2031,6 +2057,7 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
        case 191:
                ret += tcrypt_test("ecb(sm4)");
                ret += tcrypt_test("cbc(sm4)");
+               ret += tcrypt_test("cfb(sm4)");
                ret += tcrypt_test("ctr(sm4)");
                break;
        case 200:
@@ -2289,6 +2316,10 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
                                speed_template_16);
                test_cipher_speed("cbc(sm4)", DECRYPT, sec, NULL, 0,
                                speed_template_16);
+               test_cipher_speed("cfb(sm4)", ENCRYPT, sec, NULL, 0,
+                               speed_template_16);
+               test_cipher_speed("cfb(sm4)", DECRYPT, sec, NULL, 0,
+                               speed_template_16);
                test_cipher_speed("ctr(sm4)", ENCRYPT, sec, NULL, 0,
                                speed_template_16);
                test_cipher_speed("ctr(sm4)", DECRYPT, sec, NULL, 0,
@@ -2322,6 +2353,34 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
                                NULL, 0, 16, 8, speed_template_16);
                break;
 
+       case 222:
+               test_aead_speed("gcm(sm4)", ENCRYPT, sec,
+                               NULL, 0, 16, 8, speed_template_16);
+               test_aead_speed("gcm(sm4)", DECRYPT, sec,
+                               NULL, 0, 16, 8, speed_template_16);
+               break;
+
+       case 223:
+               test_aead_speed("rfc4309(ccm(sm4))", ENCRYPT, sec,
+                               NULL, 0, 16, 16, aead_speed_template_19);
+               test_aead_speed("rfc4309(ccm(sm4))", DECRYPT, sec,
+                               NULL, 0, 16, 16, aead_speed_template_19);
+               break;
+
+       case 224:
+               test_mb_aead_speed("gcm(sm4)", ENCRYPT, sec, NULL, 0, 16, 8,
+                                  speed_template_16, num_mb);
+               test_mb_aead_speed("gcm(sm4)", DECRYPT, sec, NULL, 0, 16, 8,
+                                  speed_template_16, num_mb);
+               break;
+
+       case 225:
+               test_mb_aead_speed("rfc4309(ccm(sm4))", ENCRYPT, sec, NULL, 0,
+                                  16, 16, aead_speed_template_19, num_mb);
+               test_mb_aead_speed("rfc4309(ccm(sm4))", DECRYPT, sec, NULL, 0,
+                                  16, 16, aead_speed_template_19, num_mb);
+               break;
+
        case 300:
                if (alg) {
                        test_hash_speed(alg, sec, generic_hash_speed_template);
@@ -2757,6 +2816,25 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
                                   speed_template_8_32);
                break;
 
+       case 518:
+               test_acipher_speed("ecb(sm4)", ENCRYPT, sec, NULL, 0,
+                               speed_template_16);
+               test_acipher_speed("ecb(sm4)", DECRYPT, sec, NULL, 0,
+                               speed_template_16);
+               test_acipher_speed("cbc(sm4)", ENCRYPT, sec, NULL, 0,
+                               speed_template_16);
+               test_acipher_speed("cbc(sm4)", DECRYPT, sec, NULL, 0,
+                               speed_template_16);
+               test_acipher_speed("cfb(sm4)", ENCRYPT, sec, NULL, 0,
+                               speed_template_16);
+               test_acipher_speed("cfb(sm4)", DECRYPT, sec, NULL, 0,
+                               speed_template_16);
+               test_acipher_speed("ctr(sm4)", ENCRYPT, sec, NULL, 0,
+                               speed_template_16);
+               test_acipher_speed("ctr(sm4)", DECRYPT, sec, NULL, 0,
+                               speed_template_16);
+               break;
+
        case 600:
                test_mb_skcipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0,
                                       speed_template_16_24_32, num_mb);
index c978e41..70f69f0 100644 (file)
@@ -4450,6 +4450,12 @@ static const struct alg_test_desc alg_test_descs[] = {
                .suite = {
                        .hash = __VECS(aes_cbcmac_tv_template)
                }
+       }, {
+               .alg = "cbcmac(sm4)",
+               .test = alg_test_hash,
+               .suite = {
+                       .hash = __VECS(sm4_cbcmac_tv_template)
+               }
        }, {
                .alg = "ccm(aes)",
                .generic_driver = "ccm_base(ctr(aes-generic),cbcmac(aes-generic))",
@@ -4461,6 +4467,16 @@ static const struct alg_test_desc alg_test_descs[] = {
                                .einval_allowed = 1,
                        }
                }
+       }, {
+               .alg = "ccm(sm4)",
+               .generic_driver = "ccm_base(ctr(sm4-generic),cbcmac(sm4-generic))",
+               .test = alg_test_aead,
+               .suite = {
+                       .aead = {
+                               ____VECS(sm4_ccm_tv_template),
+                               .einval_allowed = 1,
+                       }
+               }
        }, {
                .alg = "cfb(aes)",
                .test = alg_test_skcipher,
@@ -4494,6 +4510,12 @@ static const struct alg_test_desc alg_test_descs[] = {
                .suite = {
                        .hash = __VECS(des3_ede_cmac64_tv_template)
                }
+       }, {
+               .alg = "cmac(sm4)",
+               .test = alg_test_hash,
+               .suite = {
+                       .hash = __VECS(sm4_cmac128_tv_template)
+               }
        }, {
                .alg = "compress_null",
                .test = alg_test_null,
@@ -4967,6 +4989,13 @@ static const struct alg_test_desc alg_test_descs[] = {
                .suite = {
                        .aead = __VECS(aes_gcm_tv_template)
                }
+       }, {
+               .alg = "gcm(sm4)",
+               .generic_driver = "gcm_base(ctr(sm4-generic),ghash-generic)",
+               .test = alg_test_aead,
+               .suite = {
+                       .aead = __VECS(sm4_gcm_tv_template)
+               }
        }, {
                .alg = "ghash",
                .test = alg_test_hash,
index 3ed6ab3..e6fca34 100644 (file)
@@ -13328,6 +13328,154 @@ static const struct cipher_testvec sm4_cfb_tv_template[] = {
        }
 };
 
+static const struct aead_testvec sm4_gcm_tv_template[] = {
+       { /* From https://datatracker.ietf.org/doc/html/rfc8998#appendix-A.1 */
+               .key    = "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
+                         "\xFE\xDC\xBA\x98\x76\x54\x32\x10",
+               .klen   = 16,
+               .iv     = "\x00\x00\x12\x34\x56\x78\x00\x00"
+                         "\x00\x00\xAB\xCD",
+               .ptext  = "\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA"
+                         "\xBB\xBB\xBB\xBB\xBB\xBB\xBB\xBB"
+                         "\xCC\xCC\xCC\xCC\xCC\xCC\xCC\xCC"
+                         "\xDD\xDD\xDD\xDD\xDD\xDD\xDD\xDD"
+                         "\xEE\xEE\xEE\xEE\xEE\xEE\xEE\xEE"
+                         "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF"
+                         "\xEE\xEE\xEE\xEE\xEE\xEE\xEE\xEE"
+                         "\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA",
+               .plen   = 64,
+               .assoc  = "\xFE\xED\xFA\xCE\xDE\xAD\xBE\xEF"
+                         "\xFE\xED\xFA\xCE\xDE\xAD\xBE\xEF"
+                         "\xAB\xAD\xDA\xD2",
+               .alen   = 20,
+               .ctext  = "\x17\xF3\x99\xF0\x8C\x67\xD5\xEE"
+                         "\x19\xD0\xDC\x99\x69\xC4\xBB\x7D"
+                         "\x5F\xD4\x6F\xD3\x75\x64\x89\x06"
+                         "\x91\x57\xB2\x82\xBB\x20\x07\x35"
+                         "\xD8\x27\x10\xCA\x5C\x22\xF0\xCC"
+                         "\xFA\x7C\xBF\x93\xD4\x96\xAC\x15"
+                         "\xA5\x68\x34\xCB\xCF\x98\xC3\x97"
+                         "\xB4\x02\x4A\x26\x91\x23\x3B\x8D"
+                         "\x83\xDE\x35\x41\xE4\xC2\xB5\x81"
+                         "\x77\xE0\x65\xA9\xBF\x7B\x62\xEC",
+               .clen   = 80,
+       }
+};
+
+static const struct aead_testvec sm4_ccm_tv_template[] = {
+       { /* From https://datatracker.ietf.org/doc/html/rfc8998#appendix-A.2 */
+               .key    = "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
+                         "\xFE\xDC\xBA\x98\x76\x54\x32\x10",
+               .klen   = 16,
+               .iv     = "\x02\x00\x00\x12\x34\x56\x78\x00"
+                         "\x00\x00\x00\xAB\xCD\x00\x00\x00",
+               .ptext  = "\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA"
+                         "\xBB\xBB\xBB\xBB\xBB\xBB\xBB\xBB"
+                         "\xCC\xCC\xCC\xCC\xCC\xCC\xCC\xCC"
+                         "\xDD\xDD\xDD\xDD\xDD\xDD\xDD\xDD"
+                         "\xEE\xEE\xEE\xEE\xEE\xEE\xEE\xEE"
+                         "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF"
+                         "\xEE\xEE\xEE\xEE\xEE\xEE\xEE\xEE"
+                         "\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA",
+               .plen   = 64,
+               .assoc  = "\xFE\xED\xFA\xCE\xDE\xAD\xBE\xEF"
+                         "\xFE\xED\xFA\xCE\xDE\xAD\xBE\xEF"
+                         "\xAB\xAD\xDA\xD2",
+               .alen   = 20,
+               .ctext  = "\x48\xAF\x93\x50\x1F\xA6\x2A\xDB"
+                         "\xCD\x41\x4C\xCE\x60\x34\xD8\x95"
+                         "\xDD\xA1\xBF\x8F\x13\x2F\x04\x20"
+                         "\x98\x66\x15\x72\xE7\x48\x30\x94"
+                         "\xFD\x12\xE5\x18\xCE\x06\x2C\x98"
+                         "\xAC\xEE\x28\xD9\x5D\xF4\x41\x6B"
+                         "\xED\x31\xA2\xF0\x44\x76\xC1\x8B"
+                         "\xB4\x0C\x84\xA7\x4B\x97\xDC\x5B"
+                         "\x16\x84\x2D\x4F\xA1\x86\xF5\x6A"
+                         "\xB3\x32\x56\x97\x1F\xA1\x10\xF4",
+               .clen   = 80,
+       }
+};
+
+static const struct hash_testvec sm4_cbcmac_tv_template[] = {
+       {
+               .key            = "\xff\xee\xdd\xcc\xbb\xaa\x99\x88"
+                                 "\x77\x66\x55\x44\x33\x22\x11\x00",
+               .plaintext      = "\x01\x23\x45\x67\x89\xab\xcd\xef"
+                                 "\xfe\xdc\xba\x98\x76\x54\x32\x10",
+               .digest         = "\x97\xb4\x75\x8f\x84\x92\x3d\x3f"
+                                 "\x86\x81\x0e\x0e\xea\x14\x6d\x73",
+               .psize          = 16,
+               .ksize          = 16,
+       }, {
+               .key            = "\x01\x23\x45\x67\x89\xab\xcd\xef"
+                                 "\xfe\xdc\xBA\x98\x76\x54\x32\x10",
+               .plaintext      = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+                                 "\xbb\xbb\xbb\xbb\xbb\xbb\xbb\xbb"
+                                 "\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc"
+                                 "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd"
+                                 "\xee",
+               .digest         = "\xc7\xdb\x17\x71\xa1\x5c\x0d\x22"
+                                 "\xa3\x39\x3a\x31\x88\x91\x49\xa1",
+               .psize          = 33,
+               .ksize          = 16,
+       }, {
+               .key            = "\x01\x23\x45\x67\x89\xab\xcd\xef"
+                                 "\xfe\xdc\xBA\x98\x76\x54\x32\x10",
+               .plaintext      = "\xfb\xd1\xbe\x92\x7e\x50\x3f\x16"
+                                 "\xf9\xdd\xbe\x91\x73\x53\x37\x1a"
+                                 "\xfe\xdd\xba\x97\x7e\x53\x3c\x1c"
+                                 "\xfe\xd7\xbf\x9c\x75\x5f\x3e\x11"
+                                 "\xf0\xd8\xbc\x96\x73\x5c\x34\x11"
+                                 "\xf5\xdb\xb1\x99\x7a\x5a\x32\x1f"
+                                 "\xf6\xdf\xb4\x95\x7f\x5f\x3b\x17"
+                                 "\xfd\xdb\xb1\x9b\x76\x5c\x37",
+               .digest         = "\x9b\x07\x88\x7f\xd5\x95\x23\x12"
+                                 "\x64\x0a\x66\x7f\x4e\x25\xca\xd0",
+               .psize          = 63,
+               .ksize          = 16,
+       }
+};
+
+static const struct hash_testvec sm4_cmac128_tv_template[] = {
+       {
+               .key            = "\xff\xee\xdd\xcc\xbb\xaa\x99\x88"
+                                 "\x77\x66\x55\x44\x33\x22\x11\x00",
+               .plaintext      = "\x01\x23\x45\x67\x89\xab\xcd\xef"
+                                 "\xfe\xdc\xba\x98\x76\x54\x32\x10",
+               .digest         = "\x00\xd4\x63\xb4\x9a\xf3\x52\xe2"
+                                 "\x74\xa9\x00\x55\x13\x54\x2a\xd1",
+               .psize          = 16,
+               .ksize          = 16,
+       }, {
+               .key            = "\x01\x23\x45\x67\x89\xab\xcd\xef"
+                                 "\xfe\xdc\xBA\x98\x76\x54\x32\x10",
+               .plaintext      = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+                                 "\xbb\xbb\xbb\xbb\xbb\xbb\xbb\xbb"
+                                 "\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc"
+                                 "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd"
+                                 "\xee",
+               .digest         = "\x8a\x8a\xe9\xc0\xc8\x97\x0e\x85"
+                                 "\x21\x57\x02\x10\x1a\xbf\x9c\xc6",
+               .psize          = 33,
+               .ksize          = 16,
+       }, {
+               .key            = "\x01\x23\x45\x67\x89\xab\xcd\xef"
+                                 "\xfe\xdc\xBA\x98\x76\x54\x32\x10",
+               .plaintext      = "\xfb\xd1\xbe\x92\x7e\x50\x3f\x16"
+                                 "\xf9\xdd\xbe\x91\x73\x53\x37\x1a"
+                                 "\xfe\xdd\xba\x97\x7e\x53\x3c\x1c"
+                                 "\xfe\xd7\xbf\x9c\x75\x5f\x3e\x11"
+                                 "\xf0\xd8\xbc\x96\x73\x5c\x34\x11"
+                                 "\xf5\xdb\xb1\x99\x7a\x5a\x32\x1f"
+                                 "\xf6\xdf\xb4\x95\x7f\x5f\x3b\x17"
+                                 "\xfd\xdb\xb1\x9b\x76\x5c\x37",
+               .digest         = "\x5f\x14\xc9\xa9\x20\xb2\xb4\xf0"
+                                 "\x76\xe0\xd8\xd6\xdc\x4f\xe1\xbc",
+               .psize          = 63,
+               .ksize          = 16,
+       }
+};
+
 /* Cast6 test vectors from RFC 2612 */
 static const struct cipher_testvec cast6_tv_template[] = {
        {
index bf79fbb..5e820af 100644 (file)
@@ -775,7 +775,7 @@ static const u64 rc[WHIRLPOOL_ROUNDS] = {
        0xca2dbf07ad5a8333ULL,
 };
 
-/**
+/*
  * The core Whirlpool transform.
  */
 
index 8bad634..30d2db3 100644 (file)
@@ -51,8 +51,6 @@ source "drivers/net/Kconfig"
 
 source "drivers/isdn/Kconfig"
 
-source "drivers/lightnvm/Kconfig"
-
 # input before char - char/joystick depends on it. As does USB.
 
 source "drivers/input/Kconfig"
index 27c018b..be5d40a 100644 (file)
@@ -70,7 +70,6 @@ obj-$(CONFIG_FB_I810)           += video/fbdev/i810/
 obj-$(CONFIG_FB_INTEL)          += video/fbdev/intelfb/
 
 obj-$(CONFIG_PARPORT)          += parport/
-obj-$(CONFIG_NVM)              += lightnvm/
 obj-y                          += base/ block/ misc/ mfd/ nfc/
 obj-$(CONFIG_LIBNVDIMM)                += nvdimm/
 obj-$(CONFIG_DAX)              += dax/
index 38e10ab..14b71b4 100644 (file)
@@ -379,13 +379,6 @@ acpi_ns_repair_CID(struct acpi_evaluate_info *info,
 
                        (*element_ptr)->common.reference_count =
                            original_ref_count;
-
-                       /*
-                        * The original_element holds a reference from the package object
-                        * that represents _HID. Since a new element was created by _HID,
-                        * remove the reference from the _CID package.
-                        */
-                       acpi_ut_remove_reference(original_element);
                }
 
                element_ptr++;
index 5fca182..550b908 100644 (file)
@@ -9,6 +9,42 @@
 #include <linux/module.h>
 #include <linux/platform_device.h>
 
+struct pch_fivr_resp {
+       u64 status;
+       u64 result;
+};
+
+static int pch_fivr_read(acpi_handle handle, char *method, struct pch_fivr_resp *fivr_resp)
+{
+       struct acpi_buffer resp = { sizeof(struct pch_fivr_resp), fivr_resp};
+       struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+       struct acpi_buffer format = { sizeof("NN"), "NN" };
+       union acpi_object *obj;
+       acpi_status status;
+       int ret = -EFAULT;
+
+       status = acpi_evaluate_object(handle, method, NULL, &buffer);
+       if (ACPI_FAILURE(status))
+               return ret;
+
+       obj = buffer.pointer;
+       if (!obj || obj->type != ACPI_TYPE_PACKAGE)
+               goto release_buffer;
+
+       status = acpi_extract_package(obj, &format, &resp);
+       if (ACPI_FAILURE(status))
+               goto release_buffer;
+
+       if (fivr_resp->status)
+               goto release_buffer;
+
+       ret = 0;
+
+release_buffer:
+       kfree(buffer.pointer);
+       return ret;
+}
+
 /*
  * Presentation of attributes which are defined for INT1045
  * They are:
@@ -23,15 +59,14 @@ static ssize_t name##_show(struct device *dev,\
                           char *buf)\
 {\
        struct acpi_device *acpi_dev = dev_get_drvdata(dev);\
-       unsigned long long val;\
-       acpi_status status;\
+       struct pch_fivr_resp fivr_resp;\
+       int status;\
 \
-       status = acpi_evaluate_integer(acpi_dev->handle, #method,\
-                                      NULL, &val);\
-       if (ACPI_SUCCESS(status))\
-               return sprintf(buf, "%d\n", (int)val);\
-       else\
-               return -EINVAL;\
+       status = pch_fivr_read(acpi_dev->handle, #method, &fivr_resp);\
+       if (status)\
+               return status;\
+\
+       return sprintf(buf, "%llu\n", fivr_resp.result);\
 }
 
 #define PCH_FIVR_STORE(name, method) \
index 23d9a09..a3ef6cc 100644 (file)
@@ -3021,6 +3021,9 @@ static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
                struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev;
                struct nd_mapping_desc *mapping;
 
+               /* range index 0 == unmapped in SPA or invalid-SPA */
+               if (memdev->range_index == 0 || spa->range_index == 0)
+                       continue;
                if (memdev->range_index != spa->range_index)
                        continue;
                if (count >= ND_MAX_MAPPINGS) {
index 31cf9ae..1f6007a 100644 (file)
@@ -292,6 +292,12 @@ void __init init_prmt(void)
        int mc = acpi_table_parse_entries(ACPI_SIG_PRMT, sizeof(struct acpi_table_prmt) +
                                          sizeof (struct acpi_table_prmt_header),
                                          0, acpi_parse_prmt, 0);
+       /*
+        * Return immediately if PRMT table is not present or no PRM module found.
+        */
+       if (mc <= 0)
+               return;
+
        pr_info("PRM: found %u modules\n", mc);
 
        status = acpi_install_address_space_handler(ACPI_ROOT_OBJECT,
index dc01fb5..ee78a21 100644 (file)
@@ -423,13 +423,6 @@ static void acpi_dev_get_irqresource(struct resource *res, u32 gsi,
        }
 }
 
-static bool irq_is_legacy(struct acpi_resource_irq *irq)
-{
-       return irq->triggering == ACPI_EDGE_SENSITIVE &&
-               irq->polarity == ACPI_ACTIVE_HIGH &&
-               irq->shareable == ACPI_EXCLUSIVE;
-}
-
 /**
  * acpi_dev_resource_interrupt - Extract ACPI interrupt resource information.
  * @ares: Input ACPI resource object.
@@ -468,7 +461,7 @@ bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index,
                }
                acpi_dev_get_irqresource(res, irq->interrupts[index],
                                         irq->triggering, irq->polarity,
-                                        irq->shareable, irq_is_legacy(irq));
+                                        irq->shareable, true);
                break;
        case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
                ext_irq = &ares->data.extended_irq;
index 1c50780..3a30846 100644 (file)
@@ -378,19 +378,25 @@ static int lps0_device_attach(struct acpi_device *adev,
                 * AMDI0006:
                 * - should use rev_id 0x0
                 * - function mask = 0x3: Should use Microsoft method
+                * AMDI0007:
+                * - Should use rev_id 0x2
+                * - Should only use AMD method
                 */
                const char *hid = acpi_device_hid(adev);
-               rev_id = 0;
+               rev_id = strcmp(hid, "AMDI0007") ? 0 : 2;
                lps0_dsm_func_mask = validate_dsm(adev->handle,
                                        ACPI_LPS0_DSM_UUID_AMD, rev_id, &lps0_dsm_guid);
                lps0_dsm_func_mask_microsoft = validate_dsm(adev->handle,
-                                       ACPI_LPS0_DSM_UUID_MICROSOFT, rev_id,
+                                       ACPI_LPS0_DSM_UUID_MICROSOFT, 0,
                                        &lps0_dsm_guid_microsoft);
                if (lps0_dsm_func_mask > 0x3 && (!strcmp(hid, "AMD0004") ||
                                                 !strcmp(hid, "AMDI0005"))) {
                        lps0_dsm_func_mask = (lps0_dsm_func_mask << 1) | 0x1;
                        acpi_handle_debug(adev->handle, "_DSM UUID %s: Adjusted function mask: 0x%x\n",
                                          ACPI_LPS0_DSM_UUID_AMD, lps0_dsm_func_mask);
+               } else if (lps0_dsm_func_mask_microsoft > 0 && !strcmp(hid, "AMDI0007")) {
+                       lps0_dsm_func_mask_microsoft = -EINVAL;
+                       acpi_handle_debug(adev->handle, "_DSM Using AMD method\n");
                }
        } else {
                rev_id = 1;
@@ -446,7 +452,7 @@ int acpi_s2idle_prepare_late(void)
        if (lps0_dsm_func_mask_microsoft > 0) {
                acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF,
                                lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
-               acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_EXIT,
+               acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_ENTRY,
                                lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
                acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY,
                                lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
@@ -473,7 +479,7 @@ void acpi_s2idle_restore_early(void)
        if (lps0_dsm_func_mask_microsoft > 0) {
                acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT,
                                lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
-               acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_ENTRY,
+               acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_EXIT,
                                lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
                acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON,
                                lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
index ae7189d..b71ea4a 100644 (file)
@@ -637,6 +637,20 @@ unsigned int ata_sff_data_xfer32(struct ata_queued_cmd *qc, unsigned char *buf,
 }
 EXPORT_SYMBOL_GPL(ata_sff_data_xfer32);
 
+static void ata_pio_xfer(struct ata_queued_cmd *qc, struct page *page,
+               unsigned int offset, size_t xfer_size)
+{
+       bool do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
+       unsigned char *buf;
+
+       buf = kmap_atomic(page);
+       qc->ap->ops->sff_data_xfer(qc, buf + offset, xfer_size, do_write);
+       kunmap_atomic(buf);
+
+       if (!do_write && !PageSlab(page))
+               flush_dcache_page(page);
+}
+
 /**
  *     ata_pio_sector - Transfer a sector of data.
  *     @qc: Command on going
@@ -648,11 +662,9 @@ EXPORT_SYMBOL_GPL(ata_sff_data_xfer32);
  */
 static void ata_pio_sector(struct ata_queued_cmd *qc)
 {
-       int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
        struct ata_port *ap = qc->ap;
        struct page *page;
        unsigned int offset;
-       unsigned char *buf;
 
        if (!qc->cursg) {
                qc->curbytes = qc->nbytes;
@@ -670,13 +682,20 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
 
        DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
 
-       /* do the actual data transfer */
-       buf = kmap_atomic(page);
-       ap->ops->sff_data_xfer(qc, buf + offset, qc->sect_size, do_write);
-       kunmap_atomic(buf);
+       /*
+        * Split the transfer when it splits a page boundary.  Note that the
+        * split still has to be dword aligned like all ATA data transfers.
+        */
+       WARN_ON_ONCE(offset % 4);
+       if (offset + qc->sect_size > PAGE_SIZE) {
+               unsigned int split_len = PAGE_SIZE - offset;
 
-       if (!do_write && !PageSlab(page))
-               flush_dcache_page(page);
+               ata_pio_xfer(qc, page, offset, split_len);
+               ata_pio_xfer(qc, nth_page(page, 1), 0,
+                            qc->sect_size - split_len);
+       } else {
+               ata_pio_xfer(qc, page, offset, qc->sect_size);
+       }
 
        qc->curbytes += qc->sect_size;
        qc->cursg_ofs += qc->sect_size;
index f636049..6c0ef9d 100644 (file)
@@ -2837,6 +2837,7 @@ void device_initialize(struct device *dev)
        device_pm_init(dev);
        set_dev_node(dev, -1);
 #ifdef CONFIG_GENERIC_MSI_IRQ
+       raw_spin_lock_init(&dev->msi_lock);
        INIT_LIST_HEAD(&dev->msi_list);
 #endif
        INIT_LIST_HEAD(&dev->links.consumers);
index daeb9b5..437cd61 100644 (file)
@@ -653,8 +653,6 @@ dev_groups_failed:
        else if (drv->remove)
                drv->remove(dev);
 probe_failed:
-       kfree(dev->dma_range_map);
-       dev->dma_range_map = NULL;
        if (dev->bus)
                blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
                                             BUS_NOTIFY_DRIVER_NOT_BOUND, dev);
@@ -662,6 +660,8 @@ pinctrl_bind_failed:
        device_links_no_driver(dev);
        devres_release_all(dev);
        arch_teardown_dma_ops(dev);
+       kfree(dev->dma_range_map);
+       dev->dma_range_map = NULL;
        driver_sysfs_remove(dev);
        dev->driver = NULL;
        dev_set_drvdata(dev, NULL);
index 91899d1..d7d63c1 100644 (file)
@@ -89,12 +89,11 @@ static void __fw_load_abort(struct fw_priv *fw_priv)
 {
        /*
         * There is a small window in which user can write to 'loading'
-        * between loading done and disappearance of 'loading'
+        * between loading done/aborted and disappearance of 'loading'
         */
-       if (fw_sysfs_done(fw_priv))
+       if (fw_state_is_aborted(fw_priv) || fw_sysfs_done(fw_priv))
                return;
 
-       list_del_init(&fw_priv->pending_list);
        fw_state_aborted(fw_priv);
 }
 
@@ -280,7 +279,6 @@ static ssize_t firmware_loading_store(struct device *dev,
                         * Same logic as fw_load_abort, only the DONE bit
                         * is ignored and we set ABORT only on failure.
                         */
-                       list_del_init(&fw_priv->pending_list);
                        if (rc) {
                                fw_state_aborted(fw_priv);
                                written = rc;
@@ -513,6 +511,11 @@ static int fw_load_sysfs_fallback(struct fw_sysfs *fw_sysfs, long timeout)
        }
 
        mutex_lock(&fw_lock);
+       if (fw_state_is_aborted(fw_priv)) {
+               mutex_unlock(&fw_lock);
+               retval = -EINTR;
+               goto out;
+       }
        list_add(&fw_priv->pending_list, &pending_fw_head);
        mutex_unlock(&fw_lock);
 
@@ -535,11 +538,10 @@ static int fw_load_sysfs_fallback(struct fw_sysfs *fw_sysfs, long timeout)
        if (fw_state_is_aborted(fw_priv)) {
                if (retval == -ERESTARTSYS)
                        retval = -EINTR;
-               else
-                       retval = -EAGAIN;
        } else if (fw_priv->is_paged_buf && !fw_priv->data)
                retval = -ENOMEM;
 
+out:
        device_del(f_dev);
 err_put_dev:
        put_device(f_dev);
index 63bd29f..a3014e9 100644 (file)
@@ -117,8 +117,16 @@ static inline void __fw_state_set(struct fw_priv *fw_priv,
 
        WRITE_ONCE(fw_st->status, status);
 
-       if (status == FW_STATUS_DONE || status == FW_STATUS_ABORTED)
+       if (status == FW_STATUS_DONE || status == FW_STATUS_ABORTED) {
+#ifdef CONFIG_FW_LOADER_USER_HELPER
+               /*
+                * Doing this here ensures that the fw_priv is deleted from
+                * the pending list in all abort/done paths.
+                */
+               list_del_init(&fw_priv->pending_list);
+#endif
                complete_all(&fw_st->completion);
+       }
 }
 
 static inline void fw_state_aborted(struct fw_priv *fw_priv)
index 4fdb821..68c549d 100644 (file)
@@ -783,8 +783,10 @@ static void fw_abort_batch_reqs(struct firmware *fw)
                return;
 
        fw_priv = fw->priv;
+       mutex_lock(&fw_lock);
        if (!fw_state_is_aborted(fw_priv))
                fw_state_aborted(fw_priv);
+       mutex_unlock(&fw_lock);
 }
 
 /* called from request_firmware() and request_firmware_work_func() */
index 0b72b13..3d6c8f9 100644 (file)
  * and the callback to write the MSI message.
  */
 struct platform_msi_priv_data {
-       struct device           *dev;
-       void                    *host_data;
-       msi_alloc_info_t        arg;
-       irq_write_msi_msg_t     write_msg;
-       int                     devid;
+       struct device                   *dev;
+       void                            *host_data;
+       const struct attribute_group    **msi_irq_groups;
+       msi_alloc_info_t                arg;
+       irq_write_msi_msg_t             write_msg;
+       int                             devid;
 };
 
 /* The devid allocator */
@@ -272,8 +273,16 @@ int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec,
        if (err)
                goto out_free_desc;
 
+       priv_data->msi_irq_groups = msi_populate_sysfs(dev);
+       if (IS_ERR(priv_data->msi_irq_groups)) {
+               err = PTR_ERR(priv_data->msi_irq_groups);
+               goto out_free_irqs;
+       }
+
        return 0;
 
+out_free_irqs:
+       msi_domain_free_irqs(dev->msi_domain, dev);
 out_free_desc:
        platform_msi_free_descs(dev, 0, nvec);
 out_free_priv_data:
@@ -293,6 +302,7 @@ void platform_msi_domain_free_irqs(struct device *dev)
                struct msi_desc *desc;
 
                desc = first_msi_entry(dev);
+               msi_destroy_sysfs(dev, desc->platform.msi_priv_data->msi_irq_groups);
                platform_msi_free_priv_data(desc->platform.msi_priv_data);
        }
 
index a934c67..f10688e 100644 (file)
@@ -435,7 +435,7 @@ static void genpd_restore_performance_state(struct device *dev,
 int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
 {
        struct generic_pm_domain *genpd;
-       int ret;
+       int ret = 0;
 
        genpd = dev_to_genpd_safe(dev);
        if (!genpd)
@@ -446,7 +446,13 @@ int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
                return -EINVAL;
 
        genpd_lock(genpd);
-       ret = genpd_set_performance_state(dev, state);
+       if (pm_runtime_suspended(dev)) {
+               dev_gpd_data(dev)->rpm_pstate = state;
+       } else {
+               ret = genpd_set_performance_state(dev, state);
+               if (!ret)
+                       dev_gpd_data(dev)->rpm_pstate = 0;
+       }
        genpd_unlock(genpd);
 
        return ret;
index 0097696..b190591 100644 (file)
@@ -53,6 +53,10 @@ struct regmap {
                        spinlock_t spinlock;
                        unsigned long spinlock_flags;
                };
+               struct {
+                       raw_spinlock_t raw_spinlock;
+                       unsigned long raw_spinlock_flags;
+               };
        };
        regmap_lock lock;
        regmap_unlock unlock;
index 211a335..ad684d3 100644 (file)
@@ -368,7 +368,7 @@ static ssize_t regmap_reg_ranges_read_file(struct file *file,
        char *buf;
        char *entry;
        int ret;
-       unsigned entry_len;
+       unsigned int entry_len;
 
        if (*ppos < 0 || !count)
                return -EINVAL;
index f9cd51a..71f16be 100644 (file)
@@ -15,7 +15,7 @@
 
 struct regmap_mmio_context {
        void __iomem *regs;
-       unsigned val_bytes;
+       unsigned int val_bytes;
        bool relaxed_mmio;
 
        bool attached_clk;
index fe3e38d..21a0c25 100644 (file)
@@ -533,6 +533,23 @@ __releases(&map->spinlock)
        spin_unlock_irqrestore(&map->spinlock, map->spinlock_flags);
 }
 
+static void regmap_lock_raw_spinlock(void *__map)
+__acquires(&map->raw_spinlock)
+{
+       struct regmap *map = __map;
+       unsigned long flags;
+
+       raw_spin_lock_irqsave(&map->raw_spinlock, flags);
+       map->raw_spinlock_flags = flags;
+}
+
+static void regmap_unlock_raw_spinlock(void *__map)
+__releases(&map->raw_spinlock)
+{
+       struct regmap *map = __map;
+       raw_spin_unlock_irqrestore(&map->raw_spinlock, map->raw_spinlock_flags);
+}
+
 static void dev_get_regmap_release(struct device *dev, void *res)
 {
        /*
@@ -770,11 +787,19 @@ struct regmap *__regmap_init(struct device *dev,
        } else {
                if ((bus && bus->fast_io) ||
                    config->fast_io) {
-                       spin_lock_init(&map->spinlock);
-                       map->lock = regmap_lock_spinlock;
-                       map->unlock = regmap_unlock_spinlock;
-                       lockdep_set_class_and_name(&map->spinlock,
-                                                  lock_key, lock_name);
+                       if (config->use_raw_spinlock) {
+                               raw_spin_lock_init(&map->raw_spinlock);
+                               map->lock = regmap_lock_raw_spinlock;
+                               map->unlock = regmap_unlock_raw_spinlock;
+                               lockdep_set_class_and_name(&map->raw_spinlock,
+                                                          lock_key, lock_name);
+                       } else {
+                               spin_lock_init(&map->spinlock);
+                               map->lock = regmap_lock_spinlock;
+                               map->unlock = regmap_unlock_spinlock;
+                               lockdep_set_class_and_name(&map->spinlock,
+                                                          lock_key, lock_name);
+                       }
                } else {
                        mutex_init(&map->mutex);
                        map->lock = regmap_lock_mutex;
@@ -1126,10 +1151,10 @@ skip_format_initialization:
                /* Make sure, that this register range has no selector
                   or data window within its boundary */
                for (j = 0; j < config->num_ranges; j++) {
-                       unsigned sel_reg = config->ranges[j].selector_reg;
-                       unsigned win_min = config->ranges[j].window_start;
-                       unsigned win_max = win_min +
-                                          config->ranges[j].window_len - 1;
+                       unsigned int sel_reg = config->ranges[j].selector_reg;
+                       unsigned int win_min = config->ranges[j].window_start;
+                       unsigned int win_max = win_min +
+                                              config->ranges[j].window_len - 1;
 
                        /* Allow data window inside its own virtual range */
                        if (j == i)
@@ -1298,7 +1323,7 @@ EXPORT_SYMBOL_GPL(devm_regmap_field_alloc);
  */
 int regmap_field_bulk_alloc(struct regmap *regmap,
                            struct regmap_field **rm_field,
-                           struct reg_field *reg_field,
+                           const struct reg_field *reg_field,
                            int num_fields)
 {
        struct regmap_field *rf;
@@ -1334,7 +1359,7 @@ EXPORT_SYMBOL_GPL(regmap_field_bulk_alloc);
 int devm_regmap_field_bulk_alloc(struct device *dev,
                                 struct regmap *regmap,
                                 struct regmap_field **rm_field,
-                                struct reg_field *reg_field,
+                                const struct reg_field *reg_field,
                                 int num_fields)
 {
        struct regmap_field *rf;
@@ -1667,7 +1692,7 @@ static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
                        if (ret) {
                                dev_err(map->dev,
                                        "Error in caching of register: %x ret: %d\n",
-                                       reg + i, ret);
+                                       reg + regmap_get_offset(map, i), ret);
                                return ret;
                        }
                }
index 63056cf..fbb3a55 100644 (file)
@@ -213,7 +213,7 @@ config BLK_DEV_LOOP_MIN_COUNT
          dynamically allocated with the /dev/loop-control interface.
 
 config BLK_DEV_CRYPTOLOOP
-       tristate "Cryptoloop Support"
+       tristate "Cryptoloop Support (DEPRECATED)"
        select CRYPTO
        select CRYPTO_CBC
        depends on BLK_DEV_LOOP
@@ -225,7 +225,7 @@ config BLK_DEV_CRYPTOLOOP
          WARNING: This device is not safe for journaled file systems like
          ext3 or Reiserfs. Please use the Device Mapper crypto module
          instead, which can be configured to be on-disk compatible with the
-         cryptoloop device.
+         cryptoloop device.  cryptoloop support will be removed in Linux 5.16.
 
 source "drivers/block/drbd/Kconfig"
 
index 9569411..58ec167 100644 (file)
@@ -27,9 +27,6 @@
 
 #include <linux/uaccess.h>
 
-#define PAGE_SECTORS_SHIFT     (PAGE_SHIFT - SECTOR_SHIFT)
-#define PAGE_SECTORS           (1 << PAGE_SECTORS_SHIFT)
-
 /*
  * Each block ramdisk device has a radix_tree brd_pages of pages that stores
  * the pages containing the block device's contents. A brd page's ->index is
index 3cabc33..f0a91fa 100644 (file)
@@ -189,6 +189,8 @@ init_cryptoloop(void)
 
        if (rc)
                printk(KERN_ERR "cryptoloop: loop_register_transfer failed\n");
+       else
+               pr_warn("the cryptoloop driver has been deprecated and will be removed in in Linux 5.16\n");
        return rc;
 }
 
index e7d0e63..44ccf8b 100644 (file)
@@ -1364,7 +1364,7 @@ static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backi
 
        if (b) {
                blk_stack_limits(&q->limits, &b->limits, 0);
-               blk_queue_update_readahead(q);
+               disk_update_readahead(device->vdisk);
        }
        fixup_discard_if_not_supported(q);
        fixup_write_zeroes(device, q);
index 13beb98..5ca2336 100644 (file)
@@ -905,13 +905,12 @@ static bool drbd_may_do_local_read(struct drbd_device *device, sector_t sector,
 static bool remote_due_to_read_balancing(struct drbd_device *device, sector_t sector,
                enum drbd_read_balancing rbm)
 {
-       struct backing_dev_info *bdi;
        int stripe_shift;
 
        switch (rbm) {
        case RB_CONGESTED_REMOTE:
-               bdi = device->ldev->backing_bdev->bd_disk->queue->backing_dev_info;
-               return bdi_read_congested(bdi);
+               return bdi_read_congested(
+                       device->ldev->backing_bdev->bd_disk->bdi);
        case RB_LEAST_PENDING:
                return atomic_read(&device->local_cnt) >
                        atomic_read(&device->ap_pending_cnt) + atomic_read(&device->rs_pending_cnt);
index 87460e0..fef79ea 100644 (file)
@@ -4029,23 +4029,23 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
        if (fdc_state[FDC(drive)].rawcmd == 1)
                fdc_state[FDC(drive)].rawcmd = 2;
 
-       if (mode & (FMODE_READ|FMODE_WRITE)) {
-               drive_state[drive].last_checked = 0;
-               clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &drive_state[drive].flags);
-               if (bdev_check_media_change(bdev))
-                       floppy_revalidate(bdev->bd_disk);
-               if (test_bit(FD_DISK_CHANGED_BIT, &drive_state[drive].flags))
-                       goto out;
-               if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &drive_state[drive].flags))
+       if (!(mode & FMODE_NDELAY)) {
+               if (mode & (FMODE_READ|FMODE_WRITE)) {
+                       drive_state[drive].last_checked = 0;
+                       clear_bit(FD_OPEN_SHOULD_FAIL_BIT,
+                                 &drive_state[drive].flags);
+                       if (bdev_check_media_change(bdev))
+                               floppy_revalidate(bdev->bd_disk);
+                       if (test_bit(FD_DISK_CHANGED_BIT, &drive_state[drive].flags))
+                               goto out;
+                       if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &drive_state[drive].flags))
+                               goto out;
+               }
+               res = -EROFS;
+               if ((mode & FMODE_WRITE) &&
+                   !test_bit(FD_DISK_WRITABLE_BIT, &drive_state[drive].flags))
                        goto out;
        }
-
-       res = -EROFS;
-
-       if ((mode & FMODE_WRITE) &&
-                       !test_bit(FD_DISK_WRITABLE_BIT, &drive_state[drive].flags))
-               goto out;
-
        mutex_unlock(&open_lock);
        mutex_unlock(&floppy_mutex);
        return 0;
index f37b9e3..fa1c298 100644 (file)
 
 static DEFINE_IDR(loop_index_idr);
 static DEFINE_MUTEX(loop_ctl_mutex);
+static DEFINE_MUTEX(loop_validate_mutex);
+
+/**
+ * loop_global_lock_killable() - take locks for safe loop_validate_file() test
+ *
+ * @lo: struct loop_device
+ * @global: true if @lo is about to bind another "struct loop_device", false otherwise
+ *
+ * Returns 0 on success, -EINTR otherwise.
+ *
+ * Since loop_validate_file() traverses on other "struct loop_device" if
+ * is_loop_device() is true, we need a global lock for serializing concurrent
+ * loop_configure()/loop_change_fd()/__loop_clr_fd() calls.
+ */
+static int loop_global_lock_killable(struct loop_device *lo, bool global)
+{
+       int err;
+
+       if (global) {
+               err = mutex_lock_killable(&loop_validate_mutex);
+               if (err)
+                       return err;
+       }
+       err = mutex_lock_killable(&lo->lo_mutex);
+       if (err && global)
+               mutex_unlock(&loop_validate_mutex);
+       return err;
+}
+
+/**
+ * loop_global_unlock() - release locks taken by loop_global_lock_killable()
+ *
+ * @lo: struct loop_device
+ * @global: true if @lo was about to bind another "struct loop_device", false otherwise
+ */
+static void loop_global_unlock(struct loop_device *lo, bool global)
+{
+       mutex_unlock(&lo->lo_mutex);
+       if (global)
+               mutex_unlock(&loop_validate_mutex);
+}
 
 static int max_part;
 static int part_shift;
@@ -672,13 +713,15 @@ static int loop_validate_file(struct file *file, struct block_device *bdev)
        while (is_loop_device(f)) {
                struct loop_device *l;
 
+               lockdep_assert_held(&loop_validate_mutex);
                if (f->f_mapping->host->i_rdev == bdev->bd_dev)
                        return -EBADF;
 
                l = I_BDEV(f->f_mapping->host)->bd_disk->private_data;
-               if (l->lo_state != Lo_bound) {
+               if (l->lo_state != Lo_bound)
                        return -EINVAL;
-               }
+               /* Order wrt setting lo->lo_backing_file in loop_configure(). */
+               rmb();
                f = l->lo_backing_file;
        }
        if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))
@@ -697,13 +740,18 @@ static int loop_validate_file(struct file *file, struct block_device *bdev)
 static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
                          unsigned int arg)
 {
-       struct file     *file = NULL, *old_file;
-       int             error;
-       bool            partscan;
+       struct file *file = fget(arg);
+       struct file *old_file;
+       int error;
+       bool partscan;
+       bool is_loop;
 
-       error = mutex_lock_killable(&lo->lo_mutex);
+       if (!file)
+               return -EBADF;
+       is_loop = is_loop_device(file);
+       error = loop_global_lock_killable(lo, is_loop);
        if (error)
-               return error;
+               goto out_putf;
        error = -ENXIO;
        if (lo->lo_state != Lo_bound)
                goto out_err;
@@ -713,11 +761,6 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
        if (!(lo->lo_flags & LO_FLAGS_READ_ONLY))
                goto out_err;
 
-       error = -EBADF;
-       file = fget(arg);
-       if (!file)
-               goto out_err;
-
        error = loop_validate_file(file, bdev);
        if (error)
                goto out_err;
@@ -731,6 +774,7 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
                goto out_err;
 
        /* and ... switch */
+       disk_force_media_change(lo->lo_disk, DISK_EVENT_MEDIA_CHANGE);
        blk_mq_freeze_queue(lo->lo_queue);
        mapping_set_gfp_mask(old_file->f_mapping, lo->old_gfp_mask);
        lo->lo_backing_file = file;
@@ -740,7 +784,16 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
        loop_update_dio(lo);
        blk_mq_unfreeze_queue(lo->lo_queue);
        partscan = lo->lo_flags & LO_FLAGS_PARTSCAN;
-       mutex_unlock(&lo->lo_mutex);
+       loop_global_unlock(lo, is_loop);
+
+       /*
+        * Flush loop_validate_file() before fput(), for l->lo_backing_file
+        * might be pointing at old_file which might be the last reference.
+        */
+       if (!is_loop) {
+               mutex_lock(&loop_validate_mutex);
+               mutex_unlock(&loop_validate_mutex);
+       }
        /*
         * We must drop file reference outside of lo_mutex as dropping
         * the file ref can take open_mutex which creates circular locking
@@ -752,9 +805,9 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
        return 0;
 
 out_err:
-       mutex_unlock(&lo->lo_mutex);
-       if (file)
-               fput(file);
+       loop_global_unlock(lo, is_loop);
+out_putf:
+       fput(file);
        return error;
 }
 
@@ -1136,22 +1189,22 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
                          struct block_device *bdev,
                          const struct loop_config *config)
 {
-       struct file     *file;
-       struct inode    *inode;
+       struct file *file = fget(config->fd);
+       struct inode *inode;
        struct address_space *mapping;
-       int             error;
-       loff_t          size;
-       bool            partscan;
-       unsigned short  bsize;
+       int error;
+       loff_t size;
+       bool partscan;
+       unsigned short bsize;
+       bool is_loop;
+
+       if (!file)
+               return -EBADF;
+       is_loop = is_loop_device(file);
 
        /* This is safe, since we have a reference from open(). */
        __module_get(THIS_MODULE);
 
-       error = -EBADF;
-       file = fget(config->fd);
-       if (!file)
-               goto out;
-
        /*
         * If we don't hold exclusive handle for the device, upgrade to it
         * here to avoid changing device under exclusive owner.
@@ -1162,7 +1215,7 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
                        goto out_putf;
        }
 
-       error = mutex_lock_killable(&lo->lo_mutex);
+       error = loop_global_lock_killable(lo, is_loop);
        if (error)
                goto out_bdev;
 
@@ -1205,6 +1258,7 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
                goto out_unlock;
        }
 
+       disk_force_media_change(lo->lo_disk, DISK_EVENT_MEDIA_CHANGE);
        set_disk_ro(lo->lo_disk, (lo->lo_flags & LO_FLAGS_READ_ONLY) != 0);
 
        INIT_WORK(&lo->rootcg_work, loop_rootcg_workfn);
@@ -1242,6 +1296,9 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
        size = get_loop_size(lo, file);
        loop_set_size(lo, size);
 
+       /* Order wrt reading lo_state in loop_validate_file(). */
+       wmb();
+
        lo->lo_state = Lo_bound;
        if (part_shift)
                lo->lo_flags |= LO_FLAGS_PARTSCAN;
@@ -1249,11 +1306,7 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
        if (partscan)
                lo->lo_disk->flags &= ~GENHD_FL_NO_PART_SCAN;
 
-       /* Grab the block_device to prevent its destruction after we
-        * put /dev/loopXX inode. Later in __loop_clr_fd() we bdput(bdev).
-        */
-       bdgrab(bdev);
-       mutex_unlock(&lo->lo_mutex);
+       loop_global_unlock(lo, is_loop);
        if (partscan)
                loop_reread_partitions(lo);
        if (!(mode & FMODE_EXCL))
@@ -1261,13 +1314,12 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
        return 0;
 
 out_unlock:
-       mutex_unlock(&lo->lo_mutex);
+       loop_global_unlock(lo, is_loop);
 out_bdev:
        if (!(mode & FMODE_EXCL))
                bd_abort_claiming(bdev, loop_configure);
 out_putf:
        fput(file);
-out:
        /* This is safe: open() is still holding a reference. */
        module_put(THIS_MODULE);
        return error;
@@ -1283,6 +1335,18 @@ static int __loop_clr_fd(struct loop_device *lo, bool release)
        int lo_number;
        struct loop_worker *pos, *worker;
 
+       /*
+        * Flush loop_configure() and loop_change_fd(). It is acceptable for
+        * loop_validate_file() to succeed, for actual clear operation has not
+        * started yet.
+        */
+       mutex_lock(&loop_validate_mutex);
+       mutex_unlock(&loop_validate_mutex);
+       /*
+        * loop_validate_file() now fails because l->lo_state != Lo_bound
+        * became visible.
+        */
+
        mutex_lock(&lo->lo_mutex);
        if (WARN_ON_ONCE(lo->lo_state != Lo_rundown)) {
                err = -ENXIO;
@@ -1332,7 +1396,6 @@ static int __loop_clr_fd(struct loop_device *lo, bool release)
        blk_queue_physical_block_size(lo->lo_queue, 512);
        blk_queue_io_min(lo->lo_queue, 512);
        if (bdev) {
-               bdput(bdev);
                invalidate_bdev(bdev);
                bdev->bd_inode->i_mapping->wb_err = 0;
        }
@@ -1349,6 +1412,7 @@ static int __loop_clr_fd(struct loop_device *lo, bool release)
 
        partscan = lo->lo_flags & LO_FLAGS_PARTSCAN && bdev;
        lo_number = lo->lo_number;
+       disk_force_media_change(lo->lo_disk, DISK_EVENT_MEDIA_CHANGE);
 out_unlock:
        mutex_unlock(&lo->lo_mutex);
        if (partscan) {
@@ -2269,7 +2333,8 @@ static int loop_add(int i)
        lo->tag_set.queue_depth = 128;
        lo->tag_set.numa_node = NUMA_NO_NODE;
        lo->tag_set.cmd_size = sizeof(struct loop_cmd);
-       lo->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_STACKING;
+       lo->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_STACKING |
+               BLK_MQ_F_NO_SCHED_BY_DEFAULT;
        lo->tag_set.driver_data = lo;
 
        err = blk_mq_alloc_tag_set(&lo->tag_set);
@@ -2325,6 +2390,8 @@ static int loop_add(int i)
        disk->fops              = &lo_fops;
        disk->private_data      = lo;
        disk->queue             = lo->lo_queue;
+       disk->events            = DISK_EVENT_MEDIA_CHANGE;
+       disk->event_flags       = DISK_EVENT_FLAG_UEVENT;
        sprintf(disk->disk_name, "loop%d", i);
        add_disk(disk);
        mutex_unlock(&loop_ctl_mutex);
index 7b4dd10..c84be00 100644 (file)
@@ -74,7 +74,7 @@ static bool n64cart_do_bvec(struct device *dev, struct bio_vec *bv, u32 pos)
 
        n64cart_wait_dma();
 
-       n64cart_write_reg(PI_DRAM_REG, dma_addr + bv->bv_offset);
+       n64cart_write_reg(PI_DRAM_REG, dma_addr);
        n64cart_write_reg(PI_CART_REG, (bstart | CART_DOMAIN) & CART_MAX);
        n64cart_write_reg(PI_WRITE_REG, bv->bv_len - 1);
 
index c383179..5170a63 100644 (file)
@@ -49,6 +49,7 @@
 
 static DEFINE_IDR(nbd_index_idr);
 static DEFINE_MUTEX(nbd_index_mutex);
+static struct workqueue_struct *nbd_del_wq;
 static int nbd_total_devices = 0;
 
 struct nbd_sock {
@@ -113,12 +114,12 @@ struct nbd_device {
        struct mutex config_lock;
        struct gendisk *disk;
        struct workqueue_struct *recv_workq;
+       struct work_struct remove_work;
 
        struct list_head list;
        struct task_struct *task_recv;
        struct task_struct *task_setup;
 
-       struct completion *destroy_complete;
        unsigned long flags;
 
        char *backend;
@@ -237,32 +238,36 @@ static void nbd_dev_remove(struct nbd_device *nbd)
 {
        struct gendisk *disk = nbd->disk;
 
-       if (disk) {
-               del_gendisk(disk);
-               blk_cleanup_disk(disk);
-               blk_mq_free_tag_set(&nbd->tag_set);
-       }
+       del_gendisk(disk);
+       blk_cleanup_disk(disk);
+       blk_mq_free_tag_set(&nbd->tag_set);
 
        /*
-        * Place this in the last just before the nbd is freed to
-        * make sure that the disk and the related kobject are also
-        * totally removed to avoid duplicate creation of the same
-        * one.
+        * Remove from idr after del_gendisk() completes, so if the same ID is
+        * reused, the following add_disk() will succeed.
         */
-       if (test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags) && nbd->destroy_complete)
-               complete(nbd->destroy_complete);
+       mutex_lock(&nbd_index_mutex);
+       idr_remove(&nbd_index_idr, nbd->index);
+       mutex_unlock(&nbd_index_mutex);
 
        kfree(nbd);
 }
 
+static void nbd_dev_remove_work(struct work_struct *work)
+{
+       nbd_dev_remove(container_of(work, struct nbd_device, remove_work));
+}
+
 static void nbd_put(struct nbd_device *nbd)
 {
-       if (refcount_dec_and_mutex_lock(&nbd->refs,
-                                       &nbd_index_mutex)) {
-               idr_remove(&nbd_index_idr, nbd->index);
+       if (!refcount_dec_and_test(&nbd->refs))
+               return;
+
+       /* Call del_gendisk() asynchrounously to prevent deadlock */
+       if (test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags))
+               queue_work(nbd_del_wq, &nbd->remove_work);
+       else
                nbd_dev_remove(nbd);
-               mutex_unlock(&nbd_index_mutex);
-       }
 }
 
 static int nbd_disconnected(struct nbd_config *config)
@@ -818,6 +823,10 @@ static bool nbd_clear_req(struct request *req, void *data, bool reserved)
 {
        struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
 
+       /* don't abort one completed request */
+       if (blk_mq_request_completed(req))
+               return true;
+
        mutex_lock(&cmd->lock);
        cmd->status = BLK_STS_IOERR;
        mutex_unlock(&cmd->lock);
@@ -1384,6 +1393,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
                       unsigned int cmd, unsigned long arg)
 {
        struct nbd_config *config = nbd->config;
+       loff_t bytesize;
 
        switch (cmd) {
        case NBD_DISCONNECT:
@@ -1398,8 +1408,9 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
        case NBD_SET_SIZE:
                return nbd_set_size(nbd, arg, config->blksize);
        case NBD_SET_SIZE_BLOCKS:
-               return nbd_set_size(nbd, arg * config->blksize,
-                                   config->blksize);
+               if (check_mul_overflow((loff_t)arg, config->blksize, &bytesize))
+                       return -EINVAL;
+               return nbd_set_size(nbd, bytesize, config->blksize);
        case NBD_SET_TIMEOUT:
                nbd_set_cmd_timeout(nbd, arg);
                return 0;
@@ -1661,7 +1672,7 @@ static const struct blk_mq_ops nbd_mq_ops = {
        .timeout        = nbd_xmit_timeout,
 };
 
-static int nbd_dev_add(int index)
+static struct nbd_device *nbd_dev_add(int index, unsigned int refs)
 {
        struct nbd_device *nbd;
        struct gendisk *disk;
@@ -1679,13 +1690,14 @@ static int nbd_dev_add(int index)
        nbd->tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
                BLK_MQ_F_BLOCKING;
        nbd->tag_set.driver_data = nbd;
-       nbd->destroy_complete = NULL;
+       INIT_WORK(&nbd->remove_work, nbd_dev_remove_work);
        nbd->backend = NULL;
 
        err = blk_mq_alloc_tag_set(&nbd->tag_set);
        if (err)
                goto out_free_nbd;
 
+       mutex_lock(&nbd_index_mutex);
        if (index >= 0) {
                err = idr_alloc(&nbd_index_idr, nbd, index, index + 1,
                                GFP_KERNEL);
@@ -1696,9 +1708,10 @@ static int nbd_dev_add(int index)
                if (err >= 0)
                        index = err;
        }
+       nbd->index = index;
+       mutex_unlock(&nbd_index_mutex);
        if (err < 0)
                goto out_free_tags;
-       nbd->index = index;
 
        disk = blk_mq_alloc_disk(&nbd->tag_set, NULL);
        if (IS_ERR(disk)) {
@@ -1722,38 +1735,65 @@ static int nbd_dev_add(int index)
 
        mutex_init(&nbd->config_lock);
        refcount_set(&nbd->config_refs, 0);
-       refcount_set(&nbd->refs, 1);
+       /*
+        * Start out with a zero references to keep other threads from using
+        * this device until it is fully initialized.
+        */
+       refcount_set(&nbd->refs, 0);
        INIT_LIST_HEAD(&nbd->list);
        disk->major = NBD_MAJOR;
+
+       /* Too big first_minor can cause duplicate creation of
+        * sysfs files/links, since first_minor will be truncated to
+        * byte in __device_add_disk().
+        */
        disk->first_minor = index << part_shift;
+       if (disk->first_minor > 0xff) {
+               err = -EINVAL;
+               goto out_free_idr;
+       }
+
        disk->minors = 1 << part_shift;
        disk->fops = &nbd_fops;
        disk->private_data = nbd;
        sprintf(disk->disk_name, "nbd%d", index);
        add_disk(disk);
+
+       /*
+        * Now publish the device.
+        */
+       refcount_set(&nbd->refs, refs);
        nbd_total_devices++;
-       return index;
+       return nbd;
 
 out_free_idr:
+       mutex_lock(&nbd_index_mutex);
        idr_remove(&nbd_index_idr, index);
+       mutex_unlock(&nbd_index_mutex);
 out_free_tags:
        blk_mq_free_tag_set(&nbd->tag_set);
 out_free_nbd:
        kfree(nbd);
 out:
-       return err;
+       return ERR_PTR(err);
 }
 
-static int find_free_cb(int id, void *ptr, void *data)
+static struct nbd_device *nbd_find_get_unused(void)
 {
-       struct nbd_device *nbd = ptr;
-       struct nbd_device **found = data;
+       struct nbd_device *nbd;
+       int id;
 
-       if (!refcount_read(&nbd->config_refs)) {
-               *found = nbd;
-               return 1;
+       lockdep_assert_held(&nbd_index_mutex);
+
+       idr_for_each_entry(&nbd_index_idr, nbd, id) {
+               if (refcount_read(&nbd->config_refs) ||
+                   test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags))
+                       continue;
+               if (refcount_inc_not_zero(&nbd->refs))
+                       return nbd;
        }
-       return 0;
+
+       return NULL;
 }
 
 /* Netlink interface. */
@@ -1802,8 +1842,7 @@ static int nbd_genl_size_set(struct genl_info *info, struct nbd_device *nbd)
 
 static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info)
 {
-       DECLARE_COMPLETION_ONSTACK(destroy_complete);
-       struct nbd_device *nbd = NULL;
+       struct nbd_device *nbd;
        struct nbd_config *config;
        int index = -1;
        int ret;
@@ -1825,55 +1864,29 @@ static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info)
 again:
        mutex_lock(&nbd_index_mutex);
        if (index == -1) {
-               ret = idr_for_each(&nbd_index_idr, &find_free_cb, &nbd);
-               if (ret == 0) {
-                       int new_index;
-                       new_index = nbd_dev_add(-1);
-                       if (new_index < 0) {
-                               mutex_unlock(&nbd_index_mutex);
-                               printk(KERN_ERR "nbd: failed to add new device\n");
-                               return new_index;
-                       }
-                       nbd = idr_find(&nbd_index_idr, new_index);
-               }
+               nbd = nbd_find_get_unused();
        } else {
                nbd = idr_find(&nbd_index_idr, index);
-               if (!nbd) {
-                       ret = nbd_dev_add(index);
-                       if (ret < 0) {
+               if (nbd) {
+                       if ((test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags) &&
+                            test_bit(NBD_DISCONNECT_REQUESTED, &nbd->flags)) ||
+                           !refcount_inc_not_zero(&nbd->refs)) {
                                mutex_unlock(&nbd_index_mutex);
-                               printk(KERN_ERR "nbd: failed to add new device\n");
-                               return ret;
+                               pr_err("nbd: device at index %d is going down\n",
+                                       index);
+                               return -EINVAL;
                        }
-                       nbd = idr_find(&nbd_index_idr, index);
                }
        }
-       if (!nbd) {
-               printk(KERN_ERR "nbd: couldn't find device at index %d\n",
-                      index);
-               mutex_unlock(&nbd_index_mutex);
-               return -EINVAL;
-       }
-
-       if (test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags) &&
-           test_bit(NBD_DISCONNECT_REQUESTED, &nbd->flags)) {
-               nbd->destroy_complete = &destroy_complete;
-               mutex_unlock(&nbd_index_mutex);
-
-               /* Wait untill the the nbd stuff is totally destroyed */
-               wait_for_completion(&destroy_complete);
-               goto again;
-       }
+       mutex_unlock(&nbd_index_mutex);
 
-       if (!refcount_inc_not_zero(&nbd->refs)) {
-               mutex_unlock(&nbd_index_mutex);
-               if (index == -1)
-                       goto again;
-               printk(KERN_ERR "nbd: device at index %d is going down\n",
-                      index);
-               return -EINVAL;
+       if (!nbd) {
+               nbd = nbd_dev_add(index, 2);
+               if (IS_ERR(nbd)) {
+                       pr_err("nbd: failed to add new device\n");
+                       return PTR_ERR(nbd);
+               }
        }
-       mutex_unlock(&nbd_index_mutex);
 
        mutex_lock(&nbd->config_lock);
        if (refcount_read(&nbd->config_refs)) {
@@ -2004,15 +2017,19 @@ static void nbd_disconnect_and_put(struct nbd_device *nbd)
 {
        mutex_lock(&nbd->config_lock);
        nbd_disconnect(nbd);
-       nbd_clear_sock(nbd);
-       mutex_unlock(&nbd->config_lock);
+       sock_shutdown(nbd);
        /*
         * Make sure recv thread has finished, so it does not drop the last
         * config ref and try to destroy the workqueue from inside the work
-        * queue.
+        * queue. And this also ensures that we can safely call nbd_clear_que()
+        * to cancel the inflight I/Os.
         */
        if (nbd->recv_workq)
                flush_workqueue(nbd->recv_workq);
+       nbd_clear_que(nbd);
+       nbd->task_setup = NULL;
+       mutex_unlock(&nbd->config_lock);
+
        if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
                               &nbd->config->runtime_flags))
                nbd_config_put(nbd);
@@ -2416,16 +2433,21 @@ static int __init nbd_init(void)
        if (register_blkdev(NBD_MAJOR, "nbd"))
                return -EIO;
 
+       nbd_del_wq = alloc_workqueue("nbd-del", WQ_UNBOUND, 0);
+       if (!nbd_del_wq) {
+               unregister_blkdev(NBD_MAJOR, "nbd");
+               return -ENOMEM;
+       }
+
        if (genl_register_family(&nbd_genl_family)) {
+               destroy_workqueue(nbd_del_wq);
                unregister_blkdev(NBD_MAJOR, "nbd");
                return -EINVAL;
        }
        nbd_dbg_init();
 
-       mutex_lock(&nbd_index_mutex);
        for (i = 0; i < nbds_max; i++)
-               nbd_dev_add(i);
-       mutex_unlock(&nbd_index_mutex);
+               nbd_dev_add(i, 1);
        return 0;
 }
 
@@ -2434,7 +2456,10 @@ static int nbd_exit_cb(int id, void *ptr, void *data)
        struct list_head *list = (struct list_head *)data;
        struct nbd_device *nbd = ptr;
 
-       list_add_tail(&nbd->list, list);
+       /* Skip nbd that is being removed asynchronously */
+       if (refcount_read(&nbd->refs))
+               list_add_tail(&nbd->list, list);
+
        return 0;
 }
 
@@ -2457,6 +2482,9 @@ static void __exit nbd_cleanup(void)
                nbd_put(nbd);
        }
 
+       /* Also wait for nbd_dev_remove_work() to complete */
+       destroy_workqueue(nbd_del_wq);
+
        idr_destroy(&nbd_index_idr);
        genl_unregister_family(&nbd_genl_family);
        unregister_blkdev(NBD_MAJOR, "nbd");
index d734e9e..187d779 100644 (file)
 #include <linux/init.h>
 #include "null_blk.h"
 
-#define PAGE_SECTORS_SHIFT     (PAGE_SHIFT - SECTOR_SHIFT)
-#define PAGE_SECTORS           (1 << PAGE_SECTORS_SHIFT)
-#define SECTOR_MASK            (PAGE_SECTORS - 1)
-
 #define FREE_BATCH             16
 
 #define TICKS_PER_SEC          50ULL
@@ -1721,8 +1717,7 @@ static int null_gendisk_register(struct nullb *nullb)
                        return ret;
        }
 
-       add_disk(disk);
-       return 0;
+       return add_disk(disk);
 }
 
 static int null_init_tag_set(struct nullb *nullb, struct blk_mq_tag_set *set)
index 9b32989..675327d 100644 (file)
@@ -892,7 +892,7 @@ static void pd_probe_drive(struct pd_unit *disk)
                return;
 
        p = blk_mq_alloc_disk(&disk->tag_set, disk);
-       if (!p) {
+       if (IS_ERR(p)) {
                blk_mq_free_tag_set(&disk->tag_set);
                return;
        }
index 538446b..0f26b25 100644 (file)
@@ -1183,10 +1183,8 @@ try_next_bio:
        wakeup = (pd->write_congestion_on > 0
                        && pd->bio_queue_size <= pd->write_congestion_off);
        spin_unlock(&pd->lock);
-       if (wakeup) {
-               clear_bdi_congested(pd->disk->queue->backing_dev_info,
-                                       BLK_RW_ASYNC);
-       }
+       if (wakeup)
+               clear_bdi_congested(pd->disk->bdi, BLK_RW_ASYNC);
 
        pkt->sleep_time = max(PACKET_WAIT_TIME, 1);
        pkt_set_state(pkt, PACKET_WAITING_STATE);
@@ -2366,7 +2364,7 @@ static void pkt_make_request_write(struct request_queue *q, struct bio *bio)
        spin_lock(&pd->lock);
        if (pd->write_congestion_on > 0
            && pd->bio_queue_size >= pd->write_congestion_on) {
-               set_bdi_congested(q->backing_dev_info, BLK_RW_ASYNC);
+               set_bdi_congested(bio->bi_bdev->bd_disk->bdi, BLK_RW_ASYNC);
                do {
                        spin_unlock(&pd->lock);
                        congestion_wait(BLK_RW_ASYNC, HZ);
index f374ea2..8d51efb 100644 (file)
@@ -83,26 +83,12 @@ static void ps3disk_scatter_gather(struct ps3_storage_device *dev,
        unsigned int offset = 0;
        struct req_iterator iter;
        struct bio_vec bvec;
-       unsigned int i = 0;
-       size_t size;
-       void *buf;
 
        rq_for_each_segment(bvec, req, iter) {
-               unsigned long flags;
-               dev_dbg(&dev->sbd.core, "%s:%u: bio %u: %u sectors from %llu\n",
-                       __func__, __LINE__, i, bio_sectors(iter.bio),
-                       iter.bio->bi_iter.bi_sector);
-
-               size = bvec.bv_len;
-               buf = bvec_kmap_irq(&bvec, &flags);
                if (gather)
-                       memcpy(dev->bounce_buf+offset, buf, size);
+                       memcpy_from_bvec(dev->bounce_buf + offset, &bvec);
                else
-                       memcpy(buf, dev->bounce_buf+offset, size);
-               offset += size;
-               flush_kernel_dcache_page(bvec.bv_page);
-               bvec_kunmap_irq(buf, &flags);
-               i++;
+                       memcpy_to_bvec(&bvec, dev->bounce_buf + offset);
        }
 }
 
index 7fbf469..c7b19e1 100644 (file)
@@ -541,7 +541,7 @@ static struct bio *ps3vram_do_bio(struct ps3_system_bus_device *dev,
 
        bio_for_each_segment(bvec, bio, iter) {
                /* PS3 is ppc64, so we don't handle highmem */
-               char *ptr = page_address(bvec.bv_page) + bvec.bv_offset;
+               char *ptr = bvec_virt(&bvec);
                size_t len = bvec.bv_len, retlen;
 
                dev_dbg(&dev->core, "    %s %zu bytes at offset %llu\n", op,
index 90b947c..e65c9d7 100644 (file)
@@ -1219,24 +1219,13 @@ static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
        rbd_dev->mapping.size = 0;
 }
 
-static void zero_bvec(struct bio_vec *bv)
-{
-       void *buf;
-       unsigned long flags;
-
-       buf = bvec_kmap_irq(bv, &flags);
-       memset(buf, 0, bv->bv_len);
-       flush_dcache_page(bv->bv_page);
-       bvec_kunmap_irq(buf, &flags);
-}
-
 static void zero_bios(struct ceph_bio_iter *bio_pos, u32 off, u32 bytes)
 {
        struct ceph_bio_iter it = *bio_pos;
 
        ceph_bio_iter_advance(&it, off);
        ceph_bio_iter_advance_step(&it, bytes, ({
-               zero_bvec(&bv);
+               memzero_bvec(&bv);
        }));
 }
 
@@ -1246,7 +1235,7 @@ static void zero_bvecs(struct ceph_bvec_iter *bvec_pos, u32 off, u32 bytes)
 
        ceph_bvec_iter_advance(&it, off);
        ceph_bvec_iter_advance_step(&it, bytes, ({
-               zero_bvec(&bv);
+               memzero_bvec(&bv);
        }));
 }
 
@@ -2997,8 +2986,7 @@ static bool is_zero_bvecs(struct bio_vec *bvecs, u32 bytes)
        };
 
        ceph_bvec_iter_advance_step(&it, bytes, ({
-               if (memchr_inv(page_address(bv.bv_page) + bv.bv_offset, 0,
-                              bv.bv_len))
+               if (memchr_inv(bvec_virt(&bv), 0, bv.bv_len))
                        return false;
        }));
        return true;
index 324afdd..4b93fd8 100644 (file)
@@ -227,17 +227,17 @@ static ssize_t state_show(struct kobject *kobj,
 
        switch (dev->dev_state) {
        case DEV_STATE_INIT:
-               return snprintf(page, PAGE_SIZE, "init\n");
+               return sysfs_emit(page, "init\n");
        case DEV_STATE_MAPPED:
                /* TODO fix cli tool before changing to proper state */
-               return snprintf(page, PAGE_SIZE, "open\n");
+               return sysfs_emit(page, "open\n");
        case DEV_STATE_MAPPED_DISCONNECTED:
                /* TODO fix cli tool before changing to proper state */
-               return snprintf(page, PAGE_SIZE, "closed\n");
+               return sysfs_emit(page, "closed\n");
        case DEV_STATE_UNMAPPED:
-               return snprintf(page, PAGE_SIZE, "unmapped\n");
+               return sysfs_emit(page, "unmapped\n");
        default:
-               return snprintf(page, PAGE_SIZE, "unknown\n");
+               return sysfs_emit(page, "unknown\n");
        }
 }
 
@@ -263,7 +263,7 @@ static ssize_t mapping_path_show(struct kobject *kobj,
 
        dev = container_of(kobj, struct rnbd_clt_dev, kobj);
 
-       return scnprintf(page, PAGE_SIZE, "%s\n", dev->pathname);
+       return sysfs_emit(page, "%s\n", dev->pathname);
 }
 
 static struct kobj_attribute rnbd_clt_mapping_path_attr =
@@ -276,8 +276,7 @@ static ssize_t access_mode_show(struct kobject *kobj,
 
        dev = container_of(kobj, struct rnbd_clt_dev, kobj);
 
-       return snprintf(page, PAGE_SIZE, "%s\n",
-                       rnbd_access_mode_str(dev->access_mode));
+       return sysfs_emit(page, "%s\n", rnbd_access_mode_str(dev->access_mode));
 }
 
 static struct kobj_attribute rnbd_clt_access_mode =
@@ -286,8 +285,8 @@ static struct kobj_attribute rnbd_clt_access_mode =
 static ssize_t rnbd_clt_unmap_dev_show(struct kobject *kobj,
                                        struct kobj_attribute *attr, char *page)
 {
-       return scnprintf(page, PAGE_SIZE, "Usage: echo <normal|force> > %s\n",
-                        attr->attr.name);
+       return sysfs_emit(page, "Usage: echo <normal|force> > %s\n",
+                         attr->attr.name);
 }
 
 static ssize_t rnbd_clt_unmap_dev_store(struct kobject *kobj,
@@ -357,9 +356,8 @@ static ssize_t rnbd_clt_resize_dev_show(struct kobject *kobj,
                                         struct kobj_attribute *attr,
                                         char *page)
 {
-       return scnprintf(page, PAGE_SIZE,
-                        "Usage: echo <new size in sectors> > %s\n",
-                        attr->attr.name);
+       return sysfs_emit(page, "Usage: echo <new size in sectors> > %s\n",
+                         attr->attr.name);
 }
 
 static ssize_t rnbd_clt_resize_dev_store(struct kobject *kobj,
@@ -390,8 +388,7 @@ static struct kobj_attribute rnbd_clt_resize_dev_attr =
 static ssize_t rnbd_clt_remap_dev_show(struct kobject *kobj,
                                        struct kobj_attribute *attr, char *page)
 {
-       return scnprintf(page, PAGE_SIZE, "Usage: echo <1> > %s\n",
-                        attr->attr.name);
+       return sysfs_emit(page, "Usage: echo <1> > %s\n", attr->attr.name);
 }
 
 static ssize_t rnbd_clt_remap_dev_store(struct kobject *kobj,
@@ -436,7 +433,7 @@ static ssize_t session_show(struct kobject *kobj, struct kobj_attribute *attr,
 
        dev = container_of(kobj, struct rnbd_clt_dev, kobj);
 
-       return scnprintf(page, PAGE_SIZE, "%s\n", dev->sess->sessname);
+       return sysfs_emit(page, "%s\n", dev->sess->sessname);
 }
 
 static struct kobj_attribute rnbd_clt_session_attr =
@@ -499,8 +496,8 @@ static ssize_t rnbd_clt_map_device_show(struct kobject *kobj,
                                         struct kobj_attribute *attr,
                                         char *page)
 {
-       return scnprintf(page, PAGE_SIZE,
-                        "Usage: echo \"[dest_port=server port number] sessname=<name of the rtrs session> path=<[srcaddr@]dstaddr> [path=<[srcaddr@]dstaddr>] device_path=<full path on remote side> [access_mode=<ro|rw|migration>] [nr_poll_queues=<number of queues>]\" > %s\n\naddr ::= [ ip:<ipv4> | ip:<ipv6> | gid:<gid> ]\n",
+       return sysfs_emit(page,
+                         "Usage: echo \"[dest_port=server port number] sessname=<name of the rtrs session> path=<[srcaddr@]dstaddr> [path=<[srcaddr@]dstaddr>] device_path=<full path on remote side> [access_mode=<ro|rw|migration>] [nr_poll_queues=<number of queues>]\" > %s\n\naddr ::= [ ip:<ipv4> | ip:<ipv6> | gid:<gid> ]\n",
                         attr->attr.name);
 }
 
index e9cc413..bd4a41a 100644 (file)
@@ -271,7 +271,7 @@ unlock:
         */
        if (cpu_q)
                *cpup = cpu_q->cpu;
-       put_cpu_var(sess->cpu_rr);
+       put_cpu_ptr(sess->cpu_rr);
 
        if (q)
                rnbd_clt_dev_requeue(q);
index acf5fce..4db98e0 100644 (file)
@@ -90,8 +90,8 @@ static ssize_t read_only_show(struct kobject *kobj, struct kobj_attribute *attr,
 
        sess_dev = container_of(kobj, struct rnbd_srv_sess_dev, kobj);
 
-       return scnprintf(page, PAGE_SIZE, "%d\n",
-                        !(sess_dev->open_flags & FMODE_WRITE));
+       return sysfs_emit(page, "%d\n",
+                         !(sess_dev->open_flags & FMODE_WRITE));
 }
 
 static struct kobj_attribute rnbd_srv_dev_session_ro_attr =
@@ -105,8 +105,8 @@ static ssize_t access_mode_show(struct kobject *kobj,
 
        sess_dev = container_of(kobj, struct rnbd_srv_sess_dev, kobj);
 
-       return scnprintf(page, PAGE_SIZE, "%s\n",
-                        rnbd_access_mode_str(sess_dev->access_mode));
+       return sysfs_emit(page, "%s\n",
+                         rnbd_access_mode_str(sess_dev->access_mode));
 }
 
 static struct kobj_attribute rnbd_srv_dev_session_access_mode_attr =
@@ -119,7 +119,7 @@ static ssize_t mapping_path_show(struct kobject *kobj,
 
        sess_dev = container_of(kobj, struct rnbd_srv_sess_dev, kobj);
 
-       return scnprintf(page, PAGE_SIZE, "%s\n", sess_dev->pathname);
+       return sysfs_emit(page, "%s\n", sess_dev->pathname);
 }
 
 static struct kobj_attribute rnbd_srv_dev_session_mapping_path_attr =
@@ -128,8 +128,8 @@ static struct kobj_attribute rnbd_srv_dev_session_mapping_path_attr =
 static ssize_t rnbd_srv_dev_session_force_close_show(struct kobject *kobj,
                                        struct kobj_attribute *attr, char *page)
 {
-       return scnprintf(page, PAGE_SIZE, "Usage: echo 1 > %s\n",
-                        attr->attr.name);
+       return sysfs_emit(page, "Usage: echo 1 > %s\n",
+                         attr->attr.name);
 }
 
 static ssize_t rnbd_srv_dev_session_force_close_store(struct kobject *kobj,
index 7b54353..420cd95 100644 (file)
@@ -1373,7 +1373,7 @@ static void carm_free_disk(struct carm_host *host, unsigned int port_no)
        if (!disk)
                return;
 
-       if (disk->flags & GENHD_FL_UP)
+       if (host->state > HST_DEV_ACTIVATE)
                del_gendisk(disk);
        blk_cleanup_disk(disk);
 }
index 4b49df2..57c6ae7 100644 (file)
@@ -166,11 +166,8 @@ static inline void virtblk_request_done(struct request *req)
 {
        struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
 
-       if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
-               kfree(page_address(req->special_vec.bv_page) +
-                     req->special_vec.bv_offset);
-       }
-
+       if (req->rq_flags & RQF_SPECIAL_PAYLOAD)
+               kfree(bvec_virt(&req->special_vec));
        blk_mq_end_request(req, virtblk_result(vbr));
 }
 
@@ -692,6 +689,28 @@ static const struct blk_mq_ops virtio_mq_ops = {
 static unsigned int virtblk_queue_depth;
 module_param_named(queue_depth, virtblk_queue_depth, uint, 0444);
 
+static int virtblk_validate(struct virtio_device *vdev)
+{
+       u32 blk_size;
+
+       if (!vdev->config->get) {
+               dev_err(&vdev->dev, "%s failure: config access disabled\n",
+                       __func__);
+               return -EINVAL;
+       }
+
+       if (!virtio_has_feature(vdev, VIRTIO_BLK_F_BLK_SIZE))
+               return 0;
+
+       blk_size = virtio_cread32(vdev,
+                       offsetof(struct virtio_blk_config, blk_size));
+
+       if (blk_size < SECTOR_SIZE || blk_size > PAGE_SIZE)
+               __virtio_clear_bit(vdev, VIRTIO_BLK_F_BLK_SIZE);
+
+       return 0;
+}
+
 static int virtblk_probe(struct virtio_device *vdev)
 {
        struct virtio_blk *vblk;
@@ -703,12 +722,6 @@ static int virtblk_probe(struct virtio_device *vdev)
        u8 physical_block_exp, alignment_offset;
        unsigned int queue_depth;
 
-       if (!vdev->config->get) {
-               dev_err(&vdev->dev, "%s failure: config access disabled\n",
-                       __func__);
-               return -EINVAL;
-       }
-
        err = ida_simple_get(&vd_index_ida, 0, minor_to_index(1 << MINORBITS),
                             GFP_KERNEL);
        if (err < 0)
@@ -823,6 +836,14 @@ static int virtblk_probe(struct virtio_device *vdev)
        else
                blk_size = queue_logical_block_size(q);
 
+       if (unlikely(blk_size < SECTOR_SIZE || blk_size > PAGE_SIZE)) {
+               dev_err(&vdev->dev,
+                       "block size is changed unexpectedly, now is %u\n",
+                       blk_size);
+               err = -EINVAL;
+               goto out_cleanup_disk;
+       }
+
        /* Use topology information if available */
        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
                                   struct virtio_blk_config, physical_block_exp,
@@ -878,9 +899,14 @@ static int virtblk_probe(struct virtio_device *vdev)
        virtblk_update_capacity(vblk, false);
        virtio_device_ready(vdev);
 
-       device_add_disk(&vdev->dev, vblk->disk, virtblk_attr_groups);
+       err = device_add_disk(&vdev->dev, vblk->disk, virtblk_attr_groups);
+       if (err)
+               goto out_cleanup_disk;
+
        return 0;
 
+out_cleanup_disk:
+       blk_cleanup_disk(vblk->disk);
 out_free_tags:
        blk_mq_free_tag_set(&vblk->tag_set);
 out_free_vq:
@@ -983,6 +1009,7 @@ static struct virtio_driver virtio_blk = {
        .driver.name                    = KBUILD_MODNAME,
        .driver.owner                   = THIS_MODULE,
        .id_table                       = id_table,
+       .validate                       = virtblk_validate,
        .probe                          = virtblk_probe,
        .remove                         = virtblk_remove,
        .config_changed                 = virtblk_config_changed,
index d83fee2..715bfa8 100644 (file)
@@ -1092,7 +1092,6 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
        err = xlbd_reserve_minors(minor, nr_minors);
        if (err)
                return err;
-       err = -ENODEV;
 
        memset(&info->tag_set, 0, sizeof(info->tag_set));
        info->tag_set.ops = &blkfront_mq_ops;
index 38cb116..148a4dd 100644 (file)
@@ -100,6 +100,7 @@ static const char * const clock_names[SYSC_MAX_CLOCKS] = {
  * @cookie: data used by legacy platform callbacks
  * @name: name if available
  * @revision: interconnect target module revision
+ * @reserved: target module is reserved and already in use
  * @enabled: sysc runtime enabled status
  * @needs_resume: runtime resume needed on resume from suspend
  * @child_needs_resume: runtime resume needed for child on resume from suspend
@@ -130,6 +131,7 @@ struct sysc {
        struct ti_sysc_cookie cookie;
        const char *name;
        u32 revision;
+       unsigned int reserved:1;
        unsigned int enabled:1;
        unsigned int needs_resume:1;
        unsigned int child_needs_resume:1;
@@ -2951,6 +2953,8 @@ static int sysc_init_soc(struct sysc *ddata)
                case SOC_3430 ... SOC_3630:
                        sysc_add_disabled(0x48304000);  /* timer12 */
                        break;
+               case SOC_AM3:
+                       sysc_add_disabled(0x48310000);  /* rng */
                default:
                        break;
                }
@@ -3093,7 +3097,9 @@ static int sysc_probe(struct platform_device *pdev)
                return error;
 
        error = sysc_check_active_timer(ddata);
-       if (error)
+       if (error == -ENXIO)
+               ddata->reserved = true;
+       else if (error)
                return error;
 
        error = sysc_get_clocks(ddata);
@@ -3130,11 +3136,15 @@ static int sysc_probe(struct platform_device *pdev)
        sysc_show_registers(ddata);
 
        ddata->dev->type = &sysc_device_type;
-       error = of_platform_populate(ddata->dev->of_node, sysc_match_table,
-                                    pdata ? pdata->auxdata : NULL,
-                                    ddata->dev);
-       if (error)
-               goto err;
+
+       if (!ddata->reserved) {
+               error = of_platform_populate(ddata->dev->of_node,
+                                            sysc_match_table,
+                                            pdata ? pdata->auxdata : NULL,
+                                            ddata->dev);
+               if (error)
+                       goto err;
+       }
 
        INIT_DELAYED_WORK(&ddata->idle_work, ti_sysc_idle);
 
index 3f166c8..239eca4 100644 (file)
@@ -524,6 +524,20 @@ config HW_RANDOM_XIPHERA
          To compile this driver as a module, choose M here: the
          module will be called xiphera-trng.
 
+config HW_RANDOM_ARM_SMCCC_TRNG
+       tristate "Arm SMCCC TRNG firmware interface support"
+       depends on HAVE_ARM_SMCCC_DISCOVERY
+       default HW_RANDOM
+       help
+         Say 'Y' to enable the True Random Number Generator driver using
+         the Arm SMCCC TRNG firmware interface. This reads entropy from
+         higher exception levels (firmware, hypervisor). Uses SMCCC for
+         communicating with the firmware:
+         https://developer.arm.com/documentation/den0098/latest/
+
+         To compile this driver as a module, choose M here: the
+         module will be called arm_smccc_trng.
+
 endif # HW_RANDOM
 
 config UML_RANDOM
index 8933fad..a5a1c76 100644 (file)
@@ -45,3 +45,4 @@ obj-$(CONFIG_HW_RANDOM_OPTEE) += optee-rng.o
 obj-$(CONFIG_HW_RANDOM_NPCM) += npcm-rng.o
 obj-$(CONFIG_HW_RANDOM_CCTRNG) += cctrng.o
 obj-$(CONFIG_HW_RANDOM_XIPHERA) += xiphera-trng.o
+obj-$(CONFIG_HW_RANDOM_ARM_SMCCC_TRNG) += arm_smccc_trng.o
index d8d4ef5..c22d418 100644 (file)
@@ -124,7 +124,7 @@ static struct hwrng amd_rng = {
        .read           = amd_rng_read,
 };
 
-static int __init mod_init(void)
+static int __init amd_rng_mod_init(void)
 {
        int err;
        struct pci_dev *pdev = NULL;
@@ -188,7 +188,7 @@ out:
        return err;
 }
 
-static void __exit mod_exit(void)
+static void __exit amd_rng_mod_exit(void)
 {
        struct amd768_priv *priv;
 
@@ -203,8 +203,8 @@ static void __exit mod_exit(void)
        kfree(priv);
 }
 
-module_init(mod_init);
-module_exit(mod_exit);
+module_init(amd_rng_mod_init);
+module_exit(amd_rng_mod_exit);
 
 MODULE_AUTHOR("The Linux Kernel team");
 MODULE_DESCRIPTION("H/W RNG driver for AMD chipsets");
diff --git a/drivers/char/hw_random/arm_smccc_trng.c b/drivers/char/hw_random/arm_smccc_trng.c
new file mode 100644 (file)
index 0000000..b24ac39
--- /dev/null
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Randomness driver for the ARM SMCCC TRNG Firmware Interface
+ * https://developer.arm.com/documentation/den0098/latest/
+ *
+ *  Copyright (C) 2020 Arm Ltd.
+ *
+ * The ARM TRNG firmware interface specifies a protocol to read entropy
+ * from a higher exception level, to abstract from any machine specific
+ * implementations and allow easier use in hypervisors.
+ *
+ * The firmware interface is realised using the SMCCC specification.
+ */
+
+#include <linux/bits.h>
+#include <linux/device.h>
+#include <linux/hw_random.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/arm-smccc.h>
+
+#ifdef CONFIG_ARM64
+#define ARM_SMCCC_TRNG_RND     ARM_SMCCC_TRNG_RND64
+#define MAX_BITS_PER_CALL      (3 * 64UL)
+#else
+#define ARM_SMCCC_TRNG_RND     ARM_SMCCC_TRNG_RND32
+#define MAX_BITS_PER_CALL      (3 * 32UL)
+#endif
+
+/* We don't want to allow the firmware to stall us forever. */
+#define SMCCC_TRNG_MAX_TRIES   20
+
+#define SMCCC_RET_TRNG_INVALID_PARAMETER       -2
+#define SMCCC_RET_TRNG_NO_ENTROPY              -3
+
+static int copy_from_registers(char *buf, struct arm_smccc_res *res,
+                              size_t bytes)
+{
+       unsigned int chunk, copied;
+
+       if (bytes == 0)
+               return 0;
+
+       chunk = min(bytes, sizeof(long));
+       memcpy(buf, &res->a3, chunk);
+       copied = chunk;
+       if (copied >= bytes)
+               return copied;
+
+       chunk = min((bytes - copied), sizeof(long));
+       memcpy(&buf[copied], &res->a2, chunk);
+       copied += chunk;
+       if (copied >= bytes)
+               return copied;
+
+       chunk = min((bytes - copied), sizeof(long));
+       memcpy(&buf[copied], &res->a1, chunk);
+
+       return copied + chunk;
+}
+
+static int smccc_trng_read(struct hwrng *rng, void *data, size_t max, bool wait)
+{
+       struct arm_smccc_res res;
+       u8 *buf = data;
+       unsigned int copied = 0;
+       int tries = 0;
+
+       while (copied < max) {
+               size_t bits = min_t(size_t, (max - copied) * BITS_PER_BYTE,
+                                 MAX_BITS_PER_CALL);
+
+               arm_smccc_1_1_invoke(ARM_SMCCC_TRNG_RND, bits, &res);
+               if ((int)res.a0 < 0)
+                       return (int)res.a0;
+
+               switch ((int)res.a0) {
+               case SMCCC_RET_SUCCESS:
+                       copied += copy_from_registers(buf + copied, &res,
+                                                     bits / BITS_PER_BYTE);
+                       tries = 0;
+                       break;
+               case SMCCC_RET_TRNG_NO_ENTROPY:
+                       if (!wait)
+                               return copied;
+                       tries++;
+                       if (tries >= SMCCC_TRNG_MAX_TRIES)
+                               return copied;
+                       cond_resched();
+                       break;
+               }
+       }
+
+       return copied;
+}
+
+static int smccc_trng_probe(struct platform_device *pdev)
+{
+       struct hwrng *trng;
+
+       trng = devm_kzalloc(&pdev->dev, sizeof(*trng), GFP_KERNEL);
+       if (!trng)
+               return -ENOMEM;
+
+       trng->name = "smccc_trng";
+       trng->read = smccc_trng_read;
+
+       platform_set_drvdata(pdev, trng);
+
+       return devm_hwrng_register(&pdev->dev, trng);
+}
+
+static struct platform_driver smccc_trng_driver = {
+       .driver = {
+               .name           = "smccc_trng",
+       },
+       .probe          = smccc_trng_probe,
+};
+module_platform_driver(smccc_trng_driver);
+
+MODULE_ALIAS("platform:smccc_trng");
+MODULE_AUTHOR("Andre Przywara");
+MODULE_LICENSE("GPL");
index e1d421a..138ce43 100644 (file)
@@ -83,7 +83,7 @@ static struct hwrng geode_rng = {
 };
 
 
-static int __init mod_init(void)
+static int __init geode_rng_init(void)
 {
        int err = -ENODEV;
        struct pci_dev *pdev = NULL;
@@ -124,7 +124,7 @@ err_unmap:
        goto out;
 }
 
-static void __exit mod_exit(void)
+static void __exit geode_rng_exit(void)
 {
        void __iomem *mem = (void __iomem *)geode_rng.priv;
 
@@ -132,8 +132,8 @@ static void __exit mod_exit(void)
        iounmap(mem);
 }
 
-module_init(mod_init);
-module_exit(mod_exit);
+module_init(geode_rng_init);
+module_exit(geode_rng_exit);
 
 MODULE_DESCRIPTION("H/W RNG driver for AMD Geode LX CPUs");
 MODULE_LICENSE("GPL");
index d740b88..7b171cb 100644 (file)
@@ -325,7 +325,7 @@ PFX "RNG, try using the 'no_fwh_detect' option.\n";
 }
 
 
-static int __init mod_init(void)
+static int __init intel_rng_mod_init(void)
 {
        int err = -ENODEV;
        int i;
@@ -403,7 +403,7 @@ out:
 
 }
 
-static void __exit mod_exit(void)
+static void __exit intel_rng_mod_exit(void)
 {
        void __iomem *mem = (void __iomem *)intel_rng.priv;
 
@@ -411,8 +411,8 @@ static void __exit mod_exit(void)
        iounmap(mem);
 }
 
-module_init(mod_init);
-module_exit(mod_exit);
+module_init(intel_rng_mod_init);
+module_exit(intel_rng_mod_exit);
 
 MODULE_DESCRIPTION("H/W RNG driver for Intel chipsets");
 MODULE_LICENSE("GPL");
index 39943bc..7444cc1 100644 (file)
@@ -192,7 +192,7 @@ static struct hwrng via_rng = {
 };
 
 
-static int __init mod_init(void)
+static int __init via_rng_mod_init(void)
 {
        int err;
 
@@ -209,13 +209,13 @@ static int __init mod_init(void)
 out:
        return err;
 }
-module_init(mod_init);
+module_init(via_rng_mod_init);
 
-static void __exit mod_exit(void)
+static void __exit via_rng_mod_exit(void)
 {
        hwrng_unregister(&via_rng);
 }
-module_exit(mod_exit);
+module_exit(via_rng_mod_exit);
 
 static struct x86_cpu_id __maybe_unused via_rng_cpu_id[] = {
        X86_MATCH_FEATURE(X86_FEATURE_XSTORE, NULL),
index 4308f9c..d6ba644 100644 (file)
@@ -89,7 +89,6 @@ config TCG_TIS_SYNQUACER
 config TCG_TIS_I2C_CR50
        tristate "TPM Interface Specification 2.0 Interface (I2C - CR50)"
        depends on I2C
-       select TCG_CR50
        help
          This is a driver for the Google cr50 I2C TPM interface which is a
          custom microcontroller and requires a custom i2c protocol interface
index 2ccdf8a..6e32355 100644 (file)
@@ -254,11 +254,11 @@ static int ftpm_tee_probe(struct device *dev)
        pvt_data->session = sess_arg.session;
 
        /* Allocate dynamic shared memory with fTPM TA */
-       pvt_data->shm = tee_shm_alloc(pvt_data->ctx,
-                                     MAX_COMMAND_SIZE + MAX_RESPONSE_SIZE,
-                                     TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
+       pvt_data->shm = tee_shm_alloc_kernel_buf(pvt_data->ctx,
+                                                MAX_COMMAND_SIZE +
+                                                MAX_RESPONSE_SIZE);
        if (IS_ERR(pvt_data->shm)) {
-               dev_err(dev, "%s: tee_shm_alloc failed\n", __func__);
+               dev_err(dev, "%s: tee_shm_alloc_kernel_buf failed\n", __func__);
                rc = -ENOMEM;
                goto out_shm_alloc;
        }
index 9036047..3af4c07 100644 (file)
@@ -106,17 +106,12 @@ static int tpm_ibmvtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
 {
        struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
        u16 len;
-       int sig;
 
        if (!ibmvtpm->rtce_buf) {
                dev_err(ibmvtpm->dev, "ibmvtpm device is not ready\n");
                return 0;
        }
 
-       sig = wait_event_interruptible(ibmvtpm->wq, !ibmvtpm->tpm_processing_cmd);
-       if (sig)
-               return -EINTR;
-
        len = ibmvtpm->res_len;
 
        if (count < len) {
@@ -237,7 +232,7 @@ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
         * set the processing flag before the Hcall, since we may get the
         * result (interrupt) before even being able to check rc.
         */
-       ibmvtpm->tpm_processing_cmd = true;
+       ibmvtpm->tpm_processing_cmd = 1;
 
 again:
        rc = ibmvtpm_send_crq(ibmvtpm->vdev,
@@ -255,7 +250,7 @@ again:
                        goto again;
                }
                dev_err(ibmvtpm->dev, "tpm_ibmvtpm_send failed rc=%d\n", rc);
-               ibmvtpm->tpm_processing_cmd = false;
+               ibmvtpm->tpm_processing_cmd = 0;
        }
 
        spin_unlock(&ibmvtpm->rtce_lock);
@@ -269,7 +264,9 @@ static void tpm_ibmvtpm_cancel(struct tpm_chip *chip)
 
 static u8 tpm_ibmvtpm_status(struct tpm_chip *chip)
 {
-       return 0;
+       struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
+
+       return ibmvtpm->tpm_processing_cmd;
 }
 
 /**
@@ -457,7 +454,7 @@ static const struct tpm_class_ops tpm_ibmvtpm = {
        .send = tpm_ibmvtpm_send,
        .cancel = tpm_ibmvtpm_cancel,
        .status = tpm_ibmvtpm_status,
-       .req_complete_mask = 0,
+       .req_complete_mask = 1,
        .req_complete_val = 0,
        .req_canceled = tpm_ibmvtpm_req_canceled,
 };
@@ -550,7 +547,7 @@ static void ibmvtpm_crq_process(struct ibmvtpm_crq *crq,
                case VTPM_TPM_COMMAND_RES:
                        /* len of the data in rtce buffer */
                        ibmvtpm->res_len = be16_to_cpu(crq->len);
-                       ibmvtpm->tpm_processing_cmd = false;
+                       ibmvtpm->tpm_processing_cmd = 0;
                        wake_up_interruptible(&ibmvtpm->wq);
                        return;
                default:
@@ -688,8 +685,15 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
                goto init_irq_cleanup;
        }
 
-       if (!strcmp(id->compat, "IBM,vtpm20")) {
+
+       if (!strcmp(id->compat, "IBM,vtpm20"))
                chip->flags |= TPM_CHIP_FLAG_TPM2;
+
+       rc = tpm_get_timeouts(chip);
+       if (rc)
+               goto init_irq_cleanup;
+
+       if (chip->flags & TPM_CHIP_FLAG_TPM2) {
                rc = tpm2_get_cc_attrs_tbl(chip);
                if (rc)
                        goto init_irq_cleanup;
index b92aa7d..51198b1 100644 (file)
@@ -41,7 +41,7 @@ struct ibmvtpm_dev {
        wait_queue_head_t wq;
        u16 res_len;
        u32 vtpm_version;
-       bool tpm_processing_cmd;
+       u8 tpm_processing_cmd;
 };
 
 #define CRQ_RES_BUF_SIZE       PAGE_SIZE
index 44dde2f..c892781 100644 (file)
@@ -639,12 +639,6 @@ static const struct tpm_class_ops cr50_i2c = {
        .req_canceled = &tpm_cr50_i2c_req_canceled,
 };
 
-static const struct i2c_device_id cr50_i2c_table[] = {
-       {"cr50_i2c", 0},
-       {}
-};
-MODULE_DEVICE_TABLE(i2c, cr50_i2c_table);
-
 #ifdef CONFIG_ACPI
 static const struct acpi_device_id cr50_i2c_acpi_id[] = {
        { "GOOG0005", 0 },
@@ -670,8 +664,7 @@ MODULE_DEVICE_TABLE(of, of_cr50_i2c_match);
  * - 0:                Success.
  * - -errno:   A POSIX error code.
  */
-static int tpm_cr50_i2c_probe(struct i2c_client *client,
-                             const struct i2c_device_id *id)
+static int tpm_cr50_i2c_probe(struct i2c_client *client)
 {
        struct tpm_i2c_cr50_priv_data *priv;
        struct device *dev = &client->dev;
@@ -774,8 +767,7 @@ static int tpm_cr50_i2c_remove(struct i2c_client *client)
 static SIMPLE_DEV_PM_OPS(cr50_i2c_pm, tpm_pm_suspend, tpm_pm_resume);
 
 static struct i2c_driver cr50_i2c_driver = {
-       .id_table = cr50_i2c_table,
-       .probe = tpm_cr50_i2c_probe,
+       .probe_new = tpm_cr50_i2c_probe,
        .remove = tpm_cr50_i2c_remove,
        .driver = {
                .name = "cr50_i2c",
index be16076..f9d5b73 100644 (file)
@@ -92,13 +92,20 @@ int __must_check devm_clk_bulk_get_optional(struct device *dev, int num_clks,
 }
 EXPORT_SYMBOL_GPL(devm_clk_bulk_get_optional);
 
+static void devm_clk_bulk_release_all(struct device *dev, void *res)
+{
+       struct clk_bulk_devres *devres = res;
+
+       clk_bulk_put_all(devres->num_clks, devres->clks);
+}
+
 int __must_check devm_clk_bulk_get_all(struct device *dev,
                                       struct clk_bulk_data **clks)
 {
        struct clk_bulk_devres *devres;
        int ret;
 
-       devres = devres_alloc(devm_clk_bulk_release,
+       devres = devres_alloc(devm_clk_bulk_release_all,
                              sizeof(*devres), GFP_KERNEL);
        if (!devres)
                return -ENOMEM;
index 18117ce..5c75e3d 100644 (file)
@@ -526,7 +526,7 @@ struct stm32f4_pll {
 
 struct stm32f4_pll_post_div_data {
        int idx;
-       u8 pll_num;
+       int pll_idx;
        const char *name;
        const char *parent;
        u8 flag;
@@ -557,13 +557,13 @@ static const struct clk_div_table post_divr_table[] = {
 
 #define MAX_POST_DIV 3
 static const struct stm32f4_pll_post_div_data  post_div_data[MAX_POST_DIV] = {
-       { CLK_I2SQ_PDIV, PLL_I2S, "plli2s-q-div", "plli2s-q",
+       { CLK_I2SQ_PDIV, PLL_VCO_I2S, "plli2s-q-div", "plli2s-q",
                CLK_SET_RATE_PARENT, STM32F4_RCC_DCKCFGR, 0, 5, 0, NULL},
 
-       { CLK_SAIQ_PDIV, PLL_SAI, "pllsai-q-div", "pllsai-q",
+       { CLK_SAIQ_PDIV, PLL_VCO_SAI, "pllsai-q-div", "pllsai-q",
                CLK_SET_RATE_PARENT, STM32F4_RCC_DCKCFGR, 8, 5, 0, NULL },
 
-       { NO_IDX, PLL_SAI, "pllsai-r-div", "pllsai-r", CLK_SET_RATE_PARENT,
+       { NO_IDX, PLL_VCO_SAI, "pllsai-r-div", "pllsai-r", CLK_SET_RATE_PARENT,
                STM32F4_RCC_DCKCFGR, 16, 2, 0, post_divr_table },
 };
 
@@ -1774,7 +1774,7 @@ static void __init stm32f4_rcc_init(struct device_node *np)
                                post_div->width,
                                post_div->flag_div,
                                post_div->div_table,
-                               clks[post_div->pll_num],
+                               clks[post_div->pll_idx],
                                &stm32f4_clk_lock);
 
                if (post_div->idx != NO_IDX)
index 5ecc37a..c1ec75a 100644 (file)
@@ -18,6 +18,7 @@ config COMMON_CLK_HI3519
 config COMMON_CLK_HI3559A
        bool "Hi3559A Clock Driver"
        depends on ARCH_HISI || COMPILE_TEST
+       select RESET_HISI
        default ARCH_HISI
        help
          Build the clock driver for hi3559a.
index 496900d..de36f58 100644 (file)
@@ -974,6 +974,6 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
                               hws[IMX6QDL_CLK_PLL3_USB_OTG]->clk);
        }
 
-       imx_register_uart_clocks(1);
+       imx_register_uart_clocks(2);
 }
 CLK_OF_DECLARE(imx6q, "fsl,imx6q-ccm", imx6q_clocks_init);
index 800b2fe..b2c142f 100644 (file)
@@ -467,7 +467,7 @@ DEFINE_CLK_SMD_RPM(msm8936, sysmmnoc_clk, sysmmnoc_a_clk, QCOM_SMD_RPM_BUS_CLK,
 
 static struct clk_smd_rpm *msm8936_clks[] = {
        [RPM_SMD_PCNOC_CLK]             = &msm8916_pcnoc_clk,
-       [RPM_SMD_PCNOC_A_CLK]           = &msm8916_pcnoc_clk,
+       [RPM_SMD_PCNOC_A_CLK]           = &msm8916_pcnoc_a_clk,
        [RPM_SMD_SNOC_CLK]              = &msm8916_snoc_clk,
        [RPM_SMD_SNOC_A_CLK]            = &msm8916_snoc_a_clk,
        [RPM_SMD_BIMC_CLK]              = &msm8916_bimc_clk,
index 51ed640..4ece326 100644 (file)
@@ -357,27 +357,43 @@ static int gdsc_init(struct gdsc *sc)
        if (on < 0)
                return on;
 
-       /*
-        * Votable GDSCs can be ON due to Vote from other masters.
-        * If a Votable GDSC is ON, make sure we have a Vote.
-        */
-       if ((sc->flags & VOTABLE) && on)
-               gdsc_enable(&sc->pd);
+       if (on) {
+               /* The regulator must be on, sync the kernel state */
+               if (sc->rsupply) {
+                       ret = regulator_enable(sc->rsupply);
+                       if (ret < 0)
+                               return ret;
+               }
 
-       /*
-        * Make sure the retain bit is set if the GDSC is already on, otherwise
-        * we end up turning off the GDSC and destroying all the register
-        * contents that we thought we were saving.
-        */
-       if ((sc->flags & RETAIN_FF_ENABLE) && on)
-               gdsc_retain_ff_on(sc);
+               /*
+                * Votable GDSCs can be ON due to Vote from other masters.
+                * If a Votable GDSC is ON, make sure we have a Vote.
+                */
+               if (sc->flags & VOTABLE) {
+                       ret = regmap_update_bits(sc->regmap, sc->gdscr,
+                                                SW_COLLAPSE_MASK, val);
+                       if (ret)
+                               return ret;
+               }
+
+               /* Turn on HW trigger mode if supported */
+               if (sc->flags & HW_CTRL) {
+                       ret = gdsc_hwctrl(sc, true);
+                       if (ret < 0)
+                               return ret;
+               }
 
-       /* If ALWAYS_ON GDSCs are not ON, turn them ON */
-       if (sc->flags & ALWAYS_ON) {
-               if (!on)
-                       gdsc_enable(&sc->pd);
+               /*
+                * Make sure the retain bit is set if the GDSC is already on,
+                * otherwise we end up turning off the GDSC and destroying all
+                * the register contents that we thought we were saving.
+                */
+               if (sc->flags & RETAIN_FF_ENABLE)
+                       gdsc_retain_ff_on(sc);
+       } else if (sc->flags & ALWAYS_ON) {
+               /* If ALWAYS_ON GDSCs are not ON, turn them ON */
+               gdsc_enable(&sc->pd);
                on = true;
-               sc->pd.flags |= GENPD_FLAG_ALWAYS_ON;
        }
 
        if (on || (sc->pwrsts & PWRSTS_RET))
@@ -385,6 +401,8 @@ static int gdsc_init(struct gdsc *sc)
        else
                gdsc_clear_mem_on(sc);
 
+       if (sc->flags & ALWAYS_ON)
+               sc->pd.flags |= GENPD_FLAG_ALWAYS_ON;
        if (!sc->pd.power_off)
                sc->pd.power_off = gdsc_disable;
        if (!sc->pd.power_on)
index 9fb79bd..684d893 100644 (file)
@@ -187,7 +187,7 @@ static int rcar_usb2_clock_sel_probe(struct platform_device *pdev)
        init.ops = &usb2_clock_sel_clock_ops;
        priv->hw.init = &init;
 
-       ret = devm_clk_hw_register(NULL, &priv->hw);
+       ret = devm_clk_hw_register(dev, &priv->hw);
        if (ret)
                goto pm_put;
 
index 316912d..4f2c330 100644 (file)
@@ -194,6 +194,15 @@ static void clk_sdmmc_mux_disable(struct clk_hw *hw)
        gate_ops->disable(gate_hw);
 }
 
+static void clk_sdmmc_mux_disable_unused(struct clk_hw *hw)
+{
+       struct tegra_sdmmc_mux *sdmmc_mux = to_clk_sdmmc_mux(hw);
+       const struct clk_ops *gate_ops = sdmmc_mux->gate_ops;
+       struct clk_hw *gate_hw = &sdmmc_mux->gate.hw;
+
+       gate_ops->disable_unused(gate_hw);
+}
+
 static void clk_sdmmc_mux_restore_context(struct clk_hw *hw)
 {
        struct clk_hw *parent = clk_hw_get_parent(hw);
@@ -218,6 +227,7 @@ static const struct clk_ops tegra_clk_sdmmc_mux_ops = {
        .is_enabled = clk_sdmmc_mux_is_enabled,
        .enable = clk_sdmmc_mux_enable,
        .disable = clk_sdmmc_mux_disable,
+       .disable_unused = clk_sdmmc_mux_disable_unused,
        .restore_context = clk_sdmmc_mux_restore_context,
 };
 
index fabad79..5e3e96d 100644 (file)
 
 #define TICK_BASE_CNT  1
 
+#ifdef CONFIG_ARM
+/* Use values higher than ARM arch timer. See 6282edb72bed. */
+#define MCT_CLKSOURCE_RATING           450
+#define MCT_CLKEVENTS_RATING           500
+#else
+#define MCT_CLKSOURCE_RATING           350
+#define MCT_CLKEVENTS_RATING           350
+#endif
+
 enum {
        MCT_INT_SPI,
        MCT_INT_PPI
@@ -206,7 +215,7 @@ static void exynos4_frc_resume(struct clocksource *cs)
 
 static struct clocksource mct_frc = {
        .name           = "mct-frc",
-       .rating         = 450,  /* use value higher than ARM arch timer */
+       .rating         = MCT_CLKSOURCE_RATING,
        .read           = exynos4_frc_read,
        .mask           = CLOCKSOURCE_MASK(32),
        .flags          = CLOCK_SOURCE_IS_CONTINUOUS,
@@ -456,8 +465,9 @@ static int exynos4_mct_starting_cpu(unsigned int cpu)
        evt->set_state_oneshot = set_state_shutdown;
        evt->set_state_oneshot_stopped = set_state_shutdown;
        evt->tick_resume = set_state_shutdown;
-       evt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
-       evt->rating = 500;      /* use value higher than ARM arch timer */
+       evt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT |
+                       CLOCK_EVT_FEAT_PERCPU;
+       evt->rating = MCT_CLKEVENTS_RATING,
 
        exynos4_mct_write(TICK_BASE_CNT, mevt->base + MCT_L_TCNTB_OFFSET);
 
index a129840..cb6fc2f 100644 (file)
@@ -4,6 +4,7 @@
  * Copyright (c) 2020 周琰杰 (Zhou Yanjie) <zhouyanjie@wanyeetech.com>
  */
 
+#include <linux/bitfield.h>
 #include <linux/bitops.h>
 #include <linux/clk.h>
 #include <linux/clk-provider.h>
@@ -34,8 +35,6 @@
 /* bits within the OSTCCR register */
 #define OSTCCR_PRESCALE1_MASK  0x3
 #define OSTCCR_PRESCALE2_MASK  0xc
-#define OSTCCR_PRESCALE1_LSB   0
-#define OSTCCR_PRESCALE2_LSB   2
 
 /* bits within the OSTCR register */
 #define OSTCR_OST1CLR                  BIT(0)
@@ -98,7 +97,7 @@ static unsigned long ingenic_ost_percpu_timer_recalc_rate(struct clk_hw *hw,
 
        prescale = readl(ost_clk->ost->base + info->ostccr_reg);
 
-       prescale = (prescale & OSTCCR_PRESCALE1_MASK) >> OSTCCR_PRESCALE1_LSB;
+       prescale = FIELD_GET(OSTCCR_PRESCALE1_MASK, prescale);
 
        return parent_rate >> (prescale * 2);
 }
@@ -112,7 +111,7 @@ static unsigned long ingenic_ost_global_timer_recalc_rate(struct clk_hw *hw,
 
        prescale = readl(ost_clk->ost->base + info->ostccr_reg);
 
-       prescale = (prescale & OSTCCR_PRESCALE2_MASK) >> OSTCCR_PRESCALE2_LSB;
+       prescale = FIELD_GET(OSTCCR_PRESCALE2_MASK, prescale);
 
        return parent_rate >> (prescale * 2);
 }
@@ -151,7 +150,8 @@ static int ingenic_ost_percpu_timer_set_rate(struct clk_hw *hw, unsigned long re
        int val;
 
        val = readl(ost_clk->ost->base + info->ostccr_reg);
-       val = (val & ~OSTCCR_PRESCALE1_MASK) | (prescale << OSTCCR_PRESCALE1_LSB);
+       val &= ~OSTCCR_PRESCALE1_MASK;
+       val |= FIELD_PREP(OSTCCR_PRESCALE1_MASK, prescale);
        writel(val, ost_clk->ost->base + info->ostccr_reg);
 
        return 0;
@@ -166,7 +166,8 @@ static int ingenic_ost_global_timer_set_rate(struct clk_hw *hw, unsigned long re
        int val;
 
        val = readl(ost_clk->ost->base + info->ostccr_reg);
-       val = (val & ~OSTCCR_PRESCALE2_MASK) | (prescale << OSTCCR_PRESCALE2_LSB);
+       val &= ~OSTCCR_PRESCALE2_MASK;
+       val |= FIELD_PREP(OSTCCR_PRESCALE2_MASK, prescale);
        writel(val, ost_clk->ost->base + info->ostccr_reg);
 
        return 0;
index d7ed99f..dd0956a 100644 (file)
@@ -579,7 +579,8 @@ static int sh_cmt_start(struct sh_cmt_channel *ch, unsigned long flag)
        ch->flags |= flag;
 
        /* setup timeout if no clockevent */
-       if ((flag == FLAG_CLOCKSOURCE) && (!(ch->flags & FLAG_CLOCKEVENT)))
+       if (ch->cmt->num_channels == 1 &&
+           flag == FLAG_CLOCKSOURCE && (!(ch->flags & FLAG_CLOCKEVENT)))
                __sh_cmt_set_next(ch, ch->max_match_value);
  out:
        raw_spin_unlock_irqrestore(&ch->lock, flags);
@@ -621,20 +622,25 @@ static struct sh_cmt_channel *cs_to_sh_cmt(struct clocksource *cs)
 static u64 sh_cmt_clocksource_read(struct clocksource *cs)
 {
        struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
-       unsigned long flags;
        u32 has_wrapped;
-       u64 value;
-       u32 raw;
 
-       raw_spin_lock_irqsave(&ch->lock, flags);
-       value = ch->total_cycles;
-       raw = sh_cmt_get_counter(ch, &has_wrapped);
+       if (ch->cmt->num_channels == 1) {
+               unsigned long flags;
+               u64 value;
+               u32 raw;
 
-       if (unlikely(has_wrapped))
-               raw += ch->match_value + 1;
-       raw_spin_unlock_irqrestore(&ch->lock, flags);
+               raw_spin_lock_irqsave(&ch->lock, flags);
+               value = ch->total_cycles;
+               raw = sh_cmt_get_counter(ch, &has_wrapped);
+
+               if (unlikely(has_wrapped))
+                       raw += ch->match_value + 1;
+               raw_spin_unlock_irqrestore(&ch->lock, flags);
+
+               return value + raw;
+       }
 
-       return value + raw;
+       return sh_cmt_get_counter(ch, &has_wrapped);
 }
 
 static int sh_cmt_clocksource_enable(struct clocksource *cs)
@@ -697,7 +703,7 @@ static int sh_cmt_register_clocksource(struct sh_cmt_channel *ch,
        cs->disable = sh_cmt_clocksource_disable;
        cs->suspend = sh_cmt_clocksource_suspend;
        cs->resume = sh_cmt_clocksource_resume;
-       cs->mask = CLOCKSOURCE_MASK(sizeof(u64) * 8);
+       cs->mask = CLOCKSOURCE_MASK(ch->cmt->info->width);
        cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
 
        dev_info(&ch->cmt->pdev->dev, "ch%u: used as clock source\n",
index edb1d5f..126fb1f 100644 (file)
@@ -271,9 +271,7 @@ static irqreturn_t ast2600_timer_interrupt(int irq, void *dev_id)
 }
 
 static int __init fttmr010_common_init(struct device_node *np,
-               bool is_aspeed,
-               int (*timer_shutdown)(struct clock_event_device *),
-               irq_handler_t irq_handler)
+                                      bool is_aspeed, bool is_ast2600)
 {
        struct fttmr010 *fttmr010;
        int irq;
@@ -374,8 +372,6 @@ static int __init fttmr010_common_init(struct device_node *np,
                                     fttmr010->tick_rate);
        }
 
-       fttmr010->timer_shutdown = timer_shutdown;
-
        /*
         * Setup clockevent timer (interrupt-driven) on timer 1.
         */
@@ -383,8 +379,18 @@ static int __init fttmr010_common_init(struct device_node *np,
        writel(0, fttmr010->base + TIMER1_LOAD);
        writel(0, fttmr010->base + TIMER1_MATCH1);
        writel(0, fttmr010->base + TIMER1_MATCH2);
-       ret = request_irq(irq, irq_handler, IRQF_TIMER,
-                         "FTTMR010-TIMER1", &fttmr010->clkevt);
+
+       if (is_ast2600) {
+               fttmr010->timer_shutdown = ast2600_timer_shutdown;
+               ret = request_irq(irq, ast2600_timer_interrupt,
+                                 IRQF_TIMER, "FTTMR010-TIMER1",
+                                 &fttmr010->clkevt);
+       } else {
+               fttmr010->timer_shutdown = fttmr010_timer_shutdown;
+               ret = request_irq(irq, fttmr010_timer_interrupt,
+                                 IRQF_TIMER, "FTTMR010-TIMER1",
+                                 &fttmr010->clkevt);
+       }
        if (ret) {
                pr_err("FTTMR010-TIMER1 no IRQ\n");
                goto out_unmap;
@@ -432,23 +438,17 @@ out_disable_clock:
 
 static __init int ast2600_timer_init(struct device_node *np)
 {
-       return fttmr010_common_init(np, true,
-                       ast2600_timer_shutdown,
-                       ast2600_timer_interrupt);
+       return fttmr010_common_init(np, true, true);
 }
 
 static __init int aspeed_timer_init(struct device_node *np)
 {
-       return fttmr010_common_init(np, true,
-                       fttmr010_timer_shutdown,
-                       fttmr010_timer_interrupt);
+       return fttmr010_common_init(np, true, false);
 }
 
 static __init int fttmr010_timer_init(struct device_node *np)
 {
-       return fttmr010_common_init(np, false,
-                       fttmr010_timer_shutdown,
-                       fttmr010_timer_interrupt);
+       return fttmr010_common_init(np, false, false);
 }
 
 TIMER_OF_DECLARE(fttmr010, "faraday,fttmr010", fttmr010_timer_init);
index ab63b95..7bcb4a3 100644 (file)
@@ -60,9 +60,9 @@
  * SYST_CON_EN: Clock enable. Shall be set to
  *   - Start timer countdown.
  *   - Allow timeout ticks being updated.
- *   - Allow changing interrupt functions.
+ *   - Allow changing interrupt status,like clear irq pending.
  *
- * SYST_CON_IRQ_EN: Set to allow interrupt.
+ * SYST_CON_IRQ_EN: Set to enable interrupt.
  *
  * SYST_CON_IRQ_CLR: Set to clear interrupt.
  */
@@ -75,6 +75,7 @@ static void __iomem *gpt_sched_reg __read_mostly;
 static void mtk_syst_ack_irq(struct timer_of *to)
 {
        /* Clear and disable interrupt */
+       writel(SYST_CON_EN, SYST_CON_REG(to));
        writel(SYST_CON_IRQ_CLR | SYST_CON_EN, SYST_CON_REG(to));
 }
 
@@ -111,6 +112,9 @@ static int mtk_syst_clkevt_next_event(unsigned long ticks,
 
 static int mtk_syst_clkevt_shutdown(struct clock_event_device *clkevt)
 {
+       /* Clear any irq */
+       mtk_syst_ack_irq(to_timer_of(clkevt));
+
        /* Disable timer */
        writel(0, SYST_CON_REG(to_timer_of(clkevt)));
 
index 3fc98a3..c10fc33 100644 (file)
@@ -104,7 +104,11 @@ struct armada_37xx_dvfs {
 };
 
 static struct armada_37xx_dvfs armada_37xx_dvfs[] = {
-       {.cpu_freq_max = 1200*1000*1000, .divider = {1, 2, 4, 6} },
+       /*
+        * The cpufreq scaling for 1.2 GHz variant of the SOC is currently
+        * unstable because we do not know how to configure it properly.
+        */
+       /* {.cpu_freq_max = 1200*1000*1000, .divider = {1, 2, 4, 6} }, */
        {.cpu_freq_max = 1000*1000*1000, .divider = {1, 2, 4, 5} },
        {.cpu_freq_max = 800*1000*1000,  .divider = {1, 2, 3, 4} },
        {.cpu_freq_max = 600*1000*1000,  .divider = {2, 4, 5, 6} },
index bef7528..231e585 100644 (file)
@@ -139,7 +139,9 @@ static const struct of_device_id blocklist[] __initconst = {
        { .compatible = "qcom,qcs404", },
        { .compatible = "qcom,sc7180", },
        { .compatible = "qcom,sc7280", },
+       { .compatible = "qcom,sc8180x", },
        { .compatible = "qcom,sdm845", },
+       { .compatible = "qcom,sm8150", },
 
        { .compatible = "st,stih407", },
        { .compatible = "st,stih410", },
index ec9a87c..75f818d 100644 (file)
@@ -134,7 +134,7 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
        }
 
        if (!zalloc_cpumask_var(&opp_shared_cpus, GFP_KERNEL))
-               ret = -ENOMEM;
+               return -ENOMEM;
 
        /* Obtain CPUs that share SCMI performance controls */
        ret = scmi_get_sharing_cpus(cpu_dev, policy->cpus);
index 7b91060..d9262db 100644 (file)
@@ -382,8 +382,8 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
        alt_intercepts = 2 * idx_intercept_sum > cpu_data->total - idx_hit_sum;
        alt_recent = idx_recent_sum > NR_RECENT / 2;
        if (alt_recent || alt_intercepts) {
-               s64 last_enabled_span_ns = duration_ns;
-               int last_enabled_idx = idx;
+               s64 first_suitable_span_ns = duration_ns;
+               int first_suitable_idx = idx;
 
                /*
                 * Look for the deepest idle state whose target residency had
@@ -397,37 +397,51 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
                intercept_sum = 0;
                recent_sum = 0;
 
-               for (i = idx - 1; i >= idx0; i--) {
+               for (i = idx - 1; i >= 0; i--) {
                        struct teo_bin *bin = &cpu_data->state_bins[i];
                        s64 span_ns;
 
                        intercept_sum += bin->intercepts;
                        recent_sum += bin->recent;
 
+                       span_ns = teo_middle_of_bin(i, drv);
+
+                       if ((!alt_recent || 2 * recent_sum > idx_recent_sum) &&
+                           (!alt_intercepts ||
+                            2 * intercept_sum > idx_intercept_sum)) {
+                               if (teo_time_ok(span_ns) &&
+                                   !dev->states_usage[i].disable) {
+                                       idx = i;
+                                       duration_ns = span_ns;
+                               } else {
+                                       /*
+                                        * The current state is too shallow or
+                                        * disabled, so take the first enabled
+                                        * deeper state with suitable time span.
+                                        */
+                                       idx = first_suitable_idx;
+                                       duration_ns = first_suitable_span_ns;
+                               }
+                               break;
+                       }
+
                        if (dev->states_usage[i].disable)
                                continue;
 
-                       span_ns = teo_middle_of_bin(i, drv);
                        if (!teo_time_ok(span_ns)) {
                                /*
-                                * The current state is too shallow, so select
-                                * the first enabled deeper state.
+                                * The current state is too shallow, but if an
+                                * alternative candidate state has been found,
+                                * it may still turn out to be a better choice.
                                 */
-                               duration_ns = last_enabled_span_ns;
-                               idx = last_enabled_idx;
-                               break;
-                       }
+                               if (first_suitable_idx != idx)
+                                       continue;
 
-                       if ((!alt_recent || 2 * recent_sum > idx_recent_sum) &&
-                           (!alt_intercepts ||
-                            2 * intercept_sum > idx_intercept_sum)) {
-                               idx = i;
-                               duration_ns = span_ns;
                                break;
                        }
 
-                       last_enabled_span_ns = span_ns;
-                       last_enabled_idx = i;
+                       first_suitable_span_ns = span_ns;
+                       first_suitable_idx = i;
                }
        }
 
index cd1baee..b3a9bbf 100644 (file)
@@ -26,8 +26,7 @@ void sun8i_ce_prng_exit(struct crypto_tfm *tfm)
 {
        struct sun8i_ce_rng_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
 
-       memzero_explicit(ctx->seed, ctx->slen);
-       kfree(ctx->seed);
+       kfree_sensitive(ctx->seed);
        ctx->seed = NULL;
        ctx->slen = 0;
 }
@@ -38,8 +37,7 @@ int sun8i_ce_prng_seed(struct crypto_rng *tfm, const u8 *seed,
        struct sun8i_ce_rng_tfm_ctx *ctx = crypto_rng_ctx(tfm);
 
        if (ctx->seed && ctx->slen != slen) {
-               memzero_explicit(ctx->seed, ctx->slen);
-               kfree(ctx->seed);
+               kfree_sensitive(ctx->seed);
                ctx->slen = 0;
                ctx->seed = NULL;
        }
@@ -157,9 +155,8 @@ err_dst:
                memcpy(dst, d, dlen);
                memcpy(ctx->seed, d + dlen, ctx->slen);
        }
-       memzero_explicit(d, todo);
 err_iv:
-       kfree(d);
+       kfree_sensitive(d);
 err_mem:
        return err;
 }
index 5b7af44..19cd2e5 100644 (file)
@@ -95,9 +95,8 @@ err_pm:
                memcpy(data, d, max);
                err = max;
        }
-       memzero_explicit(d, todo);
 err_dst:
-       kfree(d);
+       kfree_sensitive(d);
        return err;
 }
 
index 3191527..246a678 100644 (file)
@@ -20,8 +20,7 @@ int sun8i_ss_prng_seed(struct crypto_rng *tfm, const u8 *seed,
        struct sun8i_ss_rng_tfm_ctx *ctx = crypto_rng_ctx(tfm);
 
        if (ctx->seed && ctx->slen != slen) {
-               memzero_explicit(ctx->seed, ctx->slen);
-               kfree(ctx->seed);
+               kfree_sensitive(ctx->seed);
                ctx->slen = 0;
                ctx->seed = NULL;
        }
@@ -48,8 +47,7 @@ void sun8i_ss_prng_exit(struct crypto_tfm *tfm)
 {
        struct sun8i_ss_rng_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
 
-       memzero_explicit(ctx->seed, ctx->slen);
-       kfree(ctx->seed);
+       kfree_sensitive(ctx->seed);
        ctx->seed = NULL;
        ctx->slen = 0;
 }
@@ -167,9 +165,8 @@ err_iv:
                /* Update seed */
                memcpy(ctx->seed, d + dlen, ctx->slen);
        }
-       memzero_explicit(d, todo);
 err_free:
-       kfree(d);
+       kfree_sensitive(d);
 
        return err;
 }
index b1d2860..9391ccc 100644 (file)
@@ -143,6 +143,7 @@ struct atmel_aes_xts_ctx {
        struct atmel_aes_base_ctx       base;
 
        u32                     key2[AES_KEYSIZE_256 / sizeof(u32)];
+       struct crypto_skcipher *fallback_tfm;
 };
 
 #if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
@@ -155,6 +156,7 @@ struct atmel_aes_authenc_ctx {
 struct atmel_aes_reqctx {
        unsigned long           mode;
        u8                      lastc[AES_BLOCK_SIZE];
+       struct skcipher_request fallback_req;
 };
 
 #if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
@@ -418,24 +420,15 @@ static inline size_t atmel_aes_padlen(size_t len, size_t block_size)
        return len ? block_size - len : 0;
 }
 
-static struct atmel_aes_dev *atmel_aes_find_dev(struct atmel_aes_base_ctx *ctx)
+static struct atmel_aes_dev *atmel_aes_dev_alloc(struct atmel_aes_base_ctx *ctx)
 {
-       struct atmel_aes_dev *aes_dd = NULL;
-       struct atmel_aes_dev *tmp;
+       struct atmel_aes_dev *aes_dd;
 
        spin_lock_bh(&atmel_aes.lock);
-       if (!ctx->dd) {
-               list_for_each_entry(tmp, &atmel_aes.dev_list, list) {
-                       aes_dd = tmp;
-                       break;
-               }
-               ctx->dd = aes_dd;
-       } else {
-               aes_dd = ctx->dd;
-       }
-
+       /* One AES IP per SoC. */
+       aes_dd = list_first_entry_or_null(&atmel_aes.dev_list,
+                                         struct atmel_aes_dev, list);
        spin_unlock_bh(&atmel_aes.lock);
-
        return aes_dd;
 }
 
@@ -967,7 +960,6 @@ static int atmel_aes_handle_queue(struct atmel_aes_dev *dd,
        ctx = crypto_tfm_ctx(areq->tfm);
 
        dd->areq = areq;
-       dd->ctx = ctx;
        start_async = (areq != new_areq);
        dd->is_async = start_async;
 
@@ -1083,12 +1075,48 @@ static int atmel_aes_ctr_start(struct atmel_aes_dev *dd)
        return atmel_aes_ctr_transfer(dd);
 }
 
+static int atmel_aes_xts_fallback(struct skcipher_request *req, bool enc)
+{
+       struct atmel_aes_reqctx *rctx = skcipher_request_ctx(req);
+       struct atmel_aes_xts_ctx *ctx = crypto_skcipher_ctx(
+                       crypto_skcipher_reqtfm(req));
+
+       skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+       skcipher_request_set_callback(&rctx->fallback_req, req->base.flags,
+                                     req->base.complete, req->base.data);
+       skcipher_request_set_crypt(&rctx->fallback_req, req->src, req->dst,
+                                  req->cryptlen, req->iv);
+
+       return enc ? crypto_skcipher_encrypt(&rctx->fallback_req) :
+                    crypto_skcipher_decrypt(&rctx->fallback_req);
+}
+
 static int atmel_aes_crypt(struct skcipher_request *req, unsigned long mode)
 {
        struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
        struct atmel_aes_base_ctx *ctx = crypto_skcipher_ctx(skcipher);
        struct atmel_aes_reqctx *rctx;
-       struct atmel_aes_dev *dd;
+       u32 opmode = mode & AES_FLAGS_OPMODE_MASK;
+
+       if (opmode == AES_FLAGS_XTS) {
+               if (req->cryptlen < XTS_BLOCK_SIZE)
+                       return -EINVAL;
+
+               if (!IS_ALIGNED(req->cryptlen, XTS_BLOCK_SIZE))
+                       return atmel_aes_xts_fallback(req,
+                                                     mode & AES_FLAGS_ENCRYPT);
+       }
+
+       /*
+        * ECB, CBC, CFB, OFB or CTR mode require the plaintext and ciphertext
+        * to have a positve integer length.
+        */
+       if (!req->cryptlen && opmode != AES_FLAGS_XTS)
+               return 0;
+
+       if ((opmode == AES_FLAGS_ECB || opmode == AES_FLAGS_CBC) &&
+           !IS_ALIGNED(req->cryptlen, crypto_skcipher_blocksize(skcipher)))
+               return -EINVAL;
 
        switch (mode & AES_FLAGS_OPMODE_MASK) {
        case AES_FLAGS_CFB8:
@@ -1113,14 +1141,10 @@ static int atmel_aes_crypt(struct skcipher_request *req, unsigned long mode)
        }
        ctx->is_aead = false;
 
-       dd = atmel_aes_find_dev(ctx);
-       if (!dd)
-               return -ENODEV;
-
        rctx = skcipher_request_ctx(req);
        rctx->mode = mode;
 
-       if ((mode & AES_FLAGS_OPMODE_MASK) != AES_FLAGS_ECB &&
+       if (opmode != AES_FLAGS_ECB &&
            !(mode & AES_FLAGS_ENCRYPT) && req->src == req->dst) {
                unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
 
@@ -1130,7 +1154,7 @@ static int atmel_aes_crypt(struct skcipher_request *req, unsigned long mode)
                                                 ivsize, 0);
        }
 
-       return atmel_aes_handle_queue(dd, &req->base);
+       return atmel_aes_handle_queue(ctx->dd, &req->base);
 }
 
 static int atmel_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
@@ -1242,8 +1266,15 @@ static int atmel_aes_ctr_decrypt(struct skcipher_request *req)
 static int atmel_aes_init_tfm(struct crypto_skcipher *tfm)
 {
        struct atmel_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
+       struct atmel_aes_dev *dd;
+
+       dd = atmel_aes_dev_alloc(&ctx->base);
+       if (!dd)
+               return -ENODEV;
 
        crypto_skcipher_set_reqsize(tfm, sizeof(struct atmel_aes_reqctx));
+       ctx->base.dd = dd;
+       ctx->base.dd->ctx = &ctx->base;
        ctx->base.start = atmel_aes_start;
 
        return 0;
@@ -1252,8 +1283,15 @@ static int atmel_aes_init_tfm(struct crypto_skcipher *tfm)
 static int atmel_aes_ctr_init_tfm(struct crypto_skcipher *tfm)
 {
        struct atmel_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
+       struct atmel_aes_dev *dd;
+
+       dd = atmel_aes_dev_alloc(&ctx->base);
+       if (!dd)
+               return -ENODEV;
 
        crypto_skcipher_set_reqsize(tfm, sizeof(struct atmel_aes_reqctx));
+       ctx->base.dd = dd;
+       ctx->base.dd->ctx = &ctx->base;
        ctx->base.start = atmel_aes_ctr_start;
 
        return 0;
@@ -1290,7 +1328,7 @@ static struct skcipher_alg aes_algs[] = {
 {
        .base.cra_name          = "ofb(aes)",
        .base.cra_driver_name   = "atmel-ofb-aes",
-       .base.cra_blocksize     = AES_BLOCK_SIZE,
+       .base.cra_blocksize     = 1,
        .base.cra_ctxsize       = sizeof(struct atmel_aes_ctx),
 
        .init                   = atmel_aes_init_tfm,
@@ -1691,20 +1729,15 @@ static int atmel_aes_gcm_crypt(struct aead_request *req,
 {
        struct atmel_aes_base_ctx *ctx;
        struct atmel_aes_reqctx *rctx;
-       struct atmel_aes_dev *dd;
 
        ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
        ctx->block_size = AES_BLOCK_SIZE;
        ctx->is_aead = true;
 
-       dd = atmel_aes_find_dev(ctx);
-       if (!dd)
-               return -ENODEV;
-
        rctx = aead_request_ctx(req);
        rctx->mode = AES_FLAGS_GCM | mode;
 
-       return atmel_aes_handle_queue(dd, &req->base);
+       return atmel_aes_handle_queue(ctx->dd, &req->base);
 }
 
 static int atmel_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
@@ -1742,8 +1775,15 @@ static int atmel_aes_gcm_decrypt(struct aead_request *req)
 static int atmel_aes_gcm_init(struct crypto_aead *tfm)
 {
        struct atmel_aes_gcm_ctx *ctx = crypto_aead_ctx(tfm);
+       struct atmel_aes_dev *dd;
+
+       dd = atmel_aes_dev_alloc(&ctx->base);
+       if (!dd)
+               return -ENODEV;
 
        crypto_aead_set_reqsize(tfm, sizeof(struct atmel_aes_reqctx));
+       ctx->base.dd = dd;
+       ctx->base.dd->ctx = &ctx->base;
        ctx->base.start = atmel_aes_gcm_start;
 
        return 0;
@@ -1819,12 +1859,8 @@ static int atmel_aes_xts_process_data(struct atmel_aes_dev *dd)
         * the order of the ciphered tweak bytes need to be reversed before
         * writing them into the ODATARx registers.
         */
-       for (i = 0; i < AES_BLOCK_SIZE/2; ++i) {
-               u8 tmp = tweak_bytes[AES_BLOCK_SIZE - 1 - i];
-
-               tweak_bytes[AES_BLOCK_SIZE - 1 - i] = tweak_bytes[i];
-               tweak_bytes[i] = tmp;
-       }
+       for (i = 0; i < AES_BLOCK_SIZE/2; ++i)
+               swap(tweak_bytes[i], tweak_bytes[AES_BLOCK_SIZE - 1 - i]);
 
        /* Process the data. */
        atmel_aes_write_ctrl(dd, use_dma, NULL);
@@ -1849,6 +1885,13 @@ static int atmel_aes_xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
        if (err)
                return err;
 
+       crypto_skcipher_clear_flags(ctx->fallback_tfm, CRYPTO_TFM_REQ_MASK);
+       crypto_skcipher_set_flags(ctx->fallback_tfm, tfm->base.crt_flags &
+                                 CRYPTO_TFM_REQ_MASK);
+       err = crypto_skcipher_setkey(ctx->fallback_tfm, key, keylen);
+       if (err)
+               return err;
+
        memcpy(ctx->base.key, key, keylen/2);
        memcpy(ctx->key2, key + keylen/2, keylen/2);
        ctx->base.keylen = keylen/2;
@@ -1869,18 +1912,40 @@ static int atmel_aes_xts_decrypt(struct skcipher_request *req)
 static int atmel_aes_xts_init_tfm(struct crypto_skcipher *tfm)
 {
        struct atmel_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
+       struct atmel_aes_dev *dd;
+       const char *tfm_name = crypto_tfm_alg_name(&tfm->base);
 
-       crypto_skcipher_set_reqsize(tfm, sizeof(struct atmel_aes_reqctx));
+       dd = atmel_aes_dev_alloc(&ctx->base);
+       if (!dd)
+               return -ENODEV;
+
+       ctx->fallback_tfm = crypto_alloc_skcipher(tfm_name, 0,
+                                                 CRYPTO_ALG_NEED_FALLBACK);
+       if (IS_ERR(ctx->fallback_tfm))
+               return PTR_ERR(ctx->fallback_tfm);
+
+       crypto_skcipher_set_reqsize(tfm, sizeof(struct atmel_aes_reqctx) +
+                                   crypto_skcipher_reqsize(ctx->fallback_tfm));
+       ctx->base.dd = dd;
+       ctx->base.dd->ctx = &ctx->base;
        ctx->base.start = atmel_aes_xts_start;
 
        return 0;
 }
 
+static void atmel_aes_xts_exit_tfm(struct crypto_skcipher *tfm)
+{
+       struct atmel_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+       crypto_free_skcipher(ctx->fallback_tfm);
+}
+
 static struct skcipher_alg aes_xts_alg = {
        .base.cra_name          = "xts(aes)",
        .base.cra_driver_name   = "atmel-xts-aes",
        .base.cra_blocksize     = AES_BLOCK_SIZE,
        .base.cra_ctxsize       = sizeof(struct atmel_aes_xts_ctx),
+       .base.cra_flags         = CRYPTO_ALG_NEED_FALLBACK,
 
        .min_keysize            = 2 * AES_MIN_KEY_SIZE,
        .max_keysize            = 2 * AES_MAX_KEY_SIZE,
@@ -1889,6 +1954,7 @@ static struct skcipher_alg aes_xts_alg = {
        .encrypt                = atmel_aes_xts_encrypt,
        .decrypt                = atmel_aes_xts_decrypt,
        .init                   = atmel_aes_xts_init_tfm,
+       .exit                   = atmel_aes_xts_exit_tfm,
 };
 
 #if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
@@ -2075,6 +2141,11 @@ static int atmel_aes_authenc_init_tfm(struct crypto_aead *tfm,
 {
        struct atmel_aes_authenc_ctx *ctx = crypto_aead_ctx(tfm);
        unsigned int auth_reqsize = atmel_sha_authenc_get_reqsize();
+       struct atmel_aes_dev *dd;
+
+       dd = atmel_aes_dev_alloc(&ctx->base);
+       if (!dd)
+               return -ENODEV;
 
        ctx->auth = atmel_sha_authenc_spawn(auth_mode);
        if (IS_ERR(ctx->auth))
@@ -2082,6 +2153,8 @@ static int atmel_aes_authenc_init_tfm(struct crypto_aead *tfm,
 
        crypto_aead_set_reqsize(tfm, (sizeof(struct atmel_aes_authenc_reqctx) +
                                      auth_reqsize));
+       ctx->base.dd = dd;
+       ctx->base.dd->ctx = &ctx->base;
        ctx->base.start = atmel_aes_authenc_start;
 
        return 0;
@@ -2127,7 +2200,6 @@ static int atmel_aes_authenc_crypt(struct aead_request *req,
        struct atmel_aes_base_ctx *ctx = crypto_aead_ctx(tfm);
        u32 authsize = crypto_aead_authsize(tfm);
        bool enc = (mode & AES_FLAGS_ENCRYPT);
-       struct atmel_aes_dev *dd;
 
        /* Compute text length. */
        if (!enc && req->cryptlen < authsize)
@@ -2146,11 +2218,7 @@ static int atmel_aes_authenc_crypt(struct aead_request *req,
        ctx->block_size = AES_BLOCK_SIZE;
        ctx->is_aead = true;
 
-       dd = atmel_aes_find_dev(ctx);
-       if (!dd)
-               return -ENODEV;
-
-       return atmel_aes_handle_queue(dd, &req->base);
+       return atmel_aes_handle_queue(ctx->dd, &req->base);
 }
 
 static int atmel_aes_authenc_cbc_aes_encrypt(struct aead_request *req)
@@ -2358,7 +2426,7 @@ static void atmel_aes_unregister_algs(struct atmel_aes_dev *dd)
 
 static void atmel_aes_crypto_alg_init(struct crypto_alg *alg)
 {
-       alg->cra_flags = CRYPTO_ALG_ASYNC;
+       alg->cra_flags |= CRYPTO_ALG_ASYNC;
        alg->cra_alignmask = 0xf;
        alg->cra_priority = ATMEL_AES_PRIORITY;
        alg->cra_module = THIS_MODULE;
index 6f01c51..e30786e 100644 (file)
@@ -196,23 +196,15 @@ static void atmel_tdes_write_n(struct atmel_tdes_dev *dd, u32 offset,
                atmel_tdes_write(dd, offset, *value);
 }
 
-static struct atmel_tdes_dev *atmel_tdes_find_dev(struct atmel_tdes_ctx *ctx)
+static struct atmel_tdes_dev *atmel_tdes_dev_alloc(void)
 {
-       struct atmel_tdes_dev *tdes_dd = NULL;
-       struct atmel_tdes_dev *tmp;
+       struct atmel_tdes_dev *tdes_dd;
 
        spin_lock_bh(&atmel_tdes.lock);
-       if (!ctx->dd) {
-               list_for_each_entry(tmp, &atmel_tdes.dev_list, list) {
-                       tdes_dd = tmp;
-                       break;
-               }
-               ctx->dd = tdes_dd;
-       } else {
-               tdes_dd = ctx->dd;
-       }
+       /* One TDES IP per SoC. */
+       tdes_dd = list_first_entry_or_null(&atmel_tdes.dev_list,
+                                          struct atmel_tdes_dev, list);
        spin_unlock_bh(&atmel_tdes.lock);
-
        return tdes_dd;
 }
 
@@ -320,7 +312,7 @@ static int atmel_tdes_crypt_pdc_stop(struct atmel_tdes_dev *dd)
                                dd->buf_out, dd->buflen, dd->dma_size, 1);
                if (count != dd->dma_size) {
                        err = -EINVAL;
-                       pr_err("not all data converted: %zu\n", count);
+                       dev_dbg(dd->dev, "not all data converted: %zu\n", count);
                }
        }
 
@@ -337,24 +329,24 @@ static int atmel_tdes_buff_init(struct atmel_tdes_dev *dd)
        dd->buflen &= ~(DES_BLOCK_SIZE - 1);
 
        if (!dd->buf_in || !dd->buf_out) {
-               dev_err(dd->dev, "unable to alloc pages.\n");
+               dev_dbg(dd->dev, "unable to alloc pages.\n");
                goto err_alloc;
        }
 
        /* MAP here */
        dd->dma_addr_in = dma_map_single(dd->dev, dd->buf_in,
                                        dd->buflen, DMA_TO_DEVICE);
-       if (dma_mapping_error(dd->dev, dd->dma_addr_in)) {
-               dev_err(dd->dev, "dma %zd bytes error\n", dd->buflen);
-               err = -EINVAL;
+       err = dma_mapping_error(dd->dev, dd->dma_addr_in);
+       if (err) {
+               dev_dbg(dd->dev, "dma %zd bytes error\n", dd->buflen);
                goto err_map_in;
        }
 
        dd->dma_addr_out = dma_map_single(dd->dev, dd->buf_out,
                                        dd->buflen, DMA_FROM_DEVICE);
-       if (dma_mapping_error(dd->dev, dd->dma_addr_out)) {
-               dev_err(dd->dev, "dma %zd bytes error\n", dd->buflen);
-               err = -EINVAL;
+       err = dma_mapping_error(dd->dev, dd->dma_addr_out);
+       if (err) {
+               dev_dbg(dd->dev, "dma %zd bytes error\n", dd->buflen);
                goto err_map_out;
        }
 
@@ -367,8 +359,6 @@ err_map_in:
 err_alloc:
        free_page((unsigned long)dd->buf_out);
        free_page((unsigned long)dd->buf_in);
-       if (err)
-               pr_err("error: %d\n", err);
        return err;
 }
 
@@ -520,14 +510,14 @@ static int atmel_tdes_crypt_start(struct atmel_tdes_dev *dd)
 
                err = dma_map_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
                if (!err) {
-                       dev_err(dd->dev, "dma_map_sg() error\n");
+                       dev_dbg(dd->dev, "dma_map_sg() error\n");
                        return -EINVAL;
                }
 
                err = dma_map_sg(dd->dev, dd->out_sg, 1,
                                DMA_FROM_DEVICE);
                if (!err) {
-                       dev_err(dd->dev, "dma_map_sg() error\n");
+                       dev_dbg(dd->dev, "dma_map_sg() error\n");
                        dma_unmap_sg(dd->dev, dd->in_sg, 1,
                                DMA_TO_DEVICE);
                        return -EINVAL;
@@ -646,7 +636,6 @@ static int atmel_tdes_handle_queue(struct atmel_tdes_dev *dd,
        rctx->mode &= TDES_FLAGS_MODE_MASK;
        dd->flags = (dd->flags & ~TDES_FLAGS_MODE_MASK) | rctx->mode;
        dd->ctx = ctx;
-       ctx->dd = dd;
 
        err = atmel_tdes_write_ctrl(dd);
        if (!err)
@@ -679,7 +668,7 @@ static int atmel_tdes_crypt_dma_stop(struct atmel_tdes_dev *dd)
                                dd->buf_out, dd->buflen, dd->dma_size, 1);
                        if (count != dd->dma_size) {
                                err = -EINVAL;
-                               pr_err("not all data converted: %zu\n", count);
+                               dev_dbg(dd->dev, "not all data converted: %zu\n", count);
                        }
                }
        }
@@ -691,11 +680,15 @@ static int atmel_tdes_crypt(struct skcipher_request *req, unsigned long mode)
        struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
        struct atmel_tdes_ctx *ctx = crypto_skcipher_ctx(skcipher);
        struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(req);
+       struct device *dev = ctx->dd->dev;
+
+       if (!req->cryptlen)
+               return 0;
 
        switch (mode & TDES_FLAGS_OPMODE_MASK) {
        case TDES_FLAGS_CFB8:
                if (!IS_ALIGNED(req->cryptlen, CFB8_BLOCK_SIZE)) {
-                       pr_err("request size is not exact amount of CFB8 blocks\n");
+                       dev_dbg(dev, "request size is not exact amount of CFB8 blocks\n");
                        return -EINVAL;
                }
                ctx->block_size = CFB8_BLOCK_SIZE;
@@ -703,7 +696,7 @@ static int atmel_tdes_crypt(struct skcipher_request *req, unsigned long mode)
 
        case TDES_FLAGS_CFB16:
                if (!IS_ALIGNED(req->cryptlen, CFB16_BLOCK_SIZE)) {
-                       pr_err("request size is not exact amount of CFB16 blocks\n");
+                       dev_dbg(dev, "request size is not exact amount of CFB16 blocks\n");
                        return -EINVAL;
                }
                ctx->block_size = CFB16_BLOCK_SIZE;
@@ -711,7 +704,7 @@ static int atmel_tdes_crypt(struct skcipher_request *req, unsigned long mode)
 
        case TDES_FLAGS_CFB32:
                if (!IS_ALIGNED(req->cryptlen, CFB32_BLOCK_SIZE)) {
-                       pr_err("request size is not exact amount of CFB32 blocks\n");
+                       dev_dbg(dev, "request size is not exact amount of CFB32 blocks\n");
                        return -EINVAL;
                }
                ctx->block_size = CFB32_BLOCK_SIZE;
@@ -719,7 +712,7 @@ static int atmel_tdes_crypt(struct skcipher_request *req, unsigned long mode)
 
        default:
                if (!IS_ALIGNED(req->cryptlen, DES_BLOCK_SIZE)) {
-                       pr_err("request size is not exact amount of DES blocks\n");
+                       dev_dbg(dev, "request size is not exact amount of DES blocks\n");
                        return -EINVAL;
                }
                ctx->block_size = DES_BLOCK_SIZE;
@@ -897,14 +890,13 @@ static int atmel_tdes_ofb_decrypt(struct skcipher_request *req)
 static int atmel_tdes_init_tfm(struct crypto_skcipher *tfm)
 {
        struct atmel_tdes_ctx *ctx = crypto_skcipher_ctx(tfm);
-       struct atmel_tdes_dev *dd;
-
-       crypto_skcipher_set_reqsize(tfm, sizeof(struct atmel_tdes_reqctx));
 
-       dd = atmel_tdes_find_dev(ctx);
-       if (!dd)
+       ctx->dd = atmel_tdes_dev_alloc();
+       if (!ctx->dd)
                return -ENODEV;
 
+       crypto_skcipher_set_reqsize(tfm, sizeof(struct atmel_tdes_reqctx));
+
        return 0;
 }
 
@@ -999,7 +991,7 @@ static struct skcipher_alg tdes_algs[] = {
 {
        .base.cra_name          = "ofb(des)",
        .base.cra_driver_name   = "atmel-ofb-des",
-       .base.cra_blocksize     = DES_BLOCK_SIZE,
+       .base.cra_blocksize     = 1,
        .base.cra_alignmask     = 0x7,
 
        .min_keysize            = DES_KEY_SIZE,
index 9180840..2ecb0e1 100644 (file)
@@ -300,6 +300,9 @@ static int __sev_platform_shutdown_locked(int *error)
        struct sev_device *sev = psp_master->sev_data;
        int ret;
 
+       if (sev->state == SEV_STATE_UNINIT)
+               return 0;
+
        ret = __sev_do_cmd_locked(SEV_CMD_SHUTDOWN, NULL, error);
        if (ret)
                return ret;
@@ -1019,6 +1022,20 @@ e_err:
        return ret;
 }
 
+static void sev_firmware_shutdown(struct sev_device *sev)
+{
+       sev_platform_shutdown(NULL);
+
+       if (sev_es_tmr) {
+               /* The TMR area was encrypted, flush it from the cache */
+               wbinvd_on_all_cpus();
+
+               free_pages((unsigned long)sev_es_tmr,
+                          get_order(SEV_ES_TMR_SIZE));
+               sev_es_tmr = NULL;
+       }
+}
+
 void sev_dev_destroy(struct psp_device *psp)
 {
        struct sev_device *sev = psp->sev_data;
@@ -1026,6 +1043,8 @@ void sev_dev_destroy(struct psp_device *psp)
        if (!sev)
                return;
 
+       sev_firmware_shutdown(sev);
+
        if (sev->misc)
                kref_put(&misc_dev->refcount, sev_exit);
 
@@ -1056,21 +1075,6 @@ void sev_pci_init(void)
        if (sev_get_api_version())
                goto err;
 
-       /*
-        * If platform is not in UNINIT state then firmware upgrade and/or
-        * platform INIT command will fail. These command require UNINIT state.
-        *
-        * In a normal boot we should never run into case where the firmware
-        * is not in UNINIT state on boot. But in case of kexec boot, a reboot
-        * may not go through a typical shutdown sequence and may leave the
-        * firmware in INIT or WORKING state.
-        */
-
-       if (sev->state != SEV_STATE_UNINIT) {
-               sev_platform_shutdown(NULL);
-               sev->state = SEV_STATE_UNINIT;
-       }
-
        if (sev_version_greater_or_equal(0, 15) &&
            sev_update_firmware(sev->dev) == 0)
                sev_get_api_version();
@@ -1115,17 +1119,10 @@ err:
 
 void sev_pci_exit(void)
 {
-       if (!psp_master->sev_data)
-               return;
-
-       sev_platform_shutdown(NULL);
+       struct sev_device *sev = psp_master->sev_data;
 
-       if (sev_es_tmr) {
-               /* The TMR area was encrypted, flush it from the cache */
-               wbinvd_on_all_cpus();
+       if (!sev)
+               return;
 
-               free_pages((unsigned long)sev_es_tmr,
-                          get_order(SEV_ES_TMR_SIZE));
-               sev_es_tmr = NULL;
-       }
+       sev_firmware_shutdown(sev);
 }
index 6fb6ba3..88c672a 100644 (file)
@@ -241,6 +241,17 @@ e_err:
        return ret;
 }
 
+static void sp_pci_shutdown(struct pci_dev *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct sp_device *sp = dev_get_drvdata(dev);
+
+       if (!sp)
+               return;
+
+       sp_destroy(sp);
+}
+
 static void sp_pci_remove(struct pci_dev *pdev)
 {
        struct device *dev = &pdev->dev;
@@ -349,6 +360,12 @@ static const struct sp_dev_vdata dev_vdata[] = {
 #endif
 #ifdef CONFIG_CRYPTO_DEV_SP_PSP
                .psp_vdata = &pspv3,
+#endif
+       },
+       {       /* 5 */
+               .bar = 2,
+#ifdef CONFIG_CRYPTO_DEV_SP_PSP
+               .psp_vdata = &pspv2,
 #endif
        },
 };
@@ -359,6 +376,7 @@ static const struct pci_device_id sp_pci_table[] = {
        { PCI_VDEVICE(AMD, 0x1486), (kernel_ulong_t)&dev_vdata[3] },
        { PCI_VDEVICE(AMD, 0x15DF), (kernel_ulong_t)&dev_vdata[4] },
        { PCI_VDEVICE(AMD, 0x1649), (kernel_ulong_t)&dev_vdata[4] },
+       { PCI_VDEVICE(AMD, 0x14CA), (kernel_ulong_t)&dev_vdata[5] },
        /* Last entry must be zero */
        { 0, }
 };
@@ -371,6 +389,7 @@ static struct pci_driver sp_pci_driver = {
        .id_table = sp_pci_table,
        .probe = sp_pci_probe,
        .remove = sp_pci_remove,
+       .shutdown = sp_pci_shutdown,
        .driver.pm = &sp_pci_pm_ops,
 };
 
index 8b0640f..65a6413 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/pci.h>
+#include <linux/pm_runtime.h>
 #include <linux/topology.h>
 #include <linux/uacce.h>
 #include "hpre.h"
 #define HPRE_PREFETCH_DISABLE          BIT(30)
 #define HPRE_SVA_DISABLE_READY         (BIT(4) | BIT(8))
 
+/* clock gate */
+#define HPRE_CLKGATE_CTL               0x301a10
+#define HPRE_PEH_CFG_AUTO_GATE         0x301a2c
+#define HPRE_CLUSTER_DYN_CTL           0x302010
+#define HPRE_CORE_SHB_CFG              0x302088
+#define HPRE_CLKGATE_CTL_EN            BIT(0)
+#define HPRE_PEH_CFG_AUTO_GATE_EN      BIT(0)
+#define HPRE_CLUSTER_DYN_CTL_EN                BIT(0)
+#define HPRE_CORE_GATE_EN              (BIT(30) | BIT(31))
+
 #define HPRE_AM_OOO_SHUTDOWN_ENB       0x301044
 #define HPRE_AM_OOO_SHUTDOWN_ENABLE    BIT(0)
 #define HPRE_WR_MSI_PORT               BIT(2)
@@ -417,12 +428,63 @@ static void hpre_close_sva_prefetch(struct hisi_qm *qm)
                pci_err(qm->pdev, "failed to close sva prefetch\n");
 }
 
+static void hpre_enable_clock_gate(struct hisi_qm *qm)
+{
+       u32 val;
+
+       if (qm->ver < QM_HW_V3)
+               return;
+
+       val = readl(qm->io_base + HPRE_CLKGATE_CTL);
+       val |= HPRE_CLKGATE_CTL_EN;
+       writel(val, qm->io_base + HPRE_CLKGATE_CTL);
+
+       val = readl(qm->io_base + HPRE_PEH_CFG_AUTO_GATE);
+       val |= HPRE_PEH_CFG_AUTO_GATE_EN;
+       writel(val, qm->io_base + HPRE_PEH_CFG_AUTO_GATE);
+
+       val = readl(qm->io_base + HPRE_CLUSTER_DYN_CTL);
+       val |= HPRE_CLUSTER_DYN_CTL_EN;
+       writel(val, qm->io_base + HPRE_CLUSTER_DYN_CTL);
+
+       val = readl_relaxed(qm->io_base + HPRE_CORE_SHB_CFG);
+       val |= HPRE_CORE_GATE_EN;
+       writel(val, qm->io_base + HPRE_CORE_SHB_CFG);
+}
+
+static void hpre_disable_clock_gate(struct hisi_qm *qm)
+{
+       u32 val;
+
+       if (qm->ver < QM_HW_V3)
+               return;
+
+       val = readl(qm->io_base + HPRE_CLKGATE_CTL);
+       val &= ~HPRE_CLKGATE_CTL_EN;
+       writel(val, qm->io_base + HPRE_CLKGATE_CTL);
+
+       val = readl(qm->io_base + HPRE_PEH_CFG_AUTO_GATE);
+       val &= ~HPRE_PEH_CFG_AUTO_GATE_EN;
+       writel(val, qm->io_base + HPRE_PEH_CFG_AUTO_GATE);
+
+       val = readl(qm->io_base + HPRE_CLUSTER_DYN_CTL);
+       val &= ~HPRE_CLUSTER_DYN_CTL_EN;
+       writel(val, qm->io_base + HPRE_CLUSTER_DYN_CTL);
+
+       val = readl_relaxed(qm->io_base + HPRE_CORE_SHB_CFG);
+       val &= ~HPRE_CORE_GATE_EN;
+       writel(val, qm->io_base + HPRE_CORE_SHB_CFG);
+}
+
 static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)
 {
        struct device *dev = &qm->pdev->dev;
        u32 val;
        int ret;
 
+       /* disabel dynamic clock gate before sram init */
+       hpre_disable_clock_gate(qm);
+
        writel(HPRE_QM_USR_CFG_MASK, qm->io_base + QM_ARUSER_M_CFG_ENABLE);
        writel(HPRE_QM_USR_CFG_MASK, qm->io_base + QM_AWUSER_M_CFG_ENABLE);
        writel_relaxed(HPRE_QM_AXI_CFG_MASK, qm->io_base + QM_AXI_M_CFG);
@@ -473,6 +535,8 @@ static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)
        /* Config data buffer pasid needed by Kunpeng 920 */
        hpre_config_pasid(qm);
 
+       hpre_enable_clock_gate(qm);
+
        return ret;
 }
 
@@ -595,10 +659,15 @@ static ssize_t hpre_ctrl_debug_read(struct file *filp, char __user *buf,
                                    size_t count, loff_t *pos)
 {
        struct hpre_debugfs_file *file = filp->private_data;
+       struct hisi_qm *qm = hpre_file_to_qm(file);
        char tbuf[HPRE_DBGFS_VAL_MAX_LEN];
        u32 val;
        int ret;
 
+       ret = hisi_qm_get_dfx_access(qm);
+       if (ret)
+               return ret;
+
        spin_lock_irq(&file->lock);
        switch (file->type) {
        case HPRE_CLEAR_ENABLE:
@@ -608,18 +677,25 @@ static ssize_t hpre_ctrl_debug_read(struct file *filp, char __user *buf,
                val = hpre_cluster_inqry_read(file);
                break;
        default:
-               spin_unlock_irq(&file->lock);
-               return -EINVAL;
+               goto err_input;
        }
        spin_unlock_irq(&file->lock);
+
+       hisi_qm_put_dfx_access(qm);
        ret = snprintf(tbuf, HPRE_DBGFS_VAL_MAX_LEN, "%u\n", val);
        return simple_read_from_buffer(buf, count, pos, tbuf, ret);
+
+err_input:
+       spin_unlock_irq(&file->lock);
+       hisi_qm_put_dfx_access(qm);
+       return -EINVAL;
 }
 
 static ssize_t hpre_ctrl_debug_write(struct file *filp, const char __user *buf,
                                     size_t count, loff_t *pos)
 {
        struct hpre_debugfs_file *file = filp->private_data;
+       struct hisi_qm *qm = hpre_file_to_qm(file);
        char tbuf[HPRE_DBGFS_VAL_MAX_LEN];
        unsigned long val;
        int len, ret;
@@ -639,6 +715,10 @@ static ssize_t hpre_ctrl_debug_write(struct file *filp, const char __user *buf,
        if (kstrtoul(tbuf, 0, &val))
                return -EFAULT;
 
+       ret = hisi_qm_get_dfx_access(qm);
+       if (ret)
+               return ret;
+
        spin_lock_irq(&file->lock);
        switch (file->type) {
        case HPRE_CLEAR_ENABLE:
@@ -655,12 +735,12 @@ static ssize_t hpre_ctrl_debug_write(struct file *filp, const char __user *buf,
                ret = -EINVAL;
                goto err_input;
        }
-       spin_unlock_irq(&file->lock);
 
-       return count;
+       ret = count;
 
 err_input:
        spin_unlock_irq(&file->lock);
+       hisi_qm_put_dfx_access(qm);
        return ret;
 }
 
@@ -700,6 +780,24 @@ static int hpre_debugfs_atomic64_set(void *data, u64 val)
 DEFINE_DEBUGFS_ATTRIBUTE(hpre_atomic64_ops, hpre_debugfs_atomic64_get,
                         hpre_debugfs_atomic64_set, "%llu\n");
 
+static int hpre_com_regs_show(struct seq_file *s, void *unused)
+{
+       hisi_qm_regs_dump(s, s->private);
+
+       return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(hpre_com_regs);
+
+static int hpre_cluster_regs_show(struct seq_file *s, void *unused)
+{
+       hisi_qm_regs_dump(s, s->private);
+
+       return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(hpre_cluster_regs);
+
 static int hpre_create_debugfs_file(struct hisi_qm *qm, struct dentry *dir,
                                    enum hpre_ctrl_dbgfs_file type, int indx)
 {
@@ -737,8 +835,11 @@ static int hpre_pf_comm_regs_debugfs_init(struct hisi_qm *qm)
        regset->regs = hpre_com_dfx_regs;
        regset->nregs = ARRAY_SIZE(hpre_com_dfx_regs);
        regset->base = qm->io_base;
+       regset->dev = dev;
+
+       debugfs_create_file("regs", 0444, qm->debug.debug_root,
+                           regset, &hpre_com_regs_fops);
 
-       debugfs_create_regset32("regs", 0444,  qm->debug.debug_root, regset);
        return 0;
 }
 
@@ -764,8 +865,10 @@ static int hpre_cluster_debugfs_init(struct hisi_qm *qm)
                regset->regs = hpre_cluster_dfx_regs;
                regset->nregs = ARRAY_SIZE(hpre_cluster_dfx_regs);
                regset->base = qm->io_base + hpre_cluster_offsets[i];
+               regset->dev = dev;
 
-               debugfs_create_regset32("regs", 0444, tmp_d, regset);
+               debugfs_create_file("regs", 0444, tmp_d, regset,
+                                   &hpre_cluster_regs_fops);
                ret = hpre_create_debugfs_file(qm, tmp_d, HPRE_CLUSTER_CTRL,
                                               i + HPRE_CLUSTER_CTRL);
                if (ret)
@@ -1017,6 +1120,8 @@ static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id)
                        goto err_with_alg_register;
        }
 
+       hisi_qm_pm_init(qm);
+
        return 0;
 
 err_with_alg_register:
@@ -1040,6 +1145,7 @@ static void hpre_remove(struct pci_dev *pdev)
        struct hisi_qm *qm = pci_get_drvdata(pdev);
        int ret;
 
+       hisi_qm_pm_uninit(qm);
        hisi_qm_wait_task_finish(qm, &hpre_devices);
        hisi_qm_alg_unregister(qm, &hpre_devices);
        if (qm->fun_type == QM_HW_PF && qm->vfs_num) {
@@ -1062,6 +1168,10 @@ static void hpre_remove(struct pci_dev *pdev)
        hisi_qm_uninit(qm);
 }
 
+static const struct dev_pm_ops hpre_pm_ops = {
+       SET_RUNTIME_PM_OPS(hisi_qm_suspend, hisi_qm_resume, NULL)
+};
+
 static const struct pci_error_handlers hpre_err_handler = {
        .error_detected         = hisi_qm_dev_err_detected,
        .slot_reset             = hisi_qm_dev_slot_reset,
@@ -1078,6 +1188,7 @@ static struct pci_driver hpre_pci_driver = {
                                  hisi_qm_sriov_configure : NULL,
        .err_handler            = &hpre_err_handler,
        .shutdown               = hisi_qm_dev_shutdown,
+       .driver.pm              = &hpre_pm_ops,
 };
 
 static void hpre_register_debugfs(void)
index 1d67f94..369562d 100644 (file)
@@ -4,12 +4,12 @@
 #include <linux/acpi.h>
 #include <linux/aer.h>
 #include <linux/bitmap.h>
-#include <linux/debugfs.h>
 #include <linux/dma-mapping.h>
 #include <linux/idr.h>
 #include <linux/io.h>
 #include <linux/irqreturn.h>
 #include <linux/log2.h>
+#include <linux/pm_runtime.h>
 #include <linux/seq_file.h>
 #include <linux/slab.h>
 #include <linux/uacce.h>
 #define QM_QOS_MAX_CIR_S               11
 #define QM_QOS_VAL_MAX_LEN             32
 
+#define QM_AUTOSUSPEND_DELAY           3000
+
 #define QM_MK_CQC_DW3_V1(hop_num, pg_sz, buf_sz, cqe_sz) \
        (((hop_num) << QM_CQ_HOP_NUM_SHIFT)     | \
        ((pg_sz) << QM_CQ_PAGE_SIZE_SHIFT)      | \
@@ -734,6 +736,34 @@ static u32 qm_get_irq_num_v3(struct hisi_qm *qm)
        return QM_IRQ_NUM_VF_V3;
 }
 
+static int qm_pm_get_sync(struct hisi_qm *qm)
+{
+       struct device *dev = &qm->pdev->dev;
+       int ret;
+
+       if (qm->fun_type == QM_HW_VF || qm->ver < QM_HW_V3)
+               return 0;
+
+       ret = pm_runtime_resume_and_get(dev);
+       if (ret < 0) {
+               dev_err(dev, "failed to get_sync(%d).\n", ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+static void qm_pm_put_sync(struct hisi_qm *qm)
+{
+       struct device *dev = &qm->pdev->dev;
+
+       if (qm->fun_type == QM_HW_VF || qm->ver < QM_HW_V3)
+               return;
+
+       pm_runtime_mark_last_busy(dev);
+       pm_runtime_put_autosuspend(dev);
+}
+
 static struct hisi_qp *qm_to_hisi_qp(struct hisi_qm *qm, struct qm_eqe *eqe)
 {
        u16 cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK;
@@ -1173,16 +1203,13 @@ static struct hisi_qm *file_to_qm(struct debugfs_file *file)
        return container_of(debug, struct hisi_qm, debug);
 }
 
-static u32 current_q_read(struct debugfs_file *file)
+static u32 current_q_read(struct hisi_qm *qm)
 {
-       struct hisi_qm *qm = file_to_qm(file);
-
        return readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) >> QM_DFX_QN_SHIFT;
 }
 
-static int current_q_write(struct debugfs_file *file, u32 val)
+static int current_q_write(struct hisi_qm *qm, u32 val)
 {
-       struct hisi_qm *qm = file_to_qm(file);
        u32 tmp;
 
        if (val >= qm->debug.curr_qm_qp_num)
@@ -1199,18 +1226,14 @@ static int current_q_write(struct debugfs_file *file, u32 val)
        return 0;
 }
 
-static u32 clear_enable_read(struct debugfs_file *file)
+static u32 clear_enable_read(struct hisi_qm *qm)
 {
-       struct hisi_qm *qm = file_to_qm(file);
-
        return readl(qm->io_base + QM_DFX_CNT_CLR_CE);
 }
 
 /* rd_clr_ctrl 1 enable read clear, otherwise 0 disable it */
-static int clear_enable_write(struct debugfs_file *file, u32 rd_clr_ctrl)
+static int clear_enable_write(struct hisi_qm *qm, u32 rd_clr_ctrl)
 {
-       struct hisi_qm *qm = file_to_qm(file);
-
        if (rd_clr_ctrl > 1)
                return -EINVAL;
 
@@ -1219,16 +1242,13 @@ static int clear_enable_write(struct debugfs_file *file, u32 rd_clr_ctrl)
        return 0;
 }
 
-static u32 current_qm_read(struct debugfs_file *file)
+static u32 current_qm_read(struct hisi_qm *qm)
 {
-       struct hisi_qm *qm = file_to_qm(file);
-
        return readl(qm->io_base + QM_DFX_MB_CNT_VF);
 }
 
-static int current_qm_write(struct debugfs_file *file, u32 val)
+static int current_qm_write(struct hisi_qm *qm, u32 val)
 {
-       struct hisi_qm *qm = file_to_qm(file);
        u32 tmp;
 
        if (val > qm->vfs_num)
@@ -1259,29 +1279,39 @@ static ssize_t qm_debug_read(struct file *filp, char __user *buf,
 {
        struct debugfs_file *file = filp->private_data;
        enum qm_debug_file index = file->index;
+       struct hisi_qm *qm = file_to_qm(file);
        char tbuf[QM_DBG_TMP_BUF_LEN];
        u32 val;
        int ret;
 
+       ret = hisi_qm_get_dfx_access(qm);
+       if (ret)
+               return ret;
+
        mutex_lock(&file->lock);
        switch (index) {
        case CURRENT_QM:
-               val = current_qm_read(file);
+               val = current_qm_read(qm);
                break;
        case CURRENT_Q:
-               val = current_q_read(file);
+               val = current_q_read(qm);
                break;
        case CLEAR_ENABLE:
-               val = clear_enable_read(file);
+               val = clear_enable_read(qm);
                break;
        default:
-               mutex_unlock(&file->lock);
-               return -EINVAL;
+               goto err_input;
        }
        mutex_unlock(&file->lock);
 
+       hisi_qm_put_dfx_access(qm);
        ret = scnprintf(tbuf, QM_DBG_TMP_BUF_LEN, "%u\n", val);
        return simple_read_from_buffer(buf, count, pos, tbuf, ret);
+
+err_input:
+       mutex_unlock(&file->lock);
+       hisi_qm_put_dfx_access(qm);
+       return -EINVAL;
 }
 
 static ssize_t qm_debug_write(struct file *filp, const char __user *buf,
@@ -1289,6 +1319,7 @@ static ssize_t qm_debug_write(struct file *filp, const char __user *buf,
 {
        struct debugfs_file *file = filp->private_data;
        enum qm_debug_file index = file->index;
+       struct hisi_qm *qm = file_to_qm(file);
        unsigned long val;
        char tbuf[QM_DBG_TMP_BUF_LEN];
        int len, ret;
@@ -1308,22 +1339,28 @@ static ssize_t qm_debug_write(struct file *filp, const char __user *buf,
        if (kstrtoul(tbuf, 0, &val))
                return -EFAULT;
 
+       ret = hisi_qm_get_dfx_access(qm);
+       if (ret)
+               return ret;
+
        mutex_lock(&file->lock);
        switch (index) {
        case CURRENT_QM:
-               ret = current_qm_write(file, val);
+               ret = current_qm_write(qm, val);
                break;
        case CURRENT_Q:
-               ret = current_q_write(file, val);
+               ret = current_q_write(qm, val);
                break;
        case CLEAR_ENABLE:
-               ret = clear_enable_write(file, val);
+               ret = clear_enable_write(qm, val);
                break;
        default:
                ret = -EINVAL;
        }
        mutex_unlock(&file->lock);
 
+       hisi_qm_put_dfx_access(qm);
+
        if (ret)
                return ret;
 
@@ -1337,13 +1374,8 @@ static const struct file_operations qm_debug_fops = {
        .write = qm_debug_write,
 };
 
-struct qm_dfx_registers {
-       char  *reg_name;
-       u64   reg_offset;
-};
-
 #define CNT_CYC_REGS_NUM               10
-static struct qm_dfx_registers qm_dfx_regs[] = {
+static const struct debugfs_reg32 qm_dfx_regs[] = {
        /* XXX_CNT are reading clear register */
        {"QM_ECC_1BIT_CNT               ",  0x104000ull},
        {"QM_ECC_MBIT_CNT               ",  0x104008ull},
@@ -1369,31 +1401,59 @@ static struct qm_dfx_registers qm_dfx_regs[] = {
        {"QM_DFX_FF_ST5                 ",  0x1040dcull},
        {"QM_DFX_FF_ST6                 ",  0x1040e0ull},
        {"QM_IN_IDLE_ST                 ",  0x1040e4ull},
-       { NULL, 0}
 };
 
-static struct qm_dfx_registers qm_vf_dfx_regs[] = {
+static const struct debugfs_reg32 qm_vf_dfx_regs[] = {
        {"QM_DFX_FUNS_ACTIVE_ST         ",  0x200ull},
-       { NULL, 0}
 };
 
-static int qm_regs_show(struct seq_file *s, void *unused)
+/**
+ * hisi_qm_regs_dump() - Dump the values of a set of registers.
+ * @s: debugfs file handle.
+ * @regset: accelerator registers information.
+ *
+ * Dump accelerator registers.
+ */
+void hisi_qm_regs_dump(struct seq_file *s, struct debugfs_regset32 *regset)
 {
-       struct hisi_qm *qm = s->private;
-       struct qm_dfx_registers *regs;
+       struct pci_dev *pdev = to_pci_dev(regset->dev);
+       struct hisi_qm *qm = pci_get_drvdata(pdev);
+       const struct debugfs_reg32 *regs = regset->regs;
+       int regs_len = regset->nregs;
+       int i, ret;
        u32 val;
 
-       if (qm->fun_type == QM_HW_PF)
-               regs = qm_dfx_regs;
-       else
-               regs = qm_vf_dfx_regs;
+       ret = hisi_qm_get_dfx_access(qm);
+       if (ret)
+               return;
 
-       while (regs->reg_name) {
-               val = readl(qm->io_base + regs->reg_offset);
-               seq_printf(s, "%s= 0x%08x\n", regs->reg_name, val);
-               regs++;
+       for (i = 0; i < regs_len; i++) {
+               val = readl(regset->base + regs[i].offset);
+               seq_printf(s, "%s= 0x%08x\n", regs[i].name, val);
        }
 
+       hisi_qm_put_dfx_access(qm);
+}
+EXPORT_SYMBOL_GPL(hisi_qm_regs_dump);
+
+static int qm_regs_show(struct seq_file *s, void *unused)
+{
+       struct hisi_qm *qm = s->private;
+       struct debugfs_regset32 regset;
+
+       if (qm->fun_type == QM_HW_PF) {
+               regset.regs = qm_dfx_regs;
+               regset.nregs = ARRAY_SIZE(qm_dfx_regs);
+       } else {
+               regset.regs = qm_vf_dfx_regs;
+               regset.nregs = ARRAY_SIZE(qm_vf_dfx_regs);
+       }
+
+       regset.base = qm->io_base;
+       regset.dev = &qm->pdev->dev;
+
+       hisi_qm_regs_dump(s, &regset);
+
        return 0;
 }
 
@@ -1823,16 +1883,24 @@ static ssize_t qm_cmd_write(struct file *filp, const char __user *buffer,
        if (*pos)
                return 0;
 
+       ret = hisi_qm_get_dfx_access(qm);
+       if (ret)
+               return ret;
+
        /* Judge if the instance is being reset. */
        if (unlikely(atomic_read(&qm->status.flags) == QM_STOP))
                return 0;
 
-       if (count > QM_DBG_WRITE_LEN)
-               return -ENOSPC;
+       if (count > QM_DBG_WRITE_LEN) {
+               ret = -ENOSPC;
+               goto put_dfx_access;
+       }
 
        cmd_buf = memdup_user_nul(buffer, count);
-       if (IS_ERR(cmd_buf))
-               return PTR_ERR(cmd_buf);
+       if (IS_ERR(cmd_buf)) {
+               ret = PTR_ERR(cmd_buf);
+               goto put_dfx_access;
+       }
 
        cmd_buf_tmp = strchr(cmd_buf, '\n');
        if (cmd_buf_tmp) {
@@ -1843,12 +1911,16 @@ static ssize_t qm_cmd_write(struct file *filp, const char __user *buffer,
        ret = qm_cmd_write_dump(qm, cmd_buf);
        if (ret) {
                kfree(cmd_buf);
-               return ret;
+               goto put_dfx_access;
        }
 
        kfree(cmd_buf);
 
-       return count;
+       ret = count;
+
+put_dfx_access:
+       hisi_qm_put_dfx_access(qm);
+       return ret;
 }
 
 static const struct file_operations qm_cmd_fops = {
@@ -2445,11 +2517,19 @@ static struct hisi_qp *qm_create_qp_nolock(struct hisi_qm *qm, u8 alg_type)
 struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type)
 {
        struct hisi_qp *qp;
+       int ret;
+
+       ret = qm_pm_get_sync(qm);
+       if (ret)
+               return ERR_PTR(ret);
 
        down_write(&qm->qps_lock);
        qp = qm_create_qp_nolock(qm, alg_type);
        up_write(&qm->qps_lock);
 
+       if (IS_ERR(qp))
+               qm_pm_put_sync(qm);
+
        return qp;
 }
 EXPORT_SYMBOL_GPL(hisi_qm_create_qp);
@@ -2475,6 +2555,8 @@ void hisi_qm_release_qp(struct hisi_qp *qp)
        idr_remove(&qm->qp_idr, qp->qp_id);
 
        up_write(&qm->qps_lock);
+
+       qm_pm_put_sync(qm);
 }
 EXPORT_SYMBOL_GPL(hisi_qm_release_qp);
 
@@ -3200,6 +3282,10 @@ static void hisi_qm_pre_init(struct hisi_qm *qm)
        init_rwsem(&qm->qps_lock);
        qm->qp_in_used = 0;
        qm->misc_ctl = false;
+       if (qm->fun_type == QM_HW_PF && qm->ver > QM_HW_V2) {
+               if (!acpi_device_power_manageable(ACPI_COMPANION(&pdev->dev)))
+                       dev_info(&pdev->dev, "_PS0 and _PR0 are not defined");
+       }
 }
 
 static void qm_cmd_uninit(struct hisi_qm *qm)
@@ -4057,10 +4143,15 @@ static ssize_t qm_algqos_read(struct file *filp, char __user *buf,
        u32 qos_val, ir;
        int ret;
 
+       ret = hisi_qm_get_dfx_access(qm);
+       if (ret)
+               return ret;
+
        /* Mailbox and reset cannot be operated at the same time */
        if (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) {
                pci_err(qm->pdev, "dev resetting, read alg qos failed!\n");
-               return  -EAGAIN;
+               ret = -EAGAIN;
+               goto err_put_dfx_access;
        }
 
        if (qm->fun_type == QM_HW_PF) {
@@ -4079,6 +4170,8 @@ static ssize_t qm_algqos_read(struct file *filp, char __user *buf,
 
 err_get_status:
        clear_bit(QM_RESETTING, &qm->misc_ctl);
+err_put_dfx_access:
+       hisi_qm_put_dfx_access(qm);
        return ret;
 }
 
@@ -4159,15 +4252,23 @@ static ssize_t qm_algqos_write(struct file *filp, const char __user *buf,
 
        fun_index = device * 8 + function;
 
+       ret = qm_pm_get_sync(qm);
+       if (ret) {
+               ret = -EINVAL;
+               goto err_get_status;
+       }
+
        ret = qm_func_shaper_enable(qm, fun_index, val);
        if (ret) {
                pci_err(qm->pdev, "failed to enable function shaper!\n");
                ret = -EINVAL;
-               goto err_get_status;
+               goto err_put_sync;
        }
 
-       ret =  count;
+       ret = count;
 
+err_put_sync:
+       qm_pm_put_sync(qm);
 err_get_status:
        clear_bit(QM_RESETTING, &qm->misc_ctl);
        return ret;
@@ -4245,7 +4346,7 @@ EXPORT_SYMBOL_GPL(hisi_qm_debug_init);
  */
 void hisi_qm_debug_regs_clear(struct hisi_qm *qm)
 {
-       struct qm_dfx_registers *regs;
+       const struct debugfs_reg32 *regs;
        int i;
 
        /* clear current_qm */
@@ -4264,7 +4365,7 @@ void hisi_qm_debug_regs_clear(struct hisi_qm *qm)
 
        regs = qm_dfx_regs;
        for (i = 0; i < CNT_CYC_REGS_NUM; i++) {
-               readl(qm->io_base + regs->reg_offset);
+               readl(qm->io_base + regs->offset);
                regs++;
        }
 
@@ -4287,19 +4388,23 @@ int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs)
        struct hisi_qm *qm = pci_get_drvdata(pdev);
        int pre_existing_vfs, num_vfs, total_vfs, ret;
 
+       ret = qm_pm_get_sync(qm);
+       if (ret)
+               return ret;
+
        total_vfs = pci_sriov_get_totalvfs(pdev);
        pre_existing_vfs = pci_num_vf(pdev);
        if (pre_existing_vfs) {
                pci_err(pdev, "%d VFs already enabled. Please disable pre-enabled VFs!\n",
                        pre_existing_vfs);
-               return 0;
+               goto err_put_sync;
        }
 
        num_vfs = min_t(int, max_vfs, total_vfs);
        ret = qm_vf_q_assign(qm, num_vfs);
        if (ret) {
                pci_err(pdev, "Can't assign queues for VF!\n");
-               return ret;
+               goto err_put_sync;
        }
 
        qm->vfs_num = num_vfs;
@@ -4308,12 +4413,16 @@ int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs)
        if (ret) {
                pci_err(pdev, "Can't enable VF!\n");
                qm_clear_vft_config(qm);
-               return ret;
+               goto err_put_sync;
        }
 
        pci_info(pdev, "VF enabled, vfs_num(=%d)!\n", num_vfs);
 
        return num_vfs;
+
+err_put_sync:
+       qm_pm_put_sync(qm);
+       return ret;
 }
 EXPORT_SYMBOL_GPL(hisi_qm_sriov_enable);
 
@@ -4328,6 +4437,7 @@ int hisi_qm_sriov_disable(struct pci_dev *pdev, bool is_frozen)
 {
        struct hisi_qm *qm = pci_get_drvdata(pdev);
        int total_vfs = pci_sriov_get_totalvfs(qm->pdev);
+       int ret;
 
        if (pci_vfs_assigned(pdev)) {
                pci_err(pdev, "Failed to disable VFs as VFs are assigned!\n");
@@ -4343,8 +4453,13 @@ int hisi_qm_sriov_disable(struct pci_dev *pdev, bool is_frozen)
        pci_disable_sriov(pdev);
        /* clear vf function shaper configure array */
        memset(qm->factor + 1, 0, sizeof(struct qm_shaper_factor) * total_vfs);
+       ret = qm_clear_vft_config(qm);
+       if (ret)
+               return ret;
 
-       return qm_clear_vft_config(qm);
+       qm_pm_put_sync(qm);
+
+       return 0;
 }
 EXPORT_SYMBOL_GPL(hisi_qm_sriov_disable);
 
@@ -5164,11 +5279,18 @@ static void hisi_qm_controller_reset(struct work_struct *rst_work)
        struct hisi_qm *qm = container_of(rst_work, struct hisi_qm, rst_work);
        int ret;
 
+       ret = qm_pm_get_sync(qm);
+       if (ret) {
+               clear_bit(QM_RST_SCHED, &qm->misc_ctl);
+               return;
+       }
+
        /* reset pcie device controller */
        ret = qm_controller_reset(qm);
        if (ret)
                dev_err(&qm->pdev->dev, "controller reset failed (%d)\n", ret);
 
+       qm_pm_put_sync(qm);
 }
 
 static void qm_pf_reset_vf_prepare(struct hisi_qm *qm,
@@ -5680,6 +5802,194 @@ err_pci_init:
 }
 EXPORT_SYMBOL_GPL(hisi_qm_init);
 
+/**
+ * hisi_qm_get_dfx_access() - Try to get dfx access.
+ * @qm: pointer to accelerator device.
+ *
+ * Try to get dfx access, so that the caller may read device state.
+ *
+ * If the device is suspended, return failure; otherwise
+ * bump up the runtime PM usage counter.
+ */
+int hisi_qm_get_dfx_access(struct hisi_qm *qm)
+{
+       struct device *dev = &qm->pdev->dev;
+
+       if (pm_runtime_suspended(dev)) {
+               dev_info(dev, "can not read/write - device in suspended.\n");
+               return -EAGAIN;
+       }
+
+       return qm_pm_get_sync(qm);
+}
+EXPORT_SYMBOL_GPL(hisi_qm_get_dfx_access);
+
+/**
+ * hisi_qm_put_dfx_access() - Put dfx access.
+ * @qm: pointer to accelerator device.
+ *
+ * Put dfx access, drop runtime PM usage counter.
+ */
+void hisi_qm_put_dfx_access(struct hisi_qm *qm)
+{
+       qm_pm_put_sync(qm);
+}
+EXPORT_SYMBOL_GPL(hisi_qm_put_dfx_access);
+
+/**
+ * hisi_qm_pm_init() - Initialize qm runtime PM.
+ * @qm: pointer to accelerator device.
+ *
+ * Initialize qm runtime power management.
+ */
+void hisi_qm_pm_init(struct hisi_qm *qm)
+{
+       struct device *dev = &qm->pdev->dev;
+
+       if (qm->fun_type == QM_HW_VF || qm->ver < QM_HW_V3)
+               return;
+
+       pm_runtime_set_autosuspend_delay(dev, QM_AUTOSUSPEND_DELAY);
+       pm_runtime_use_autosuspend(dev);
+       pm_runtime_put_noidle(dev);
+}
+EXPORT_SYMBOL_GPL(hisi_qm_pm_init);
+
+/**
+ * hisi_qm_pm_uninit() - Uninitialize qm runtime PM.
+ * @qm: pointer to accelerator device.
+ *
+ * Uninitialize qm runtime power management.
+ */
+void hisi_qm_pm_uninit(struct hisi_qm *qm)
+{
+       struct device *dev = &qm->pdev->dev;
+
+       if (qm->fun_type == QM_HW_VF || qm->ver < QM_HW_V3)
+               return;
+
+       pm_runtime_get_noresume(dev);
+       pm_runtime_dont_use_autosuspend(dev);
+}
+EXPORT_SYMBOL_GPL(hisi_qm_pm_uninit);
+
+static int qm_prepare_for_suspend(struct hisi_qm *qm)
+{
+       struct pci_dev *pdev = qm->pdev;
+       int ret;
+       u32 val;
+
+       ret = qm->ops->set_msi(qm, false);
+       if (ret) {
+               pci_err(pdev, "failed to disable MSI before suspending!\n");
+               return ret;
+       }
+
+       /* shutdown OOO register */
+       writel(ACC_MASTER_GLOBAL_CTRL_SHUTDOWN,
+              qm->io_base + ACC_MASTER_GLOBAL_CTRL);
+
+       ret = readl_relaxed_poll_timeout(qm->io_base + ACC_MASTER_TRANS_RETURN,
+                                        val,
+                                        (val == ACC_MASTER_TRANS_RETURN_RW),
+                                        POLL_PERIOD, POLL_TIMEOUT);
+       if (ret) {
+               pci_emerg(pdev, "Bus lock! Please reset system.\n");
+               return ret;
+       }
+
+       ret = qm_set_pf_mse(qm, false);
+       if (ret)
+               pci_err(pdev, "failed to disable MSE before suspending!\n");
+
+       return ret;
+}
+
+static int qm_rebuild_for_resume(struct hisi_qm *qm)
+{
+       struct pci_dev *pdev = qm->pdev;
+       int ret;
+
+       ret = qm_set_pf_mse(qm, true);
+       if (ret) {
+               pci_err(pdev, "failed to enable MSE after resuming!\n");
+               return ret;
+       }
+
+       ret = qm->ops->set_msi(qm, true);
+       if (ret) {
+               pci_err(pdev, "failed to enable MSI after resuming!\n");
+               return ret;
+       }
+
+       ret = qm_dev_hw_init(qm);
+       if (ret) {
+               pci_err(pdev, "failed to init device after resuming\n");
+               return ret;
+       }
+
+       qm_cmd_init(qm);
+       hisi_qm_dev_err_init(qm);
+
+       return 0;
+}
+
+/**
+ * hisi_qm_suspend() - Runtime suspend of given device.
+ * @dev: device to suspend.
+ *
+ * Runtime-suspend the given device.
+ */
+int hisi_qm_suspend(struct device *dev)
+{
+       struct pci_dev *pdev = to_pci_dev(dev);
+       struct hisi_qm *qm = pci_get_drvdata(pdev);
+       int ret;
+
+       pci_info(pdev, "entering suspended state\n");
+
+       ret = hisi_qm_stop(qm, QM_NORMAL);
+       if (ret) {
+               pci_err(pdev, "failed to stop qm(%d)\n", ret);
+               return ret;
+       }
+
+       ret = qm_prepare_for_suspend(qm);
+       if (ret)
+               pci_err(pdev, "failed to prepare suspended(%d)\n", ret);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(hisi_qm_suspend);
+
+/**
+ * hisi_qm_resume() - Runtime resume of given device.
+ * @dev: device to resume.
+ *
+ * Runtime-resume the given device.
+ */
+int hisi_qm_resume(struct device *dev)
+{
+       struct pci_dev *pdev = to_pci_dev(dev);
+       struct hisi_qm *qm = pci_get_drvdata(pdev);
+       int ret;
+
+       pci_info(pdev, "resuming from suspend state\n");
+
+       ret = qm_rebuild_for_resume(qm);
+       if (ret) {
+               pci_err(pdev, "failed to rebuild resume(%d)\n", ret);
+               return ret;
+       }
+
+       ret = hisi_qm_start(qm);
+       if (ret)
+               pci_err(pdev, "failed to start qm(%d)\n", ret);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(hisi_qm_resume);
+
 MODULE_LICENSE("GPL v2");
 MODULE_AUTHOR("Zhou Wang <wangzhou1@hisilicon.com>");
 MODULE_DESCRIPTION("HiSilicon Accelerator queue manager driver");
index 035eaf8..3068093 100644 (file)
@@ -4,6 +4,7 @@
 #define HISI_ACC_QM_H
 
 #include <linux/bitfield.h>
+#include <linux/debugfs.h>
 #include <linux/iopoll.h>
 #include <linux/module.h>
 #include <linux/pci.h>
@@ -430,4 +431,11 @@ void hisi_qm_dev_shutdown(struct pci_dev *pdev);
 void hisi_qm_wait_task_finish(struct hisi_qm *qm, struct hisi_qm_list *qm_list);
 int hisi_qm_alg_register(struct hisi_qm *qm, struct hisi_qm_list *qm_list);
 void hisi_qm_alg_unregister(struct hisi_qm *qm, struct hisi_qm_list *qm_list);
+int hisi_qm_resume(struct device *dev);
+int hisi_qm_suspend(struct device *dev);
+void hisi_qm_pm_uninit(struct hisi_qm *qm);
+void hisi_qm_pm_init(struct hisi_qm *qm);
+int hisi_qm_get_dfx_access(struct hisi_qm *qm);
+void hisi_qm_put_dfx_access(struct hisi_qm *qm);
+void hisi_qm_regs_dump(struct seq_file *s, struct debugfs_regset32 *regset);
 #endif
index 018415b..d97cf02 100644 (file)
@@ -157,11 +157,6 @@ struct sec_ctx {
        struct device *dev;
 };
 
-enum sec_endian {
-       SEC_LE = 0,
-       SEC_32BE,
-       SEC_64BE
-};
 
 enum sec_debug_file_index {
        SEC_CLEAR_ENABLE,
index 490db7b..90551bf 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/pci.h>
+#include <linux/pm_runtime.h>
 #include <linux/seq_file.h>
 #include <linux/topology.h>
 #include <linux/uacce.h>
 #define SEC_MEM_START_INIT_REG 0x301100
 #define SEC_MEM_INIT_DONE_REG          0x301104
 
+/* clock gating */
 #define SEC_CONTROL_REG                0x301200
-#define SEC_TRNG_EN_SHIFT              8
+#define SEC_DYNAMIC_GATE_REG           0x30121c
+#define SEC_CORE_AUTO_GATE             0x30212c
+#define SEC_DYNAMIC_GATE_EN            0x7bff
+#define SEC_CORE_AUTO_GATE_EN          GENMASK(3, 0)
 #define SEC_CLK_GATE_ENABLE            BIT(3)
 #define SEC_CLK_GATE_DISABLE           (~BIT(3))
+
+#define SEC_TRNG_EN_SHIFT              8
 #define SEC_AXI_SHUTDOWN_ENABLE        BIT(12)
 #define SEC_AXI_SHUTDOWN_DISABLE       0xFFFFEFFF
 
@@ -312,31 +319,20 @@ static const struct pci_device_id sec_dev_ids[] = {
 };
 MODULE_DEVICE_TABLE(pci, sec_dev_ids);
 
-static u8 sec_get_endian(struct hisi_qm *qm)
+static void sec_set_endian(struct hisi_qm *qm)
 {
        u32 reg;
 
-       /*
-        * As for VF, it is a wrong way to get endian setting by
-        * reading a register of the engine
-        */
-       if (qm->pdev->is_virtfn) {
-               dev_err_ratelimited(&qm->pdev->dev,
-                                   "cannot access a register in VF!\n");
-               return SEC_LE;
-       }
        reg = readl_relaxed(qm->io_base + SEC_CONTROL_REG);
-       /* BD little endian mode */
-       if (!(reg & BIT(0)))
-               return SEC_LE;
+       reg &= ~(BIT(1) | BIT(0));
+       if (!IS_ENABLED(CONFIG_64BIT))
+               reg |= BIT(1);
 
-       /* BD 32-bits big endian mode */
-       else if (!(reg & BIT(1)))
-               return SEC_32BE;
 
-       /* BD 64-bits big endian mode */
-       else
-               return SEC_64BE;
+       if (!IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN))
+               reg |= BIT(0);
+
+       writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG);
 }
 
 static void sec_open_sva_prefetch(struct hisi_qm *qm)
@@ -378,15 +374,43 @@ static void sec_close_sva_prefetch(struct hisi_qm *qm)
                pci_err(qm->pdev, "failed to close sva prefetch\n");
 }
 
+static void sec_enable_clock_gate(struct hisi_qm *qm)
+{
+       u32 val;
+
+       if (qm->ver < QM_HW_V3)
+               return;
+
+       val = readl_relaxed(qm->io_base + SEC_CONTROL_REG);
+       val |= SEC_CLK_GATE_ENABLE;
+       writel_relaxed(val, qm->io_base + SEC_CONTROL_REG);
+
+       val = readl(qm->io_base + SEC_DYNAMIC_GATE_REG);
+       val |= SEC_DYNAMIC_GATE_EN;
+       writel(val, qm->io_base + SEC_DYNAMIC_GATE_REG);
+
+       val = readl(qm->io_base + SEC_CORE_AUTO_GATE);
+       val |= SEC_CORE_AUTO_GATE_EN;
+       writel(val, qm->io_base + SEC_CORE_AUTO_GATE);
+}
+
+static void sec_disable_clock_gate(struct hisi_qm *qm)
+{
+       u32 val;
+
+       /* Kunpeng920 needs to close clock gating */
+       val = readl_relaxed(qm->io_base + SEC_CONTROL_REG);
+       val &= SEC_CLK_GATE_DISABLE;
+       writel_relaxed(val, qm->io_base + SEC_CONTROL_REG);
+}
+
 static int sec_engine_init(struct hisi_qm *qm)
 {
        int ret;
        u32 reg;
 
-       /* disable clock gate control */
-       reg = readl_relaxed(qm->io_base + SEC_CONTROL_REG);
-       reg &= SEC_CLK_GATE_DISABLE;
-       writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG);
+       /* disable clock gate control before mem init */
+       sec_disable_clock_gate(qm);
 
        writel_relaxed(0x1, qm->io_base + SEC_MEM_START_INIT_REG);
 
@@ -429,9 +453,9 @@ static int sec_engine_init(struct hisi_qm *qm)
                       qm->io_base + SEC_BD_ERR_CHK_EN_REG3);
 
        /* config endian */
-       reg = readl_relaxed(qm->io_base + SEC_CONTROL_REG);
-       reg |= sec_get_endian(qm);
-       writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG);
+       sec_set_endian(qm);
+
+       sec_enable_clock_gate(qm);
 
        return 0;
 }
@@ -533,17 +557,14 @@ static void sec_hw_error_disable(struct hisi_qm *qm)
        writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_NFE_REG);
 }
 
-static u32 sec_clear_enable_read(struct sec_debug_file *file)
+static u32 sec_clear_enable_read(struct hisi_qm *qm)
 {
-       struct hisi_qm *qm = file->qm;
-
        return readl(qm->io_base + SEC_CTRL_CNT_CLR_CE) &
                        SEC_CTRL_CNT_CLR_CE_BIT;
 }
 
-static int sec_clear_enable_write(struct sec_debug_file *file, u32 val)
+static int sec_clear_enable_write(struct hisi_qm *qm, u32 val)
 {
-       struct hisi_qm *qm = file->qm;
        u32 tmp;
 
        if (val != 1 && val)
@@ -561,24 +582,34 @@ static ssize_t sec_debug_read(struct file *filp, char __user *buf,
 {
        struct sec_debug_file *file = filp->private_data;
        char tbuf[SEC_DBGFS_VAL_MAX_LEN];
+       struct hisi_qm *qm = file->qm;
        u32 val;
        int ret;
 
+       ret = hisi_qm_get_dfx_access(qm);
+       if (ret)
+               return ret;
+
        spin_lock_irq(&file->lock);
 
        switch (file->index) {
        case SEC_CLEAR_ENABLE:
-               val = sec_clear_enable_read(file);
+               val = sec_clear_enable_read(qm);
                break;
        default:
-               spin_unlock_irq(&file->lock);
-               return -EINVAL;
+               goto err_input;
        }
 
        spin_unlock_irq(&file->lock);
-       ret = snprintf(tbuf, SEC_DBGFS_VAL_MAX_LEN, "%u\n", val);
 
+       hisi_qm_put_dfx_access(qm);
+       ret = snprintf(tbuf, SEC_DBGFS_VAL_MAX_LEN, "%u\n", val);
        return simple_read_from_buffer(buf, count, pos, tbuf, ret);
+
+err_input:
+       spin_unlock_irq(&file->lock);
+       hisi_qm_put_dfx_access(qm);
+       return -EINVAL;
 }
 
 static ssize_t sec_debug_write(struct file *filp, const char __user *buf,
@@ -586,6 +617,7 @@ static ssize_t sec_debug_write(struct file *filp, const char __user *buf,
 {
        struct sec_debug_file *file = filp->private_data;
        char tbuf[SEC_DBGFS_VAL_MAX_LEN];
+       struct hisi_qm *qm = file->qm;
        unsigned long val;
        int len, ret;
 
@@ -604,11 +636,15 @@ static ssize_t sec_debug_write(struct file *filp, const char __user *buf,
        if (kstrtoul(tbuf, 0, &val))
                return -EFAULT;
 
+       ret = hisi_qm_get_dfx_access(qm);
+       if (ret)
+               return ret;
+
        spin_lock_irq(&file->lock);
 
        switch (file->index) {
        case SEC_CLEAR_ENABLE:
-               ret = sec_clear_enable_write(file, val);
+               ret = sec_clear_enable_write(qm, val);
                if (ret)
                        goto err_input;
                break;
@@ -617,12 +653,11 @@ static ssize_t sec_debug_write(struct file *filp, const char __user *buf,
                goto err_input;
        }
 
-       spin_unlock_irq(&file->lock);
-
-       return count;
+       ret = count;
 
  err_input:
        spin_unlock_irq(&file->lock);
+       hisi_qm_put_dfx_access(qm);
        return ret;
 }
 
@@ -653,6 +688,15 @@ static int sec_debugfs_atomic64_set(void *data, u64 val)
 DEFINE_DEBUGFS_ATTRIBUTE(sec_atomic64_ops, sec_debugfs_atomic64_get,
                         sec_debugfs_atomic64_set, "%lld\n");
 
+static int sec_regs_show(struct seq_file *s, void *unused)
+{
+       hisi_qm_regs_dump(s, s->private);
+
+       return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(sec_regs);
+
 static int sec_core_debug_init(struct hisi_qm *qm)
 {
        struct sec_dev *sec = container_of(qm, struct sec_dev, qm);
@@ -671,9 +715,10 @@ static int sec_core_debug_init(struct hisi_qm *qm)
        regset->regs = sec_dfx_regs;
        regset->nregs = ARRAY_SIZE(sec_dfx_regs);
        regset->base = qm->io_base;
+       regset->dev = dev;
 
        if (qm->pdev->device == SEC_PF_PCI_DEVICE_ID)
-               debugfs_create_regset32("regs", 0444, tmp_d, regset);
+               debugfs_create_file("regs", 0444, tmp_d, regset, &sec_regs_fops);
 
        for (i = 0; i < ARRAY_SIZE(sec_dfx_labels); i++) {
                atomic64_t *data = (atomic64_t *)((uintptr_t)dfx +
@@ -981,10 +1026,13 @@ static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
                        goto err_alg_unregister;
        }
 
+       hisi_qm_pm_init(qm);
+
        return 0;
 
 err_alg_unregister:
-       hisi_qm_alg_unregister(qm, &sec_devices);
+       if (qm->qp_num >= ctx_q_num)
+               hisi_qm_alg_unregister(qm, &sec_devices);
 err_qm_stop:
        sec_debugfs_exit(qm);
        hisi_qm_stop(qm, QM_NORMAL);
@@ -999,6 +1047,7 @@ static void sec_remove(struct pci_dev *pdev)
 {
        struct hisi_qm *qm = pci_get_drvdata(pdev);
 
+       hisi_qm_pm_uninit(qm);
        hisi_qm_wait_task_finish(qm, &sec_devices);
        if (qm->qp_num >= ctx_q_num)
                hisi_qm_alg_unregister(qm, &sec_devices);
@@ -1018,6 +1067,10 @@ static void sec_remove(struct pci_dev *pdev)
        sec_qm_uninit(qm);
 }
 
+static const struct dev_pm_ops sec_pm_ops = {
+       SET_RUNTIME_PM_OPS(hisi_qm_suspend, hisi_qm_resume, NULL)
+};
+
 static const struct pci_error_handlers sec_err_handler = {
        .error_detected = hisi_qm_dev_err_detected,
        .slot_reset     = hisi_qm_dev_slot_reset,
@@ -1033,6 +1086,7 @@ static struct pci_driver sec_pci_driver = {
        .err_handler = &sec_err_handler,
        .sriov_configure = hisi_qm_sriov_configure,
        .shutdown = hisi_qm_dev_shutdown,
+       .driver.pm = &sec_pm_ops,
 };
 
 static void sec_register_debugfs(void)
index f8482ce..7148201 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/pci.h>
+#include <linux/pm_runtime.h>
 #include <linux/seq_file.h>
 #include <linux/topology.h>
 #include <linux/uacce.h>
 #define HZIP_DELAY_1_US                1
 #define HZIP_POLL_TIMEOUT_US   1000
 
+/* clock gating */
+#define HZIP_PEH_CFG_AUTO_GATE         0x3011A8
+#define HZIP_PEH_CFG_AUTO_GATE_EN      BIT(0)
+#define HZIP_CORE_GATED_EN             GENMASK(15, 8)
+#define HZIP_CORE_GATED_OOO_EN         BIT(29)
+#define HZIP_CLOCK_GATED_EN            (HZIP_CORE_GATED_EN | \
+                                        HZIP_CORE_GATED_OOO_EN)
+
 static const char hisi_zip_name[] = "hisi_zip";
 static struct dentry *hzip_debugfs_root;
 
@@ -312,6 +321,22 @@ static void hisi_zip_close_sva_prefetch(struct hisi_qm *qm)
                pci_err(qm->pdev, "failed to close sva prefetch\n");
 }
 
+static void hisi_zip_enable_clock_gate(struct hisi_qm *qm)
+{
+       u32 val;
+
+       if (qm->ver < QM_HW_V3)
+               return;
+
+       val = readl(qm->io_base + HZIP_CLOCK_GATE_CTRL);
+       val |= HZIP_CLOCK_GATED_EN;
+       writel(val, qm->io_base + HZIP_CLOCK_GATE_CTRL);
+
+       val = readl(qm->io_base + HZIP_PEH_CFG_AUTO_GATE);
+       val |= HZIP_PEH_CFG_AUTO_GATE_EN;
+       writel(val, qm->io_base + HZIP_PEH_CFG_AUTO_GATE);
+}
+
 static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm)
 {
        void __iomem *base = qm->io_base;
@@ -359,6 +384,8 @@ static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm)
               CQC_CACHE_WB_ENABLE | FIELD_PREP(SQC_CACHE_WB_THRD, 1) |
               FIELD_PREP(CQC_CACHE_WB_THRD, 1), base + QM_CACHE_CTL);
 
+       hisi_zip_enable_clock_gate(qm);
+
        return 0;
 }
 
@@ -423,17 +450,14 @@ static inline struct hisi_qm *file_to_qm(struct ctrl_debug_file *file)
        return &hisi_zip->qm;
 }
 
-static u32 clear_enable_read(struct ctrl_debug_file *file)
+static u32 clear_enable_read(struct hisi_qm *qm)
 {
-       struct hisi_qm *qm = file_to_qm(file);
-
        return readl(qm->io_base + HZIP_SOFT_CTRL_CNT_CLR_CE) &
                     HZIP_SOFT_CTRL_CNT_CLR_CE_BIT;
 }
 
-static int clear_enable_write(struct ctrl_debug_file *file, u32 val)
+static int clear_enable_write(struct hisi_qm *qm, u32 val)
 {
-       struct hisi_qm *qm = file_to_qm(file);
        u32 tmp;
 
        if (val != 1 && val != 0)
@@ -450,22 +474,33 @@ static ssize_t hisi_zip_ctrl_debug_read(struct file *filp, char __user *buf,
                                        size_t count, loff_t *pos)
 {
        struct ctrl_debug_file *file = filp->private_data;
+       struct hisi_qm *qm = file_to_qm(file);
        char tbuf[HZIP_BUF_SIZE];
        u32 val;
        int ret;
 
+       ret = hisi_qm_get_dfx_access(qm);
+       if (ret)
+               return ret;
+
        spin_lock_irq(&file->lock);
        switch (file->index) {
        case HZIP_CLEAR_ENABLE:
-               val = clear_enable_read(file);
+               val = clear_enable_read(qm);
                break;
        default:
-               spin_unlock_irq(&file->lock);
-               return -EINVAL;
+               goto err_input;
        }
        spin_unlock_irq(&file->lock);
+
+       hisi_qm_put_dfx_access(qm);
        ret = scnprintf(tbuf, sizeof(tbuf), "%u\n", val);
        return simple_read_from_buffer(buf, count, pos, tbuf, ret);
+
+err_input:
+       spin_unlock_irq(&file->lock);
+       hisi_qm_put_dfx_access(qm);
+       return -EINVAL;
 }
 
 static ssize_t hisi_zip_ctrl_debug_write(struct file *filp,
@@ -473,6 +508,7 @@ static ssize_t hisi_zip_ctrl_debug_write(struct file *filp,
                                         size_t count, loff_t *pos)
 {
        struct ctrl_debug_file *file = filp->private_data;
+       struct hisi_qm *qm = file_to_qm(file);
        char tbuf[HZIP_BUF_SIZE];
        unsigned long val;
        int len, ret;
@@ -491,10 +527,14 @@ static ssize_t hisi_zip_ctrl_debug_write(struct file *filp,
        if (kstrtoul(tbuf, 0, &val))
                return -EFAULT;
 
+       ret = hisi_qm_get_dfx_access(qm);
+       if (ret)
+               return ret;
+
        spin_lock_irq(&file->lock);
        switch (file->index) {
        case HZIP_CLEAR_ENABLE:
-               ret = clear_enable_write(file, val);
+               ret = clear_enable_write(qm, val);
                if (ret)
                        goto err_input;
                break;
@@ -502,12 +542,12 @@ static ssize_t hisi_zip_ctrl_debug_write(struct file *filp,
                ret = -EINVAL;
                goto err_input;
        }
-       spin_unlock_irq(&file->lock);
 
-       return count;
+       ret = count;
 
 err_input:
        spin_unlock_irq(&file->lock);
+       hisi_qm_put_dfx_access(qm);
        return ret;
 }
 
@@ -538,6 +578,15 @@ static int zip_debugfs_atomic64_get(void *data, u64 *val)
 DEFINE_DEBUGFS_ATTRIBUTE(zip_atomic64_ops, zip_debugfs_atomic64_get,
                         zip_debugfs_atomic64_set, "%llu\n");
 
+static int hisi_zip_regs_show(struct seq_file *s, void *unused)
+{
+       hisi_qm_regs_dump(s, s->private);
+
+       return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(hisi_zip_regs);
+
 static int hisi_zip_core_debug_init(struct hisi_qm *qm)
 {
        struct device *dev = &qm->pdev->dev;
@@ -560,9 +609,11 @@ static int hisi_zip_core_debug_init(struct hisi_qm *qm)
                regset->regs = hzip_dfx_regs;
                regset->nregs = ARRAY_SIZE(hzip_dfx_regs);
                regset->base = qm->io_base + core_offsets[i];
+               regset->dev = dev;
 
                tmp_d = debugfs_create_dir(buf, qm->debug.debug_root);
-               debugfs_create_regset32("regs", 0444, tmp_d, regset);
+               debugfs_create_file("regs", 0444, tmp_d, regset,
+                                    &hisi_zip_regs_fops);
        }
 
        return 0;
@@ -898,6 +949,8 @@ static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id)
                        goto err_qm_alg_unregister;
        }
 
+       hisi_qm_pm_init(qm);
+
        return 0;
 
 err_qm_alg_unregister:
@@ -920,6 +973,7 @@ static void hisi_zip_remove(struct pci_dev *pdev)
 {
        struct hisi_qm *qm = pci_get_drvdata(pdev);
 
+       hisi_qm_pm_uninit(qm);
        hisi_qm_wait_task_finish(qm, &zip_devices);
        hisi_qm_alg_unregister(qm, &zip_devices);
 
@@ -932,6 +986,10 @@ static void hisi_zip_remove(struct pci_dev *pdev)
        hisi_zip_qm_uninit(qm);
 }
 
+static const struct dev_pm_ops hisi_zip_pm_ops = {
+       SET_RUNTIME_PM_OPS(hisi_qm_suspend, hisi_qm_resume, NULL)
+};
+
 static const struct pci_error_handlers hisi_zip_err_handler = {
        .error_detected = hisi_qm_dev_err_detected,
        .slot_reset     = hisi_qm_dev_slot_reset,
@@ -948,6 +1006,7 @@ static struct pci_driver hisi_zip_pci_driver = {
                                        hisi_qm_sriov_configure : NULL,
        .err_handler            = &hisi_zip_err_handler,
        .shutdown               = hisi_qm_dev_shutdown,
+       .driver.pm              = &hisi_zip_pm_ops,
 };
 
 static void hisi_zip_register_debugfs(void)
index d6a7784..d19e5ff 100644 (file)
@@ -170,15 +170,19 @@ static struct dcp *global_sdcp;
 
 static int mxs_dcp_start_dma(struct dcp_async_ctx *actx)
 {
+       int dma_err;
        struct dcp *sdcp = global_sdcp;
        const int chan = actx->chan;
        uint32_t stat;
        unsigned long ret;
        struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
-
        dma_addr_t desc_phys = dma_map_single(sdcp->dev, desc, sizeof(*desc),
                                              DMA_TO_DEVICE);
 
+       dma_err = dma_mapping_error(sdcp->dev, desc_phys);
+       if (dma_err)
+               return dma_err;
+
        reinit_completion(&sdcp->completion[chan]);
 
        /* Clear status register. */
@@ -216,18 +220,29 @@ static int mxs_dcp_start_dma(struct dcp_async_ctx *actx)
 static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
                           struct skcipher_request *req, int init)
 {
+       dma_addr_t key_phys, src_phys, dst_phys;
        struct dcp *sdcp = global_sdcp;
        struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
        struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
        int ret;
 
-       dma_addr_t key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key,
-                                            2 * AES_KEYSIZE_128,
-                                            DMA_TO_DEVICE);
-       dma_addr_t src_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_in_buf,
-                                            DCP_BUF_SZ, DMA_TO_DEVICE);
-       dma_addr_t dst_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_out_buf,
-                                            DCP_BUF_SZ, DMA_FROM_DEVICE);
+       key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key,
+                                 2 * AES_KEYSIZE_128, DMA_TO_DEVICE);
+       ret = dma_mapping_error(sdcp->dev, key_phys);
+       if (ret)
+               return ret;
+
+       src_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_in_buf,
+                                 DCP_BUF_SZ, DMA_TO_DEVICE);
+       ret = dma_mapping_error(sdcp->dev, src_phys);
+       if (ret)
+               goto err_src;
+
+       dst_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_out_buf,
+                                 DCP_BUF_SZ, DMA_FROM_DEVICE);
+       ret = dma_mapping_error(sdcp->dev, dst_phys);
+       if (ret)
+               goto err_dst;
 
        if (actx->fill % AES_BLOCK_SIZE) {
                dev_err(sdcp->dev, "Invalid block size!\n");
@@ -265,10 +280,12 @@ static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
        ret = mxs_dcp_start_dma(actx);
 
 aes_done_run:
+       dma_unmap_single(sdcp->dev, dst_phys, DCP_BUF_SZ, DMA_FROM_DEVICE);
+err_dst:
+       dma_unmap_single(sdcp->dev, src_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
+err_src:
        dma_unmap_single(sdcp->dev, key_phys, 2 * AES_KEYSIZE_128,
                         DMA_TO_DEVICE);
-       dma_unmap_single(sdcp->dev, src_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
-       dma_unmap_single(sdcp->dev, dst_phys, DCP_BUF_SZ, DMA_FROM_DEVICE);
 
        return ret;
 }
@@ -283,21 +300,20 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
 
        struct scatterlist *dst = req->dst;
        struct scatterlist *src = req->src;
-       const int nents = sg_nents(req->src);
+       int dst_nents = sg_nents(dst);
 
        const int out_off = DCP_BUF_SZ;
        uint8_t *in_buf = sdcp->coh->aes_in_buf;
        uint8_t *out_buf = sdcp->coh->aes_out_buf;
 
-       uint8_t *out_tmp, *src_buf, *dst_buf = NULL;
        uint32_t dst_off = 0;
+       uint8_t *src_buf = NULL;
        uint32_t last_out_len = 0;
 
        uint8_t *key = sdcp->coh->aes_key;
 
        int ret = 0;
-       int split = 0;
-       unsigned int i, len, clen, rem = 0, tlen = 0;
+       unsigned int i, len, clen, tlen = 0;
        int init = 0;
        bool limit_hit = false;
 
@@ -315,7 +331,7 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
                memset(key + AES_KEYSIZE_128, 0, AES_KEYSIZE_128);
        }
 
-       for_each_sg(req->src, src, nents, i) {
+       for_each_sg(req->src, src, sg_nents(src), i) {
                src_buf = sg_virt(src);
                len = sg_dma_len(src);
                tlen += len;
@@ -340,34 +356,17 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
                         * submit the buffer.
                         */
                        if (actx->fill == out_off || sg_is_last(src) ||
-                               limit_hit) {
+                           limit_hit) {
                                ret = mxs_dcp_run_aes(actx, req, init);
                                if (ret)
                                        return ret;
                                init = 0;
 
-                               out_tmp = out_buf;
+                               sg_pcopy_from_buffer(dst, dst_nents, out_buf,
+                                                    actx->fill, dst_off);
+                               dst_off += actx->fill;
                                last_out_len = actx->fill;
-                               while (dst && actx->fill) {
-                                       if (!split) {
-                                               dst_buf = sg_virt(dst);
-                                               dst_off = 0;
-                                       }
-                                       rem = min(sg_dma_len(dst) - dst_off,
-                                                 actx->fill);
-
-                                       memcpy(dst_buf + dst_off, out_tmp, rem);
-                                       out_tmp += rem;
-                                       dst_off += rem;
-                                       actx->fill -= rem;
-
-                                       if (dst_off == sg_dma_len(dst)) {
-                                               dst = sg_next(dst);
-                                               split = 0;
-                                       } else {
-                                               split = 1;
-                                       }
-                               }
+                               actx->fill = 0;
                        }
                } while (len);
 
@@ -557,6 +556,10 @@ static int mxs_dcp_run_sha(struct ahash_request *req)
        dma_addr_t buf_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_in_buf,
                                             DCP_BUF_SZ, DMA_TO_DEVICE);
 
+       ret = dma_mapping_error(sdcp->dev, buf_phys);
+       if (ret)
+               return ret;
+
        /* Fill in the DMA descriptor. */
        desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
                    MXS_DCP_CONTROL0_INTERRUPT |
@@ -589,6 +592,10 @@ static int mxs_dcp_run_sha(struct ahash_request *req)
        if (rctx->fini) {
                digest_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_out_buf,
                                             DCP_SHA_PAY_SZ, DMA_FROM_DEVICE);
+               ret = dma_mapping_error(sdcp->dev, digest_phys);
+               if (ret)
+                       goto done_run;
+
                desc->control0 |= MXS_DCP_CONTROL0_HASH_TERM;
                desc->payload = digest_phys;
        }
index 0dd4c6b..9b968ac 100644 (file)
@@ -1175,9 +1175,9 @@ static int omap_aes_probe(struct platform_device *pdev)
        spin_lock_init(&dd->lock);
 
        INIT_LIST_HEAD(&dd->list);
-       spin_lock(&list_lock);
+       spin_lock_bh(&list_lock);
        list_add_tail(&dd->list, &dev_list);
-       spin_unlock(&list_lock);
+       spin_unlock_bh(&list_lock);
 
        /* Initialize crypto engine */
        dd->engine = crypto_engine_alloc_init(dev, 1);
@@ -1264,9 +1264,9 @@ static int omap_aes_remove(struct platform_device *pdev)
        if (!dd)
                return -ENODEV;
 
-       spin_lock(&list_lock);
+       spin_lock_bh(&list_lock);
        list_del(&dd->list);
-       spin_unlock(&list_lock);
+       spin_unlock_bh(&list_lock);
 
        for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
                for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) {
index 31bdb1d..a4cc6bf 100644 (file)
@@ -210,7 +210,7 @@ void omap_crypto_cleanup(struct scatterlist *sg, struct scatterlist *orig,
        buf = sg_virt(sg);
        pages = get_order(len);
 
-       if (orig && (flags & OMAP_CRYPTO_COPY_MASK))
+       if (orig && (flags & OMAP_CRYPTO_DATA_COPIED))
                omap_crypto_copy_data(sg, orig, offset, len);
 
        if (flags & OMAP_CRYPTO_DATA_COPIED)
index bc86313..be77656 100644 (file)
@@ -1033,9 +1033,9 @@ static int omap_des_probe(struct platform_device *pdev)
 
 
        INIT_LIST_HEAD(&dd->list);
-       spin_lock(&list_lock);
+       spin_lock_bh(&list_lock);
        list_add_tail(&dd->list, &dev_list);
-       spin_unlock(&list_lock);
+       spin_unlock_bh(&list_lock);
 
        /* Initialize des crypto engine */
        dd->engine = crypto_engine_alloc_init(dev, 1);
@@ -1094,9 +1094,9 @@ static int omap_des_remove(struct platform_device *pdev)
        if (!dd)
                return -ENODEV;
 
-       spin_lock(&list_lock);
+       spin_lock_bh(&list_lock);
        list_del(&dd->list);
-       spin_unlock(&list_lock);
+       spin_unlock_bh(&list_lock);
 
        for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
                for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
index dd53ad9..f6bf53c 100644 (file)
 #define FLAGS_FINAL            1
 #define FLAGS_DMA_ACTIVE       2
 #define FLAGS_OUTPUT_READY     3
-#define FLAGS_INIT             4
 #define FLAGS_CPU              5
 #define FLAGS_DMA_READY                6
 #define FLAGS_AUTO_XOR         7
@@ -368,24 +367,6 @@ static void omap_sham_copy_ready_hash(struct ahash_request *req)
                        hash[i] = le32_to_cpup((__le32 *)in + i);
 }
 
-static int omap_sham_hw_init(struct omap_sham_dev *dd)
-{
-       int err;
-
-       err = pm_runtime_resume_and_get(dd->dev);
-       if (err < 0) {
-               dev_err(dd->dev, "failed to get sync: %d\n", err);
-               return err;
-       }
-
-       if (!test_bit(FLAGS_INIT, &dd->flags)) {
-               set_bit(FLAGS_INIT, &dd->flags);
-               dd->err = 0;
-       }
-
-       return 0;
-}
-
 static void omap_sham_write_ctrl_omap2(struct omap_sham_dev *dd, size_t length,
                                 int final, int dma)
 {
@@ -1093,11 +1074,14 @@ static int omap_sham_hash_one_req(struct crypto_engine *engine, void *areq)
        dev_dbg(dd->dev, "hash-one: op: %u, total: %u, digcnt: %zd, final: %d",
                ctx->op, ctx->total, ctx->digcnt, final);
 
-       dd->req = req;
-
-       err = omap_sham_hw_init(dd);
-       if (err)
+       err = pm_runtime_resume_and_get(dd->dev);
+       if (err < 0) {
+               dev_err(dd->dev, "failed to get sync: %d\n", err);
                return err;
+       }
+
+       dd->err = 0;
+       dd->req = req;
 
        if (ctx->digcnt)
                dd->pdata->copy_hash(req, 0);
@@ -1736,7 +1720,7 @@ static void omap_sham_done_task(unsigned long data)
                if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags))
                        goto finish;
        } else if (test_bit(FLAGS_DMA_READY, &dd->flags)) {
-               if (test_and_clear_bit(FLAGS_DMA_ACTIVE, &dd->flags)) {
+               if (test_bit(FLAGS_DMA_ACTIVE, &dd->flags)) {
                        omap_sham_update_dma_stop(dd);
                        if (dd->err) {
                                err = dd->err;
@@ -2129,7 +2113,6 @@ static int omap_sham_probe(struct platform_device *pdev)
        dd->fallback_sz = OMAP_SHA_DMA_THRESHOLD;
 
        pm_runtime_enable(dev);
-       pm_runtime_irq_safe(dev);
 
        err = pm_runtime_get_sync(dev);
        if (err < 0) {
@@ -2144,9 +2127,9 @@ static int omap_sham_probe(struct platform_device *pdev)
                (rev & dd->pdata->major_mask) >> dd->pdata->major_shift,
                (rev & dd->pdata->minor_mask) >> dd->pdata->minor_shift);
 
-       spin_lock(&sham.lock);
+       spin_lock_bh(&sham.lock);
        list_add_tail(&dd->list, &sham.dev_list);
-       spin_unlock(&sham.lock);
+       spin_unlock_bh(&sham.lock);
 
        dd->engine = crypto_engine_alloc_init(dev, 1);
        if (!dd->engine) {
@@ -2194,10 +2177,11 @@ err_algs:
 err_engine_start:
        crypto_engine_exit(dd->engine);
 err_engine:
-       spin_lock(&sham.lock);
+       spin_lock_bh(&sham.lock);
        list_del(&dd->list);
-       spin_unlock(&sham.lock);
+       spin_unlock_bh(&sham.lock);
 err_pm:
+       pm_runtime_dont_use_autosuspend(dev);
        pm_runtime_disable(dev);
        if (!dd->polling_mode)
                dma_release_channel(dd->dma_lch);
@@ -2215,9 +2199,9 @@ static int omap_sham_remove(struct platform_device *pdev)
        dd = platform_get_drvdata(pdev);
        if (!dd)
                return -ENODEV;
-       spin_lock(&sham.lock);
+       spin_lock_bh(&sham.lock);
        list_del(&dd->list);
-       spin_unlock(&sham.lock);
+       spin_unlock_bh(&sham.lock);
        for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
                for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) {
                        crypto_unregister_ahash(
@@ -2225,6 +2209,7 @@ static int omap_sham_remove(struct platform_device *pdev)
                        dd->pdata->algs_info[i].registered--;
                }
        tasklet_kill(&dd->done_task);
+       pm_runtime_dont_use_autosuspend(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
 
        if (!dd->polling_mode)
@@ -2235,32 +2220,11 @@ static int omap_sham_remove(struct platform_device *pdev)
        return 0;
 }
 
-#ifdef CONFIG_PM_SLEEP
-static int omap_sham_suspend(struct device *dev)
-{
-       pm_runtime_put_sync(dev);
-       return 0;
-}
-
-static int omap_sham_resume(struct device *dev)
-{
-       int err = pm_runtime_resume_and_get(dev);
-       if (err < 0) {
-               dev_err(dev, "failed to get sync: %d\n", err);
-               return err;
-       }
-       return 0;
-}
-#endif
-
-static SIMPLE_DEV_PM_OPS(omap_sham_pm_ops, omap_sham_suspend, omap_sham_resume);
-
 static struct platform_driver omap_sham_driver = {
        .probe  = omap_sham_probe,
        .remove = omap_sham_remove,
        .driver = {
                .name   = "omap-sham",
-               .pm     = &omap_sham_pm_ops,
                .of_match_table = omap_sham_of_match,
        },
 };
index 3524ddd..33d8e50 100644 (file)
@@ -161,7 +161,7 @@ static void adf_enable_ints(struct adf_accel_dev *accel_dev)
        ADF_CSR_WR(addr, ADF_4XXX_SMIAPF_MASK_OFFSET, 0);
 }
 
-static int adf_pf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev)
+static int adf_enable_pf2vf_comms(struct adf_accel_dev *accel_dev)
 {
        return 0;
 }
@@ -210,21 +210,21 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data)
        hw_data->fw_mmp_name = ADF_4XXX_MMP;
        hw_data->init_admin_comms = adf_init_admin_comms;
        hw_data->exit_admin_comms = adf_exit_admin_comms;
-       hw_data->disable_iov = adf_disable_sriov;
        hw_data->send_admin_init = adf_send_admin_init;
        hw_data->init_arb = adf_init_arb;
        hw_data->exit_arb = adf_exit_arb;
        hw_data->get_arb_mapping = adf_get_arbiter_mapping;
        hw_data->enable_ints = adf_enable_ints;
-       hw_data->enable_vf2pf_comms = adf_pf_enable_vf2pf_comms;
        hw_data->reset_device = adf_reset_flr;
-       hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION;
        hw_data->admin_ae_mask = ADF_4XXX_ADMIN_AE_MASK;
        hw_data->uof_get_num_objs = uof_get_num_objs;
        hw_data->uof_get_name = uof_get_name;
        hw_data->uof_get_ae_mask = uof_get_ae_mask;
        hw_data->set_msix_rttable = set_msix_default_rttable;
        hw_data->set_ssm_wdtimer = adf_gen4_set_ssm_wdtimer;
+       hw_data->enable_pfvf_comms = adf_enable_pf2vf_comms;
+       hw_data->disable_iov = adf_disable_sriov;
+       hw_data->min_iov_compat_ver = ADF_PFVF_COMPAT_THIS_VERSION;
 
        adf_gen4_init_hw_csr_ops(&hw_data->csr_ops);
 }
index a8805c8..359fb79 100644 (file)
@@ -221,16 +221,10 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        }
 
        /* Set DMA identifier */
-       if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
-               if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
-                       dev_err(&pdev->dev, "No usable DMA configuration.\n");
-                       ret = -EFAULT;
-                       goto out_err;
-               } else {
-                       pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
-               }
-       } else {
-               pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+       ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+       if (ret) {
+               dev_err(&pdev->dev, "No usable DMA configuration.\n");
+               goto out_err;
        }
 
        /* Get accelerator capabilities mask */
index 1dd64af..3027c01 100644 (file)
@@ -111,11 +111,6 @@ static u32 get_pf2vf_offset(u32 i)
        return ADF_C3XXX_PF2VF_OFFSET(i);
 }
 
-static u32 get_vintmsk_offset(u32 i)
-{
-       return ADF_C3XXX_VINTMSK_OFFSET(i);
-}
-
 static void adf_enable_error_correction(struct adf_accel_dev *accel_dev)
 {
        struct adf_hw_device_data *hw_device = accel_dev->hw_device;
@@ -159,8 +154,10 @@ static void adf_enable_ints(struct adf_accel_dev *accel_dev)
                   ADF_C3XXX_SMIA1_MASK);
 }
 
-static int adf_pf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev)
+static int adf_enable_pf2vf_comms(struct adf_accel_dev *accel_dev)
 {
+       spin_lock_init(&accel_dev->pf.vf2pf_ints_lock);
+
        return 0;
 }
 
@@ -193,8 +190,6 @@ void adf_init_hw_data_c3xxx(struct adf_hw_device_data *hw_data)
        hw_data->get_sram_bar_id = get_sram_bar_id;
        hw_data->get_etr_bar_id = get_etr_bar_id;
        hw_data->get_misc_bar_id = get_misc_bar_id;
-       hw_data->get_pf2vf_offset = get_pf2vf_offset;
-       hw_data->get_vintmsk_offset = get_vintmsk_offset;
        hw_data->get_admin_info = adf_gen2_get_admin_info;
        hw_data->get_arb_info = adf_gen2_get_arb_info;
        hw_data->get_sku = get_sku;
@@ -203,16 +198,18 @@ void adf_init_hw_data_c3xxx(struct adf_hw_device_data *hw_data)
        hw_data->init_admin_comms = adf_init_admin_comms;
        hw_data->exit_admin_comms = adf_exit_admin_comms;
        hw_data->configure_iov_threads = configure_iov_threads;
-       hw_data->disable_iov = adf_disable_sriov;
        hw_data->send_admin_init = adf_send_admin_init;
        hw_data->init_arb = adf_init_arb;
        hw_data->exit_arb = adf_exit_arb;
        hw_data->get_arb_mapping = adf_get_arbiter_mapping;
        hw_data->enable_ints = adf_enable_ints;
-       hw_data->enable_vf2pf_comms = adf_pf_enable_vf2pf_comms;
        hw_data->reset_device = adf_reset_flr;
-       hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION;
        hw_data->set_ssm_wdtimer = adf_gen2_set_ssm_wdtimer;
+       hw_data->get_pf2vf_offset = get_pf2vf_offset;
+       hw_data->enable_pfvf_comms = adf_enable_pf2vf_comms;
+       hw_data->disable_iov = adf_disable_sriov;
+       hw_data->min_iov_compat_ver = ADF_PFVF_COMPAT_THIS_VERSION;
+
        adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
 }
 
index fece8e3..86ee02a 100644 (file)
@@ -29,7 +29,6 @@
 #define ADF_C3XXX_ERRSSMSH_EN BIT(3)
 
 #define ADF_C3XXX_PF2VF_OFFSET(i)      (0x3A000 + 0x280 + ((i) * 0x04))
-#define ADF_C3XXX_VINTMSK_OFFSET(i)    (0x3A000 + 0x200 + ((i) * 0x04))
 
 /* AE to function mapping */
 #define ADF_C3XXX_AE2FUNC_MAP_GRP_A_NUM_REGS 48
index 7fb3343..cc6e75d 100644 (file)
@@ -159,17 +159,10 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        }
 
        /* set dma identifier */
-       if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
-               if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
-                       dev_err(&pdev->dev, "No usable DMA configuration\n");
-                       ret = -EFAULT;
-                       goto out_err_disable;
-               } else {
-                       pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
-               }
-
-       } else {
-               pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+       ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
+       if (ret) {
+               dev_err(&pdev->dev, "No usable DMA configuration\n");
+               goto out_err_disable;
        }
 
        if (pci_request_regions(pdev, ADF_C3XXX_DEVICE_NAME)) {
@@ -208,12 +201,12 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (pci_save_state(pdev)) {
                dev_err(&pdev->dev, "Failed to save pci state\n");
                ret = -ENOMEM;
-               goto out_err_free_reg;
+               goto out_err_disable_aer;
        }
 
        ret = qat_crypto_dev_config(accel_dev);
        if (ret)
-               goto out_err_free_reg;
+               goto out_err_disable_aer;
 
        ret = adf_dev_init(accel_dev);
        if (ret)
@@ -229,6 +222,8 @@ out_err_dev_stop:
        adf_dev_stop(accel_dev);
 out_err_dev_shutdown:
        adf_dev_shutdown(accel_dev);
+out_err_disable_aer:
+       adf_disable_aer(accel_dev);
 out_err_free_reg:
        pci_release_regions(accel_pci_dev->pci_dev);
 out_err_disable:
index 15f6b9b..3e69b52 100644 (file)
@@ -52,11 +52,6 @@ static u32 get_pf2vf_offset(u32 i)
        return ADF_C3XXXIOV_PF2VF_OFFSET;
 }
 
-static u32 get_vintmsk_offset(u32 i)
-{
-       return ADF_C3XXXIOV_VINTMSK_OFFSET;
-}
-
 static int adf_vf_int_noop(struct adf_accel_dev *accel_dev)
 {
        return 0;
@@ -81,10 +76,10 @@ void adf_init_hw_data_c3xxxiov(struct adf_hw_device_data *hw_data)
        hw_data->enable_error_correction = adf_vf_void_noop;
        hw_data->init_admin_comms = adf_vf_int_noop;
        hw_data->exit_admin_comms = adf_vf_void_noop;
-       hw_data->send_admin_init = adf_vf2pf_init;
+       hw_data->send_admin_init = adf_vf2pf_notify_init;
        hw_data->init_arb = adf_vf_int_noop;
        hw_data->exit_arb = adf_vf_void_noop;
-       hw_data->disable_iov = adf_vf2pf_shutdown;
+       hw_data->disable_iov = adf_vf2pf_notify_shutdown;
        hw_data->get_accel_mask = get_accel_mask;
        hw_data->get_ae_mask = get_ae_mask;
        hw_data->get_num_accels = get_num_accels;
@@ -92,11 +87,10 @@ void adf_init_hw_data_c3xxxiov(struct adf_hw_device_data *hw_data)
        hw_data->get_etr_bar_id = get_etr_bar_id;
        hw_data->get_misc_bar_id = get_misc_bar_id;
        hw_data->get_pf2vf_offset = get_pf2vf_offset;
-       hw_data->get_vintmsk_offset = get_vintmsk_offset;
        hw_data->get_sku = get_sku;
        hw_data->enable_ints = adf_vf_void_noop;
-       hw_data->enable_vf2pf_comms = adf_enable_vf2pf_comms;
-       hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION;
+       hw_data->enable_pfvf_comms = adf_enable_vf2pf_comms;
+       hw_data->min_iov_compat_ver = ADF_PFVF_COMPAT_THIS_VERSION;
        hw_data->dev_class->instances++;
        adf_devmgr_update_class_index(hw_data);
        adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
index 7945a9c..f5de4ce 100644 (file)
@@ -13,7 +13,6 @@
 #define ADF_C3XXXIOV_ETR_BAR 0
 #define ADF_C3XXXIOV_ETR_MAX_BANKS 1
 #define ADF_C3XXXIOV_PF2VF_OFFSET      0x200
-#define ADF_C3XXXIOV_VINTMSK_OFFSET    0x208
 
 void adf_init_hw_data_c3xxxiov(struct adf_hw_device_data *hw_data);
 void adf_clean_hw_data_c3xxxiov(struct adf_hw_device_data *hw_data);
index 067ca5e..1df1b86 100644 (file)
@@ -141,17 +141,10 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        }
 
        /* set dma identifier */
-       if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
-               if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
-                       dev_err(&pdev->dev, "No usable DMA configuration\n");
-                       ret = -EFAULT;
-                       goto out_err_disable;
-               } else {
-                       pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
-               }
-
-       } else {
-               pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+       ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
+       if (ret) {
+               dev_err(&pdev->dev, "No usable DMA configuration\n");
+               goto out_err_disable;
        }
 
        if (pci_request_regions(pdev, ADF_C3XXXVF_DEVICE_NAME)) {
@@ -218,6 +211,7 @@ static void adf_remove(struct pci_dev *pdev)
                pr_err("QAT: Driver removal failed\n");
                return;
        }
+       adf_flush_vf_wq(accel_dev);
        adf_dev_stop(accel_dev);
        adf_dev_shutdown(accel_dev);
        adf_cleanup_accel(accel_dev);
index 3033739..b023c80 100644 (file)
@@ -113,11 +113,6 @@ static u32 get_pf2vf_offset(u32 i)
        return ADF_C62X_PF2VF_OFFSET(i);
 }
 
-static u32 get_vintmsk_offset(u32 i)
-{
-       return ADF_C62X_VINTMSK_OFFSET(i);
-}
-
 static void adf_enable_error_correction(struct adf_accel_dev *accel_dev)
 {
        struct adf_hw_device_data *hw_device = accel_dev->hw_device;
@@ -161,8 +156,10 @@ static void adf_enable_ints(struct adf_accel_dev *accel_dev)
                   ADF_C62X_SMIA1_MASK);
 }
 
-static int adf_pf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev)
+static int adf_enable_pf2vf_comms(struct adf_accel_dev *accel_dev)
 {
+       spin_lock_init(&accel_dev->pf.vf2pf_ints_lock);
+
        return 0;
 }
 
@@ -195,8 +192,6 @@ void adf_init_hw_data_c62x(struct adf_hw_device_data *hw_data)
        hw_data->get_sram_bar_id = get_sram_bar_id;
        hw_data->get_etr_bar_id = get_etr_bar_id;
        hw_data->get_misc_bar_id = get_misc_bar_id;
-       hw_data->get_pf2vf_offset = get_pf2vf_offset;
-       hw_data->get_vintmsk_offset = get_vintmsk_offset;
        hw_data->get_admin_info = adf_gen2_get_admin_info;
        hw_data->get_arb_info = adf_gen2_get_arb_info;
        hw_data->get_sku = get_sku;
@@ -205,16 +200,18 @@ void adf_init_hw_data_c62x(struct adf_hw_device_data *hw_data)
        hw_data->init_admin_comms = adf_init_admin_comms;
        hw_data->exit_admin_comms = adf_exit_admin_comms;
        hw_data->configure_iov_threads = configure_iov_threads;
-       hw_data->disable_iov = adf_disable_sriov;
        hw_data->send_admin_init = adf_send_admin_init;
        hw_data->init_arb = adf_init_arb;
        hw_data->exit_arb = adf_exit_arb;
        hw_data->get_arb_mapping = adf_get_arbiter_mapping;
        hw_data->enable_ints = adf_enable_ints;
-       hw_data->enable_vf2pf_comms = adf_pf_enable_vf2pf_comms;
        hw_data->reset_device = adf_reset_flr;
-       hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION;
        hw_data->set_ssm_wdtimer = adf_gen2_set_ssm_wdtimer;
+       hw_data->get_pf2vf_offset = get_pf2vf_offset;
+       hw_data->enable_pfvf_comms = adf_enable_pf2vf_comms;
+       hw_data->disable_iov = adf_disable_sriov;
+       hw_data->min_iov_compat_ver = ADF_PFVF_COMPAT_THIS_VERSION;
+
        adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
 }
 
index 53d3cb5..e6664bd 100644 (file)
@@ -30,7 +30,6 @@
 #define ADF_C62X_ERRSSMSH_EN BIT(3)
 
 #define ADF_C62X_PF2VF_OFFSET(i)       (0x3A000 + 0x280 + ((i) * 0x04))
-#define ADF_C62X_VINTMSK_OFFSET(i)     (0x3A000 + 0x200 + ((i) * 0x04))
 
 /* AE to function mapping */
 #define ADF_C62X_AE2FUNC_MAP_GRP_A_NUM_REGS 80
index 1f5de44..bf251df 100644 (file)
@@ -159,17 +159,10 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        }
 
        /* set dma identifier */
-       if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
-               if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
-                       dev_err(&pdev->dev, "No usable DMA configuration\n");
-                       ret = -EFAULT;
-                       goto out_err_disable;
-               } else {
-                       pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
-               }
-
-       } else {
-               pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+       ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
+       if (ret) {
+               dev_err(&pdev->dev, "No usable DMA configuration\n");
+               goto out_err_disable;
        }
 
        if (pci_request_regions(pdev, ADF_C62X_DEVICE_NAME)) {
@@ -208,12 +201,12 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (pci_save_state(pdev)) {
                dev_err(&pdev->dev, "Failed to save pci state\n");
                ret = -ENOMEM;
-               goto out_err_free_reg;
+               goto out_err_disable_aer;
        }
 
        ret = qat_crypto_dev_config(accel_dev);
        if (ret)
-               goto out_err_free_reg;
+               goto out_err_disable_aer;
 
        ret = adf_dev_init(accel_dev);
        if (ret)
@@ -229,6 +222,8 @@ out_err_dev_stop:
        adf_dev_stop(accel_dev);
 out_err_dev_shutdown:
        adf_dev_shutdown(accel_dev);
+out_err_disable_aer:
+       adf_disable_aer(accel_dev);
 out_err_free_reg:
        pci_release_regions(accel_pci_dev->pci_dev);
 out_err_disable:
index d231583..3bee3e4 100644 (file)
@@ -52,11 +52,6 @@ static u32 get_pf2vf_offset(u32 i)
        return ADF_C62XIOV_PF2VF_OFFSET;
 }
 
-static u32 get_vintmsk_offset(u32 i)
-{
-       return ADF_C62XIOV_VINTMSK_OFFSET;
-}
-
 static int adf_vf_int_noop(struct adf_accel_dev *accel_dev)
 {
        return 0;
@@ -81,10 +76,10 @@ void adf_init_hw_data_c62xiov(struct adf_hw_device_data *hw_data)
        hw_data->enable_error_correction = adf_vf_void_noop;
        hw_data->init_admin_comms = adf_vf_int_noop;
        hw_data->exit_admin_comms = adf_vf_void_noop;
-       hw_data->send_admin_init = adf_vf2pf_init;
+       hw_data->send_admin_init = adf_vf2pf_notify_init;
        hw_data->init_arb = adf_vf_int_noop;
        hw_data->exit_arb = adf_vf_void_noop;
-       hw_data->disable_iov = adf_vf2pf_shutdown;
+       hw_data->disable_iov = adf_vf2pf_notify_shutdown;
        hw_data->get_accel_mask = get_accel_mask;
        hw_data->get_ae_mask = get_ae_mask;
        hw_data->get_num_accels = get_num_accels;
@@ -92,11 +87,10 @@ void adf_init_hw_data_c62xiov(struct adf_hw_device_data *hw_data)
        hw_data->get_etr_bar_id = get_etr_bar_id;
        hw_data->get_misc_bar_id = get_misc_bar_id;
        hw_data->get_pf2vf_offset = get_pf2vf_offset;
-       hw_data->get_vintmsk_offset = get_vintmsk_offset;
        hw_data->get_sku = get_sku;
        hw_data->enable_ints = adf_vf_void_noop;
-       hw_data->enable_vf2pf_comms = adf_enable_vf2pf_comms;
-       hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION;
+       hw_data->enable_pfvf_comms = adf_enable_vf2pf_comms;
+       hw_data->min_iov_compat_ver = ADF_PFVF_COMPAT_THIS_VERSION;
        hw_data->dev_class->instances++;
        adf_devmgr_update_class_index(hw_data);
        adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
index a6c04cf..794778c 100644 (file)
@@ -13,7 +13,6 @@
 #define ADF_C62XIOV_ETR_BAR 0
 #define ADF_C62XIOV_ETR_MAX_BANKS 1
 #define ADF_C62XIOV_PF2VF_OFFSET       0x200
-#define ADF_C62XIOV_VINTMSK_OFFSET     0x208
 
 void adf_init_hw_data_c62xiov(struct adf_hw_device_data *hw_data);
 void adf_clean_hw_data_c62xiov(struct adf_hw_device_data *hw_data);
index 51ea88c..8103bd8 100644 (file)
@@ -141,17 +141,10 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        }
 
        /* set dma identifier */
-       if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
-               if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
-                       dev_err(&pdev->dev, "No usable DMA configuration\n");
-                       ret = -EFAULT;
-                       goto out_err_disable;
-               } else {
-                       pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
-               }
-
-       } else {
-               pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+       ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
+       if (ret) {
+               dev_err(&pdev->dev, "No usable DMA configuration\n");
+               goto out_err_disable;
        }
 
        if (pci_request_regions(pdev, ADF_C62XVF_DEVICE_NAME)) {
@@ -218,6 +211,7 @@ static void adf_remove(struct pci_dev *pdev)
                pr_err("QAT: Driver removal failed\n");
                return;
        }
+       adf_flush_vf_wq(accel_dev);
        adf_dev_stop(accel_dev);
        adf_dev_shutdown(accel_dev);
        adf_cleanup_accel(accel_dev);
index ac435b4..38c0af6 100644 (file)
@@ -18,8 +18,6 @@
 #define ADF_4XXX_DEVICE_NAME "4xxx"
 #define ADF_4XXX_PCI_DEVICE_ID 0x4940
 #define ADF_4XXXIOV_PCI_DEVICE_ID 0x4941
-#define ADF_ERRSOU3 (0x3A000 + 0x0C)
-#define ADF_ERRSOU5 (0x3A000 + 0xD8)
 #define ADF_DEVICE_FUSECTL_OFFSET 0x40
 #define ADF_DEVICE_LEGFUSE_OFFSET 0x4C
 #define ADF_DEVICE_FUSECTL_MASK 0x80000000
@@ -156,7 +154,6 @@ struct adf_hw_device_data {
        u32 (*get_num_aes)(struct adf_hw_device_data *self);
        u32 (*get_num_accels)(struct adf_hw_device_data *self);
        u32 (*get_pf2vf_offset)(u32 i);
-       u32 (*get_vintmsk_offset)(u32 i);
        void (*get_arb_info)(struct arb_info *arb_csrs_info);
        void (*get_admin_info)(struct admin_info *admin_csrs_info);
        enum dev_sku_info (*get_sku)(struct adf_hw_device_data *self);
@@ -174,7 +171,7 @@ struct adf_hw_device_data {
                                      bool enable);
        void (*enable_ints)(struct adf_accel_dev *accel_dev);
        void (*set_ssm_wdtimer)(struct adf_accel_dev *accel_dev);
-       int (*enable_vf2pf_comms)(struct adf_accel_dev *accel_dev);
+       int (*enable_pfvf_comms)(struct adf_accel_dev *accel_dev);
        void (*reset_device)(struct adf_accel_dev *accel_dev);
        void (*set_msix_rttable)(struct adf_accel_dev *accel_dev);
        char *(*uof_get_name)(u32 obj_num);
@@ -227,7 +224,6 @@ struct adf_fw_loader_data {
 
 struct adf_accel_vf_info {
        struct adf_accel_dev *accel_dev;
-       struct tasklet_struct vf2pf_bh_tasklet;
        struct mutex pf2vf_lock; /* protect CSR access for PF2VF messages */
        struct ratelimit_state vf2pf_ratelimit;
        u32 vf_nr;
@@ -249,6 +245,8 @@ struct adf_accel_dev {
        struct adf_accel_pci accel_pci_dev;
        union {
                struct {
+                       /* protects VF2PF interrupts access */
+                       spinlock_t vf2pf_ints_lock;
                        /* vf_info is non-zero when SR-IOV is init'ed */
                        struct adf_accel_vf_info *vf_info;
                } pf;
index d2ae293..ed3e40b 100644 (file)
@@ -194,7 +194,7 @@ int adf_enable_aer(struct adf_accel_dev *accel_dev)
 EXPORT_SYMBOL_GPL(adf_enable_aer);
 
 /**
- * adf_disable_aer() - Enable Advance Error Reporting for acceleration device
+ * adf_disable_aer() - Disable Advance Error Reporting for acceleration device
  * @accel_dev:  Pointer to acceleration device.
  *
  * Function disables PCI Advance Error Reporting for the
index c614765..4261749 100644 (file)
@@ -193,22 +193,23 @@ int adf_sriov_configure(struct pci_dev *pdev, int numvfs);
 void adf_disable_sriov(struct adf_accel_dev *accel_dev);
 void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
                                  u32 vf_mask);
+void adf_disable_vf2pf_interrupts_irq(struct adf_accel_dev *accel_dev,
+                                     u32 vf_mask);
 void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
                                 u32 vf_mask);
 void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
 void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
+void adf_schedule_vf2pf_handler(struct adf_accel_vf_info *vf_info);
 
-int adf_vf2pf_init(struct adf_accel_dev *accel_dev);
-void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev);
+int adf_vf2pf_notify_init(struct adf_accel_dev *accel_dev);
+void adf_vf2pf_notify_shutdown(struct adf_accel_dev *accel_dev);
 int adf_init_pf_wq(void);
 void adf_exit_pf_wq(void);
 int adf_init_vf_wq(void);
 void adf_exit_vf_wq(void);
+void adf_flush_vf_wq(struct adf_accel_dev *accel_dev);
 #else
-static inline int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
-{
-       return 0;
-}
+#define adf_sriov_configure NULL
 
 static inline void adf_disable_sriov(struct adf_accel_dev *accel_dev)
 {
@@ -222,12 +223,12 @@ static inline void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
 {
 }
 
-static inline int adf_vf2pf_init(struct adf_accel_dev *accel_dev)
+static inline int adf_vf2pf_notify_init(struct adf_accel_dev *accel_dev)
 {
        return 0;
 }
 
-static inline void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev)
+static inline void adf_vf2pf_notify_shutdown(struct adf_accel_dev *accel_dev)
 {
 }
 
@@ -249,5 +250,9 @@ static inline void adf_exit_vf_wq(void)
 {
 }
 
+static inline void adf_flush_vf_wq(struct adf_accel_dev *accel_dev)
+{
+}
+
 #endif
 #endif
index 744c403..60bc7b9 100644 (file)
@@ -61,6 +61,7 @@ int adf_dev_init(struct adf_accel_dev *accel_dev)
        struct service_hndl *service;
        struct list_head *list_itr;
        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       int ret;
 
        if (!hw_data) {
                dev_err(&GET_DEV(accel_dev),
@@ -88,8 +89,6 @@ int adf_dev_init(struct adf_accel_dev *accel_dev)
                return -EFAULT;
        }
 
-       hw_data->enable_ints(accel_dev);
-
        if (adf_ae_init(accel_dev)) {
                dev_err(&GET_DEV(accel_dev),
                        "Failed to initialise Acceleration Engine\n");
@@ -110,6 +109,13 @@ int adf_dev_init(struct adf_accel_dev *accel_dev)
        }
        set_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status);
 
+       hw_data->enable_ints(accel_dev);
+       hw_data->enable_error_correction(accel_dev);
+
+       ret = hw_data->enable_pfvf_comms(accel_dev);
+       if (ret)
+               return ret;
+
        /*
         * Subservice initialisation is divided into two stages: init and start.
         * This is to facilitate any ordering dependencies between services
@@ -126,9 +132,6 @@ int adf_dev_init(struct adf_accel_dev *accel_dev)
                set_bit(accel_dev->accel_id, service->init_status);
        }
 
-       hw_data->enable_error_correction(accel_dev);
-       hw_data->enable_vf2pf_comms(accel_dev);
-
        return 0;
 }
 EXPORT_SYMBOL_GPL(adf_dev_init);
index e3ad558..c678d5c 100644 (file)
 #include "adf_transport_access_macros.h"
 #include "adf_transport_internal.h"
 
+#define ADF_MAX_NUM_VFS        32
+#define ADF_ERRSOU3    (0x3A000 + 0x0C)
+#define ADF_ERRSOU5    (0x3A000 + 0xD8)
+#define ADF_ERRMSK3    (0x3A000 + 0x1C)
+#define ADF_ERRMSK5    (0x3A000 + 0xDC)
+#define ADF_ERR_REG_VF2PF_L(vf_src)    (((vf_src) & 0x01FFFE00) >> 9)
+#define ADF_ERR_REG_VF2PF_U(vf_src)    (((vf_src) & 0x0000FFFF) << 16)
+
 static int adf_enable_msix(struct adf_accel_dev *accel_dev)
 {
        struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
@@ -71,14 +79,23 @@ static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr)
                struct adf_hw_device_data *hw_data = accel_dev->hw_device;
                struct adf_bar *pmisc =
                        &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
-               void __iomem *pmisc_bar_addr = pmisc->virt_addr;
-               u32 vf_mask;
+               void __iomem *pmisc_addr = pmisc->virt_addr;
+               u32 errsou3, errsou5, errmsk3, errmsk5;
+               unsigned long vf_mask;
 
                /* Get the interrupt sources triggered by VFs */
-               vf_mask = ((ADF_CSR_RD(pmisc_bar_addr, ADF_ERRSOU5) &
-                           0x0000FFFF) << 16) |
-                         ((ADF_CSR_RD(pmisc_bar_addr, ADF_ERRSOU3) &
-                           0x01FFFE00) >> 9);
+               errsou3 = ADF_CSR_RD(pmisc_addr, ADF_ERRSOU3);
+               errsou5 = ADF_CSR_RD(pmisc_addr, ADF_ERRSOU5);
+               vf_mask = ADF_ERR_REG_VF2PF_L(errsou3);
+               vf_mask |= ADF_ERR_REG_VF2PF_U(errsou5);
+
+               /* To avoid adding duplicate entries to work queue, clear
+                * vf_int_mask_sets bits that are already masked in ERRMSK register.
+                */
+               errmsk3 = ADF_CSR_RD(pmisc_addr, ADF_ERRMSK3);
+               errmsk5 = ADF_CSR_RD(pmisc_addr, ADF_ERRMSK5);
+               vf_mask &= ~ADF_ERR_REG_VF2PF_L(errmsk3);
+               vf_mask &= ~ADF_ERR_REG_VF2PF_U(errmsk5);
 
                if (vf_mask) {
                        struct adf_accel_vf_info *vf_info;
@@ -86,15 +103,13 @@ static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr)
                        int i;
 
                        /* Disable VF2PF interrupts for VFs with pending ints */
-                       adf_disable_vf2pf_interrupts(accel_dev, vf_mask);
+                       adf_disable_vf2pf_interrupts_irq(accel_dev, vf_mask);
 
                        /*
-                        * Schedule tasklets to handle VF2PF interrupt BHs
-                        * unless the VF is malicious and is attempting to
-                        * flood the host OS with VF2PF interrupts.
+                        * Handle VF2PF interrupt unless the VF is malicious and
+                        * is attempting to flood the host OS with VF2PF interrupts.
                         */
-                       for_each_set_bit(i, (const unsigned long *)&vf_mask,
-                                        (sizeof(vf_mask) * BITS_PER_BYTE)) {
+                       for_each_set_bit(i, &vf_mask, ADF_MAX_NUM_VFS) {
                                vf_info = accel_dev->pf.vf_info + i;
 
                                if (!__ratelimit(&vf_info->vf2pf_ratelimit)) {
@@ -104,8 +119,7 @@ static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr)
                                        continue;
                                }
 
-                               /* Tasklet will re-enable ints from this VF */
-                               tasklet_hi_schedule(&vf_info->vf2pf_bh_tasklet);
+                               adf_schedule_vf2pf_handler(vf_info);
                                irq_handled = true;
                        }
 
index a1b77bd..976b9ab 100644 (file)
 #define ADF_DH895XCC_ERRMSK5   (ADF_DH895XCC_EP_OFFSET + 0xDC)
 #define ADF_DH895XCC_ERRMSK5_VF2PF_U_MASK(vf_mask) (vf_mask >> 16)
 
-void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
-{
-       struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
-       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
-       void __iomem *pmisc_bar_addr =
-               pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)].virt_addr;
-
-       ADF_CSR_WR(pmisc_bar_addr, hw_data->get_vintmsk_offset(0), 0x0);
-}
-
-void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
-{
-       struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
-       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
-       void __iomem *pmisc_bar_addr =
-               pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)].virt_addr;
-
-       ADF_CSR_WR(pmisc_bar_addr, hw_data->get_vintmsk_offset(0), 0x2);
-}
-
-void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
-                                u32 vf_mask)
+static void __adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
+                                         u32 vf_mask)
 {
        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
        struct adf_bar *pmisc =
@@ -55,7 +35,17 @@ void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
        }
 }
 
-void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask)
+void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&accel_dev->pf.vf2pf_ints_lock, flags);
+       __adf_enable_vf2pf_interrupts(accel_dev, vf_mask);
+       spin_unlock_irqrestore(&accel_dev->pf.vf2pf_ints_lock, flags);
+}
+
+static void __adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
+                                          u32 vf_mask)
 {
        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
        struct adf_bar *pmisc =
@@ -78,6 +68,22 @@ void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask)
        }
 }
 
+void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&accel_dev->pf.vf2pf_ints_lock, flags);
+       __adf_disable_vf2pf_interrupts(accel_dev, vf_mask);
+       spin_unlock_irqrestore(&accel_dev->pf.vf2pf_ints_lock, flags);
+}
+
+void adf_disable_vf2pf_interrupts_irq(struct adf_accel_dev *accel_dev, u32 vf_mask)
+{
+       spin_lock(&accel_dev->pf.vf2pf_ints_lock);
+       __adf_disable_vf2pf_interrupts(accel_dev, vf_mask);
+       spin_unlock(&accel_dev->pf.vf2pf_ints_lock);
+}
+
 static int __adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr)
 {
        struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
@@ -186,7 +192,6 @@ int adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr)
 
        return ret;
 }
-EXPORT_SYMBOL_GPL(adf_iov_putmsg);
 
 void adf_vf2pf_req_hndl(struct adf_accel_vf_info *vf_info)
 {
@@ -216,7 +221,7 @@ void adf_vf2pf_req_hndl(struct adf_accel_vf_info *vf_info)
                resp = (ADF_PF2VF_MSGORIGIN_SYSTEM |
                         (ADF_PF2VF_MSGTYPE_VERSION_RESP <<
                          ADF_PF2VF_MSGTYPE_SHIFT) |
-                        (ADF_PFVF_COMPATIBILITY_VERSION <<
+                        (ADF_PFVF_COMPAT_THIS_VERSION <<
                          ADF_PF2VF_VERSION_RESP_VERS_SHIFT));
 
                dev_dbg(&GET_DEV(accel_dev),
@@ -226,19 +231,19 @@ void adf_vf2pf_req_hndl(struct adf_accel_vf_info *vf_info)
                if (vf_compat_ver < hw_data->min_iov_compat_ver) {
                        dev_err(&GET_DEV(accel_dev),
                                "VF (vers %d) incompatible with PF (vers %d)\n",
-                               vf_compat_ver, ADF_PFVF_COMPATIBILITY_VERSION);
+                               vf_compat_ver, ADF_PFVF_COMPAT_THIS_VERSION);
                        resp |= ADF_PF2VF_VF_INCOMPATIBLE <<
                                ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
-               } else if (vf_compat_ver > ADF_PFVF_COMPATIBILITY_VERSION) {
+               } else if (vf_compat_ver > ADF_PFVF_COMPAT_THIS_VERSION) {
                        dev_err(&GET_DEV(accel_dev),
                                "VF (vers %d) compat with PF (vers %d) unkn.\n",
-                               vf_compat_ver, ADF_PFVF_COMPATIBILITY_VERSION);
+                               vf_compat_ver, ADF_PFVF_COMPAT_THIS_VERSION);
                        resp |= ADF_PF2VF_VF_COMPAT_UNKNOWN <<
                                ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
                } else {
                        dev_dbg(&GET_DEV(accel_dev),
                                "VF (vers %d) compatible with PF (vers %d)\n",
-                               vf_compat_ver, ADF_PFVF_COMPATIBILITY_VERSION);
+                               vf_compat_ver, ADF_PFVF_COMPAT_THIS_VERSION);
                        resp |= ADF_PF2VF_VF_COMPATIBLE <<
                                ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
                }
@@ -251,7 +256,7 @@ void adf_vf2pf_req_hndl(struct adf_accel_vf_info *vf_info)
                resp = (ADF_PF2VF_MSGORIGIN_SYSTEM |
                         (ADF_PF2VF_MSGTYPE_VERSION_RESP <<
                          ADF_PF2VF_MSGTYPE_SHIFT) |
-                        (ADF_PFVF_COMPATIBILITY_VERSION <<
+                        (ADF_PFVF_COMPAT_THIS_VERSION <<
                          ADF_PF2VF_VERSION_RESP_VERS_SHIFT));
                resp |= ADF_PF2VF_VF_COMPATIBLE <<
                        ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
@@ -284,6 +289,7 @@ void adf_vf2pf_req_hndl(struct adf_accel_vf_info *vf_info)
 
        /* re-enable interrupt on PF from this VF */
        adf_enable_vf2pf_interrupts(accel_dev, (1 << vf_nr));
+
        return;
 err:
        dev_dbg(&GET_DEV(accel_dev), "Unknown message from VF%d (0x%x);\n",
@@ -313,8 +319,10 @@ static int adf_vf2pf_request_version(struct adf_accel_dev *accel_dev)
 
        msg = ADF_VF2PF_MSGORIGIN_SYSTEM;
        msg |= ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ << ADF_VF2PF_MSGTYPE_SHIFT;
-       msg |= ADF_PFVF_COMPATIBILITY_VERSION << ADF_VF2PF_COMPAT_VER_REQ_SHIFT;
-       BUILD_BUG_ON(ADF_PFVF_COMPATIBILITY_VERSION > 255);
+       msg |= ADF_PFVF_COMPAT_THIS_VERSION << ADF_VF2PF_COMPAT_VER_REQ_SHIFT;
+       BUILD_BUG_ON(ADF_PFVF_COMPAT_THIS_VERSION > 255);
+
+       reinit_completion(&accel_dev->vf.iov_msg_completion);
 
        /* Send request from VF to PF */
        ret = adf_iov_putmsg(accel_dev, msg, 0);
@@ -338,14 +346,16 @@ static int adf_vf2pf_request_version(struct adf_accel_dev *accel_dev)
                break;
        case ADF_PF2VF_VF_COMPAT_UNKNOWN:
                /* VF is newer than PF and decides whether it is compatible */
-               if (accel_dev->vf.pf_version >= hw_data->min_iov_compat_ver)
+               if (accel_dev->vf.pf_version >= hw_data->min_iov_compat_ver) {
+                       accel_dev->vf.compatible = ADF_PF2VF_VF_COMPATIBLE;
                        break;
+               }
                fallthrough;
        case ADF_PF2VF_VF_INCOMPATIBLE:
                dev_err(&GET_DEV(accel_dev),
                        "PF (vers %d) and VF (vers %d) are not compatible\n",
                        accel_dev->vf.pf_version,
-                       ADF_PFVF_COMPATIBILITY_VERSION);
+                       ADF_PFVF_COMPAT_THIS_VERSION);
                return -EINVAL;
        default:
                dev_err(&GET_DEV(accel_dev),
index 0690c03..ffd43aa 100644 (file)
@@ -52,7 +52,7 @@
  * IN_USE_BY pattern as part of a collision control scheme (see adf_iov_putmsg).
  */
 
-#define ADF_PFVF_COMPATIBILITY_VERSION         0x1     /* PF<->VF compat */
+#define ADF_PFVF_COMPAT_THIS_VERSION           0x1     /* PF<->VF compat */
 
 /* PF->VF messages */
 #define ADF_PF2VF_INT                          BIT(0)
index 8c822c2..90ec057 100644 (file)
@@ -24,9 +24,8 @@ static void adf_iov_send_resp(struct work_struct *work)
        kfree(pf2vf_resp);
 }
 
-static void adf_vf2pf_bh_handler(void *data)
+void adf_schedule_vf2pf_handler(struct adf_accel_vf_info *vf_info)
 {
-       struct adf_accel_vf_info *vf_info = (struct adf_accel_vf_info *)data;
        struct adf_pf2vf_resp *pf2vf_resp;
 
        pf2vf_resp = kzalloc(sizeof(*pf2vf_resp), GFP_ATOMIC);
@@ -52,9 +51,6 @@ static int adf_enable_sriov(struct adf_accel_dev *accel_dev)
                vf_info->accel_dev = accel_dev;
                vf_info->vf_nr = i;
 
-               tasklet_init(&vf_info->vf2pf_bh_tasklet,
-                            (void *)adf_vf2pf_bh_handler,
-                            (unsigned long)vf_info);
                mutex_init(&vf_info->pf2vf_lock);
                ratelimit_state_init(&vf_info->vf2pf_ratelimit,
                                     DEFAULT_RATELIMIT_INTERVAL,
@@ -110,8 +106,6 @@ void adf_disable_sriov(struct adf_accel_dev *accel_dev)
                hw_data->configure_iov_threads(accel_dev, false);
 
        for (i = 0, vf = accel_dev->pf.vf_info; i < totalvfs; i++, vf++) {
-               tasklet_disable(&vf->vf2pf_bh_tasklet);
-               tasklet_kill(&vf->vf2pf_bh_tasklet);
                mutex_destroy(&vf->pf2vf_lock);
        }
 
index e85bd62..3e25fac 100644 (file)
@@ -5,14 +5,14 @@
 #include "adf_pf2vf_msg.h"
 
 /**
- * adf_vf2pf_init() - send init msg to PF
+ * adf_vf2pf_notify_init() - send init msg to PF
  * @accel_dev:  Pointer to acceleration VF device.
  *
  * Function sends an init message from the VF to a PF
  *
  * Return: 0 on success, error code otherwise.
  */
-int adf_vf2pf_init(struct adf_accel_dev *accel_dev)
+int adf_vf2pf_notify_init(struct adf_accel_dev *accel_dev)
 {
        u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM |
                (ADF_VF2PF_MSGTYPE_INIT << ADF_VF2PF_MSGTYPE_SHIFT));
@@ -25,17 +25,17 @@ int adf_vf2pf_init(struct adf_accel_dev *accel_dev)
        set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
        return 0;
 }
-EXPORT_SYMBOL_GPL(adf_vf2pf_init);
+EXPORT_SYMBOL_GPL(adf_vf2pf_notify_init);
 
 /**
- * adf_vf2pf_shutdown() - send shutdown msg to PF
+ * adf_vf2pf_notify_shutdown() - send shutdown msg to PF
  * @accel_dev:  Pointer to acceleration VF device.
  *
  * Function sends a shutdown message from the VF to a PF
  *
  * Return: void
  */
-void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev)
+void adf_vf2pf_notify_shutdown(struct adf_accel_dev *accel_dev)
 {
        u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM |
            (ADF_VF2PF_MSGTYPE_SHUTDOWN << ADF_VF2PF_MSGTYPE_SHIFT));
@@ -45,4 +45,4 @@ void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev)
                        dev_err(&GET_DEV(accel_dev),
                                "Failed to send Shutdown event to PF\n");
 }
-EXPORT_SYMBOL_GPL(adf_vf2pf_shutdown);
+EXPORT_SYMBOL_GPL(adf_vf2pf_notify_shutdown);
index 888388a..7828a65 100644 (file)
@@ -18,6 +18,7 @@
 #include "adf_pf2vf_msg.h"
 
 #define ADF_VINTSOU_OFFSET     0x204
+#define ADF_VINTMSK_OFFSET     0x208
 #define ADF_VINTSOU_BUN                BIT(0)
 #define ADF_VINTSOU_PF2VF      BIT(1)
 
@@ -28,6 +29,27 @@ struct adf_vf_stop_data {
        struct work_struct work;
 };
 
+void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
+{
+       struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       void __iomem *pmisc_bar_addr =
+               pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)].virt_addr;
+
+       ADF_CSR_WR(pmisc_bar_addr, ADF_VINTMSK_OFFSET, 0x0);
+}
+
+void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
+{
+       struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       void __iomem *pmisc_bar_addr =
+               pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)].virt_addr;
+
+       ADF_CSR_WR(pmisc_bar_addr, ADF_VINTMSK_OFFSET, 0x2);
+}
+EXPORT_SYMBOL_GPL(adf_disable_pf2vf_interrupts);
+
 static int adf_enable_msi(struct adf_accel_dev *accel_dev)
 {
        struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
@@ -160,11 +182,21 @@ static irqreturn_t adf_isr(int irq, void *privdata)
        struct adf_bar *pmisc =
                        &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
        void __iomem *pmisc_bar_addr = pmisc->virt_addr;
-       u32 v_int;
+       bool handled = false;
+       u32 v_int, v_mask;
 
        /* Read VF INT source CSR to determine the source of VF interrupt */
        v_int = ADF_CSR_RD(pmisc_bar_addr, ADF_VINTSOU_OFFSET);
 
+       /* Read VF INT mask CSR to determine which sources are masked */
+       v_mask = ADF_CSR_RD(pmisc_bar_addr, ADF_VINTMSK_OFFSET);
+
+       /*
+        * Recompute v_int ignoring sources that are masked. This is to
+        * avoid rescheduling the tasklet for interrupts already handled
+        */
+       v_int &= ~v_mask;
+
        /* Check for PF2VF interrupt */
        if (v_int & ADF_VINTSOU_PF2VF) {
                /* Disable PF to VF interrupt */
@@ -172,7 +204,7 @@ static irqreturn_t adf_isr(int irq, void *privdata)
 
                /* Schedule tasklet to handle interrupt BH */
                tasklet_hi_schedule(&accel_dev->vf.pf2vf_bh_tasklet);
-               return IRQ_HANDLED;
+               handled = true;
        }
 
        /* Check bundle interrupt */
@@ -184,10 +216,10 @@ static irqreturn_t adf_isr(int irq, void *privdata)
                csr_ops->write_csr_int_flag_and_col(bank->csr_addr,
                                                    bank->bank_number, 0);
                tasklet_hi_schedule(&bank->resp_handler);
-               return IRQ_HANDLED;
+               handled = true;
        }
 
-       return IRQ_NONE;
+       return handled ? IRQ_HANDLED : IRQ_NONE;
 }
 
 static int adf_request_msi_irq(struct adf_accel_dev *accel_dev)
@@ -285,6 +317,30 @@ err_out:
 }
 EXPORT_SYMBOL_GPL(adf_vf_isr_resource_alloc);
 
+/**
+ * adf_flush_vf_wq() - Flush workqueue for VF
+ * @accel_dev:  Pointer to acceleration device.
+ *
+ * Function disables the PF/VF interrupts on the VF so that no new messages
+ * are received and flushes the workqueue 'adf_vf_stop_wq'.
+ *
+ * Return: void.
+ */
+void adf_flush_vf_wq(struct adf_accel_dev *accel_dev)
+{
+       adf_disable_pf2vf_interrupts(accel_dev);
+
+       flush_workqueue(adf_vf_stop_wq);
+}
+EXPORT_SYMBOL_GPL(adf_flush_vf_wq);
+
+/**
+ * adf_init_vf_wq() - Init workqueue for VF
+ *
+ * Function init workqueue 'adf_vf_stop_wq' for VF.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
 int __init adf_init_vf_wq(void)
 {
        adf_vf_stop_wq = alloc_workqueue("adf_vf_stop_wq", WQ_MEM_RECLAIM, 0);
index 7dd7cd6..0a9ce36 100644 (file)
@@ -131,11 +131,6 @@ static u32 get_pf2vf_offset(u32 i)
        return ADF_DH895XCC_PF2VF_OFFSET(i);
 }
 
-static u32 get_vintmsk_offset(u32 i)
-{
-       return ADF_DH895XCC_VINTMSK_OFFSET(i);
-}
-
 static void adf_enable_error_correction(struct adf_accel_dev *accel_dev)
 {
        struct adf_hw_device_data *hw_device = accel_dev->hw_device;
@@ -180,8 +175,10 @@ static void adf_enable_ints(struct adf_accel_dev *accel_dev)
                   ADF_DH895XCC_SMIA1_MASK);
 }
 
-static int adf_pf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev)
+static int adf_enable_pf2vf_comms(struct adf_accel_dev *accel_dev)
 {
+       spin_lock_init(&accel_dev->pf.vf2pf_ints_lock);
+
        return 0;
 }
 
@@ -213,8 +210,6 @@ void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
        hw_data->get_num_aes = get_num_aes;
        hw_data->get_etr_bar_id = get_etr_bar_id;
        hw_data->get_misc_bar_id = get_misc_bar_id;
-       hw_data->get_pf2vf_offset = get_pf2vf_offset;
-       hw_data->get_vintmsk_offset = get_vintmsk_offset;
        hw_data->get_admin_info = adf_gen2_get_admin_info;
        hw_data->get_arb_info = adf_gen2_get_arb_info;
        hw_data->get_sram_bar_id = get_sram_bar_id;
@@ -224,15 +219,17 @@ void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
        hw_data->init_admin_comms = adf_init_admin_comms;
        hw_data->exit_admin_comms = adf_exit_admin_comms;
        hw_data->configure_iov_threads = configure_iov_threads;
-       hw_data->disable_iov = adf_disable_sriov;
        hw_data->send_admin_init = adf_send_admin_init;
        hw_data->init_arb = adf_init_arb;
        hw_data->exit_arb = adf_exit_arb;
        hw_data->get_arb_mapping = adf_get_arbiter_mapping;
        hw_data->enable_ints = adf_enable_ints;
-       hw_data->enable_vf2pf_comms = adf_pf_enable_vf2pf_comms;
        hw_data->reset_device = adf_reset_sbr;
-       hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION;
+       hw_data->get_pf2vf_offset = get_pf2vf_offset;
+       hw_data->enable_pfvf_comms = adf_enable_pf2vf_comms;
+       hw_data->disable_iov = adf_disable_sriov;
+       hw_data->min_iov_compat_ver = ADF_PFVF_COMPAT_THIS_VERSION;
+
        adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
 }
 
index 4d61392..f99319c 100644 (file)
@@ -35,7 +35,6 @@
 #define ADF_DH895XCC_ERRSSMSH_EN BIT(3)
 
 #define ADF_DH895XCC_PF2VF_OFFSET(i)   (0x3A000 + 0x280 + ((i) * 0x04))
-#define ADF_DH895XCC_VINTMSK_OFFSET(i) (0x3A000 + 0x200 + ((i) * 0x04))
 
 /* AE to function mapping */
 #define ADF_DH895XCC_AE2FUNC_MAP_GRP_A_NUM_REGS 96
index a9ec435..3976a81 100644 (file)
@@ -159,17 +159,10 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        }
 
        /* set dma identifier */
-       if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
-               if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
-                       dev_err(&pdev->dev, "No usable DMA configuration\n");
-                       ret = -EFAULT;
-                       goto out_err_disable;
-               } else {
-                       pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
-               }
-
-       } else {
-               pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+       ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
+       if (ret) {
+               dev_err(&pdev->dev, "No usable DMA configuration\n");
+               goto out_err_disable;
        }
 
        if (pci_request_regions(pdev, ADF_DH895XCC_DEVICE_NAME)) {
@@ -208,12 +201,12 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (pci_save_state(pdev)) {
                dev_err(&pdev->dev, "Failed to save pci state\n");
                ret = -ENOMEM;
-               goto out_err_free_reg;
+               goto out_err_disable_aer;
        }
 
        ret = qat_crypto_dev_config(accel_dev);
        if (ret)
-               goto out_err_free_reg;
+               goto out_err_disable_aer;
 
        ret = adf_dev_init(accel_dev);
        if (ret)
@@ -229,6 +222,8 @@ out_err_dev_stop:
        adf_dev_stop(accel_dev);
 out_err_dev_shutdown:
        adf_dev_shutdown(accel_dev);
+out_err_disable_aer:
+       adf_disable_aer(accel_dev);
 out_err_free_reg:
        pci_release_regions(accel_pci_dev->pci_dev);
 out_err_disable:
index f14fb82..7c6ed6b 100644 (file)
@@ -52,11 +52,6 @@ static u32 get_pf2vf_offset(u32 i)
        return ADF_DH895XCCIOV_PF2VF_OFFSET;
 }
 
-static u32 get_vintmsk_offset(u32 i)
-{
-       return ADF_DH895XCCIOV_VINTMSK_OFFSET;
-}
-
 static int adf_vf_int_noop(struct adf_accel_dev *accel_dev)
 {
        return 0;
@@ -81,10 +76,10 @@ void adf_init_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data)
        hw_data->enable_error_correction = adf_vf_void_noop;
        hw_data->init_admin_comms = adf_vf_int_noop;
        hw_data->exit_admin_comms = adf_vf_void_noop;
-       hw_data->send_admin_init = adf_vf2pf_init;
+       hw_data->send_admin_init = adf_vf2pf_notify_init;
        hw_data->init_arb = adf_vf_int_noop;
        hw_data->exit_arb = adf_vf_void_noop;
-       hw_data->disable_iov = adf_vf2pf_shutdown;
+       hw_data->disable_iov = adf_vf2pf_notify_shutdown;
        hw_data->get_accel_mask = get_accel_mask;
        hw_data->get_ae_mask = get_ae_mask;
        hw_data->get_num_accels = get_num_accels;
@@ -92,11 +87,10 @@ void adf_init_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data)
        hw_data->get_etr_bar_id = get_etr_bar_id;
        hw_data->get_misc_bar_id = get_misc_bar_id;
        hw_data->get_pf2vf_offset = get_pf2vf_offset;
-       hw_data->get_vintmsk_offset = get_vintmsk_offset;
        hw_data->get_sku = get_sku;
        hw_data->enable_ints = adf_vf_void_noop;
-       hw_data->enable_vf2pf_comms = adf_enable_vf2pf_comms;
-       hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION;
+       hw_data->enable_pfvf_comms = adf_enable_vf2pf_comms;
+       hw_data->min_iov_compat_ver = ADF_PFVF_COMPAT_THIS_VERSION;
        hw_data->dev_class->instances++;
        adf_devmgr_update_class_index(hw_data);
        adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
index 2bfcc67..306ebb7 100644 (file)
@@ -13,7 +13,6 @@
 #define ADF_DH895XCCIOV_ETR_BAR 0
 #define ADF_DH895XCCIOV_ETR_MAX_BANKS 1
 #define ADF_DH895XCCIOV_PF2VF_OFFSET   0x200
-#define ADF_DH895XCCIOV_VINTMSK_OFFSET 0x208
 
 void adf_init_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data);
 void adf_clean_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data);
index 29999da..99d90f3 100644 (file)
@@ -141,17 +141,10 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        }
 
        /* set dma identifier */
-       if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
-               if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
-                       dev_err(&pdev->dev, "No usable DMA configuration\n");
-                       ret = -EFAULT;
-                       goto out_err_disable;
-               } else {
-                       pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
-               }
-
-       } else {
-               pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+       ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
+       if (ret) {
+               dev_err(&pdev->dev, "No usable DMA configuration\n");
+               goto out_err_disable;
        }
 
        if (pci_request_regions(pdev, ADF_DH895XCCVF_DEVICE_NAME)) {
@@ -218,6 +211,7 @@ static void adf_remove(struct pci_dev *pdev)
                pr_err("QAT: Driver removal failed\n");
                return;
        }
+       adf_flush_vf_wq(accel_dev);
        adf_dev_stop(accel_dev);
        adf_dev_shutdown(accel_dev);
        adf_cleanup_accel(accel_dev);
index 080955a..e2375d9 100644 (file)
@@ -187,9 +187,9 @@ static int virtcrypto_init_vqs(struct virtio_crypto *vi)
        if (ret)
                goto err_free;
 
-       get_online_cpus();
+       cpus_read_lock();
        virtcrypto_set_affinity(vi);
-       put_online_cpus();
+       cpus_read_unlock();
 
        return 0;
 
index 5fa6ae9..44736cb 100644 (file)
@@ -313,7 +313,7 @@ long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
                return -ENXIO;
 
        if (nr_pages < 0)
-               return nr_pages;
+               return -EINVAL;
 
        avail = dax_dev->ops->direct_access(dax_dev, pgoff, nr_pages,
                        kaddr, pfn);
index 26482c7..fc708be 100644 (file)
@@ -294,6 +294,14 @@ struct idxd_desc {
        struct idxd_wq *wq;
 };
 
+/*
+ * This is software defined error for the completion status. We overload the error code
+ * that will never appear in completion status and only SWERR register.
+ */
+enum idxd_completion_status {
+       IDXD_COMP_DESC_ABORT = 0xff,
+};
+
 #define confdev_to_idxd(dev) container_of(dev, struct idxd_device, conf_dev)
 #define confdev_to_wq(dev) container_of(dev, struct idxd_wq, conf_dev)
 
@@ -482,4 +490,10 @@ static inline void perfmon_init(void) {}
 static inline void perfmon_exit(void) {}
 #endif
 
+static inline void complete_desc(struct idxd_desc *desc, enum idxd_complete_type reason)
+{
+       idxd_dma_complete_txd(desc, reason);
+       idxd_free_desc(desc->wq, desc);
+}
+
 #endif
index c8ae41d..c0f4c04 100644 (file)
@@ -102,6 +102,8 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
                spin_lock_init(&idxd->irq_entries[i].list_lock);
        }
 
+       idxd_msix_perm_setup(idxd);
+
        irq_entry = &idxd->irq_entries[0];
        rc = request_threaded_irq(irq_entry->vector, NULL, idxd_misc_thread,
                                  0, "idxd-misc", irq_entry);
@@ -148,7 +150,6 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
        }
 
        idxd_unmask_error_interrupts(idxd);
-       idxd_msix_perm_setup(idxd);
        return 0;
 
  err_wq_irqs:
@@ -162,6 +163,7 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
  err_misc_irq:
        /* Disable error interrupt generation */
        idxd_mask_error_interrupts(idxd);
+       idxd_msix_perm_clear(idxd);
  err_irq_entries:
        pci_free_irq_vectors(pdev);
        dev_err(dev, "No usable interrupts\n");
@@ -758,32 +760,40 @@ static void idxd_shutdown(struct pci_dev *pdev)
        for (i = 0; i < msixcnt; i++) {
                irq_entry = &idxd->irq_entries[i];
                synchronize_irq(irq_entry->vector);
-               free_irq(irq_entry->vector, irq_entry);
                if (i == 0)
                        continue;
                idxd_flush_pending_llist(irq_entry);
                idxd_flush_work_list(irq_entry);
        }
-
-       idxd_msix_perm_clear(idxd);
-       idxd_release_int_handles(idxd);
-       pci_free_irq_vectors(pdev);
-       pci_iounmap(pdev, idxd->reg_base);
-       pci_disable_device(pdev);
-       destroy_workqueue(idxd->wq);
+       flush_workqueue(idxd->wq);
 }
 
 static void idxd_remove(struct pci_dev *pdev)
 {
        struct idxd_device *idxd = pci_get_drvdata(pdev);
+       struct idxd_irq_entry *irq_entry;
+       int msixcnt = pci_msix_vec_count(pdev);
+       int i;
 
        dev_dbg(&pdev->dev, "%s called\n", __func__);
        idxd_shutdown(pdev);
        if (device_pasid_enabled(idxd))
                idxd_disable_system_pasid(idxd);
        idxd_unregister_devices(idxd);
-       perfmon_pmu_remove(idxd);
+
+       for (i = 0; i < msixcnt; i++) {
+               irq_entry = &idxd->irq_entries[i];
+               free_irq(irq_entry->vector, irq_entry);
+       }
+       idxd_msix_perm_clear(idxd);
+       idxd_release_int_handles(idxd);
+       pci_free_irq_vectors(pdev);
+       pci_iounmap(pdev, idxd->reg_base);
        iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
+       pci_disable_device(pdev);
+       destroy_workqueue(idxd->wq);
+       perfmon_pmu_remove(idxd);
+       device_unregister(&idxd->conf_dev);
 }
 
 static struct pci_driver idxd_pci_driver = {
index ae68e1e..4e3a719 100644 (file)
@@ -245,12 +245,6 @@ static inline bool match_fault(struct idxd_desc *desc, u64 fault_addr)
        return false;
 }
 
-static inline void complete_desc(struct idxd_desc *desc, enum idxd_complete_type reason)
-{
-       idxd_dma_complete_txd(desc, reason);
-       idxd_free_desc(desc->wq, desc);
-}
-
 static int irq_process_pending_llist(struct idxd_irq_entry *irq_entry,
                                     enum irq_work_type wtype,
                                     int *processed, u64 data)
@@ -272,8 +266,16 @@ static int irq_process_pending_llist(struct idxd_irq_entry *irq_entry,
                reason = IDXD_COMPLETE_DEV_FAIL;
 
        llist_for_each_entry_safe(desc, t, head, llnode) {
-               if (desc->completion->status) {
-                       if ((desc->completion->status & DSA_COMP_STATUS_MASK) != DSA_COMP_SUCCESS)
+               u8 status = desc->completion->status & DSA_COMP_STATUS_MASK;
+
+               if (status) {
+                       if (unlikely(status == IDXD_COMP_DESC_ABORT)) {
+                               complete_desc(desc, IDXD_COMPLETE_ABORT);
+                               (*processed)++;
+                               continue;
+                       }
+
+                       if (unlikely(status != DSA_COMP_SUCCESS))
                                match_fault(desc, data);
                        complete_desc(desc, reason);
                        (*processed)++;
@@ -329,7 +331,14 @@ static int irq_process_work_list(struct idxd_irq_entry *irq_entry,
        spin_unlock_irqrestore(&irq_entry->list_lock, flags);
 
        list_for_each_entry(desc, &flist, list) {
-               if ((desc->completion->status & DSA_COMP_STATUS_MASK) != DSA_COMP_SUCCESS)
+               u8 status = desc->completion->status & DSA_COMP_STATUS_MASK;
+
+               if (unlikely(status == IDXD_COMP_DESC_ABORT)) {
+                       complete_desc(desc, IDXD_COMPLETE_ABORT);
+                       continue;
+               }
+
+               if (unlikely(status != DSA_COMP_SUCCESS))
                        match_fault(desc, data);
                complete_desc(desc, reason);
        }
index 19afb62..36c9c1a 100644 (file)
@@ -25,11 +25,10 @@ static struct idxd_desc *__get_desc(struct idxd_wq *wq, int idx, int cpu)
         * Descriptor completion vectors are 1...N for MSIX. We will round
         * robin through the N vectors.
         */
-       wq->vec_ptr = (wq->vec_ptr % idxd->num_wq_irqs) + 1;
+       wq->vec_ptr = desc->vector = (wq->vec_ptr % idxd->num_wq_irqs) + 1;
        if (!idxd->int_handles) {
                desc->hw->int_handle = wq->vec_ptr;
        } else {
-               desc->vector = wq->vec_ptr;
                /*
                 * int_handles are only for descriptor completion. However for device
                 * MSIX enumeration, vec 0 is used for misc interrupts. Therefore even
@@ -88,9 +87,64 @@ void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc)
        sbitmap_queue_clear(&wq->sbq, desc->id, cpu);
 }
 
+static struct idxd_desc *list_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie,
+                                        struct idxd_desc *desc)
+{
+       struct idxd_desc *d, *n;
+
+       lockdep_assert_held(&ie->list_lock);
+       list_for_each_entry_safe(d, n, &ie->work_list, list) {
+               if (d == desc) {
+                       list_del(&d->list);
+                       return d;
+               }
+       }
+
+       /*
+        * At this point, the desc needs to be aborted is held by the completion
+        * handler where it has taken it off the pending list but has not added to the
+        * work list. It will be cleaned up by the interrupt handler when it sees the
+        * IDXD_COMP_DESC_ABORT for completion status.
+        */
+       return NULL;
+}
+
+static void llist_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie,
+                            struct idxd_desc *desc)
+{
+       struct idxd_desc *d, *t, *found = NULL;
+       struct llist_node *head;
+       unsigned long flags;
+
+       desc->completion->status = IDXD_COMP_DESC_ABORT;
+       /*
+        * Grab the list lock so it will block the irq thread handler. This allows the
+        * abort code to locate the descriptor need to be aborted.
+        */
+       spin_lock_irqsave(&ie->list_lock, flags);
+       head = llist_del_all(&ie->pending_llist);
+       if (head) {
+               llist_for_each_entry_safe(d, t, head, llnode) {
+                       if (d == desc) {
+                               found = desc;
+                               continue;
+                       }
+                       list_add_tail(&desc->list, &ie->work_list);
+               }
+       }
+
+       if (!found)
+               found = list_abort_desc(wq, ie, desc);
+       spin_unlock_irqrestore(&ie->list_lock, flags);
+
+       if (found)
+               complete_desc(found, IDXD_COMPLETE_ABORT);
+}
+
 int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
 {
        struct idxd_device *idxd = wq->idxd;
+       struct idxd_irq_entry *ie = NULL;
        void __iomem *portal;
        int rc;
 
@@ -108,6 +162,16 @@ int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
         * even on UP because the recipient is a device.
         */
        wmb();
+
+       /*
+        * Pending the descriptor to the lockless list for the irq_entry
+        * that we designated the descriptor to.
+        */
+       if (desc->hw->flags & IDXD_OP_FLAG_RCI) {
+               ie = &idxd->irq_entries[desc->vector];
+               llist_add(&desc->llnode, &ie->pending_llist);
+       }
+
        if (wq_dedicated(wq)) {
                iosubmit_cmds512(portal, desc->hw, 1);
        } else {
@@ -118,29 +182,13 @@ int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
                 * device is not accepting descriptor at all.
                 */
                rc = enqcmds(portal, desc->hw);
-               if (rc < 0)
+               if (rc < 0) {
+                       if (ie)
+                               llist_abort_desc(wq, ie, desc);
                        return rc;
+               }
        }
 
        percpu_ref_put(&wq->wq_active);
-
-       /*
-        * Pending the descriptor to the lockless list for the irq_entry
-        * that we designated the descriptor to.
-        */
-       if (desc->hw->flags & IDXD_OP_FLAG_RCI) {
-               int vec;
-
-               /*
-                * If the driver is on host kernel, it would be the value
-                * assigned to interrupt handle, which is index for MSIX
-                * vector. If it's guest then can't use the int_handle since
-                * that is the index to IMS for the entire device. The guest
-                * device local index will be used.
-                */
-               vec = !idxd->int_handles ? desc->hw->int_handle : desc->vector;
-               llist_add(&desc->llnode, &idxd->irq_entries[vec].pending_llist);
-       }
-
        return 0;
 }
index 0460d58..bb4df63 100644 (file)
@@ -1744,8 +1744,6 @@ void idxd_unregister_devices(struct idxd_device *idxd)
 
                device_unregister(&group->conf_dev);
        }
-
-       device_unregister(&idxd->conf_dev);
 }
 
 int idxd_register_bus_type(void)
index 7f116bb..2ddc31e 100644 (file)
@@ -812,6 +812,8 @@ static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
                dma_length += sg_dma_len(sg);
        }
 
+       imxdma_config_write(chan, &imxdmac->config, direction);
+
        switch (imxdmac->word_size) {
        case DMA_SLAVE_BUSWIDTH_4_BYTES:
                if (sg_dma_len(sgl) & 3 || sgl->dma_address & 3)
index ec00b20..ac61ecd 100644 (file)
@@ -67,8 +67,12 @@ static struct dma_chan *of_dma_router_xlate(struct of_phandle_args *dma_spec,
                return NULL;
 
        ofdma_target = of_dma_find_controller(&dma_spec_target);
-       if (!ofdma_target)
-               return NULL;
+       if (!ofdma_target) {
+               ofdma->dma_router->route_free(ofdma->dma_router->dev,
+                                             route_data);
+               chan = ERR_PTR(-EPROBE_DEFER);
+               goto err;
+       }
 
        chan = ofdma_target->of_dma_xlate(&dma_spec_target, ofdma_target);
        if (IS_ERR_OR_NULL(chan)) {
@@ -89,6 +93,7 @@ static struct dma_chan *of_dma_router_xlate(struct of_phandle_args *dma_spec,
                }
        }
 
+err:
        /*
         * Need to put the node back since the ofdma->of_dma_route_allocate
         * has taken it for generating the new, translated dma_spec
index 8f7ceb6..1cc0690 100644 (file)
@@ -855,8 +855,8 @@ static int usb_dmac_probe(struct platform_device *pdev)
 
 error:
        of_dma_controller_free(pdev->dev.of_node);
-       pm_runtime_put(&pdev->dev);
 error_pm:
+       pm_runtime_put(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
        return ret;
 }
index f54ecb1..7dd1d3d 100644 (file)
@@ -1200,7 +1200,7 @@ static int stm32_dma_alloc_chan_resources(struct dma_chan *c)
 
        chan->config_init = false;
 
-       ret = pm_runtime_get_sync(dmadev->ddev.dev);
+       ret = pm_runtime_resume_and_get(dmadev->ddev.dev);
        if (ret < 0)
                return ret;
 
@@ -1470,7 +1470,7 @@ static int stm32_dma_suspend(struct device *dev)
        struct stm32_dma_device *dmadev = dev_get_drvdata(dev);
        int id, ret, scr;
 
-       ret = pm_runtime_get_sync(dev);
+       ret = pm_runtime_resume_and_get(dev);
        if (ret < 0)
                return ret;
 
index ef0d055..a421643 100644 (file)
@@ -137,7 +137,7 @@ static void *stm32_dmamux_route_allocate(struct of_phandle_args *dma_spec,
 
        /* Set dma request */
        spin_lock_irqsave(&dmamux->lock, flags);
-       ret = pm_runtime_get_sync(&pdev->dev);
+       ret = pm_runtime_resume_and_get(&pdev->dev);
        if (ret < 0) {
                spin_unlock_irqrestore(&dmamux->lock, flags);
                goto error;
@@ -336,7 +336,7 @@ static int stm32_dmamux_suspend(struct device *dev)
        struct stm32_dmamux_data *stm32_dmamux = platform_get_drvdata(pdev);
        int i, ret;
 
-       ret = pm_runtime_get_sync(dev);
+       ret = pm_runtime_resume_and_get(dev);
        if (ret < 0)
                return ret;
 
@@ -361,7 +361,7 @@ static int stm32_dmamux_resume(struct device *dev)
        if (ret < 0)
                return ret;
 
-       ret = pm_runtime_get_sync(dev);
+       ret = pm_runtime_resume_and_get(dev);
        if (ret < 0)
                return ret;
 
index 16b1965..d6b8a20 100644 (file)
@@ -209,8 +209,8 @@ static int uniphier_xdmac_chan_stop(struct uniphier_xdmac_chan *xc)
        writel(0, xc->reg_ch_base + XDMAC_TSS);
 
        /* wait until transfer is stopped */
-       return readl_poll_timeout(xc->reg_ch_base + XDMAC_STAT, val,
-                                 !(val & XDMAC_STAT_TENF), 100, 1000);
+       return readl_poll_timeout_atomic(xc->reg_ch_base + XDMAC_STAT, val,
+                                        !(val & XDMAC_STAT_TENF), 100, 1000);
 }
 
 /* xc->vc.lock must be held by caller */
index 75c0b8e..4b9530a 100644 (file)
@@ -394,6 +394,7 @@ struct xilinx_dma_tx_descriptor {
  * @genlock: Support genlock mode
  * @err: Channel has errors
  * @idle: Check for channel idle
+ * @terminating: Check for channel being synchronized by user
  * @tasklet: Cleanup work after irq
  * @config: Device configuration info
  * @flush_on_fsync: Flush on Frame sync
@@ -431,6 +432,7 @@ struct xilinx_dma_chan {
        bool genlock;
        bool err;
        bool idle;
+       bool terminating;
        struct tasklet_struct tasklet;
        struct xilinx_vdma_config config;
        bool flush_on_fsync;
@@ -1049,6 +1051,13 @@ static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan)
                /* Run any dependencies, then free the descriptor */
                dma_run_dependencies(&desc->async_tx);
                xilinx_dma_free_tx_descriptor(chan, desc);
+
+               /*
+                * While we ran a callback the user called a terminate function,
+                * which takes care of cleaning up any remaining descriptors
+                */
+               if (chan->terminating)
+                       break;
        }
 
        spin_unlock_irqrestore(&chan->lock, flags);
@@ -1965,6 +1974,8 @@ static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
        if (desc->cyclic)
                chan->cyclic = true;
 
+       chan->terminating = false;
+
        spin_unlock_irqrestore(&chan->lock, flags);
 
        return cookie;
@@ -2436,6 +2447,7 @@ static int xilinx_dma_terminate_all(struct dma_chan *dchan)
 
        xilinx_dma_chan_reset(chan);
        /* Remove and free all of the descriptors in the lists */
+       chan->terminating = true;
        xilinx_dma_free_descriptors(chan);
        chan->idle = true;
 
index 61c21bd..3a6d241 100644 (file)
@@ -539,10 +539,18 @@ module_platform_driver(altr_edac_driver);
  * trigger testing are different for each memory.
  */
 
+#ifdef CONFIG_EDAC_ALTERA_OCRAM
 static const struct edac_device_prv_data ocramecc_data;
+#endif
+#ifdef CONFIG_EDAC_ALTERA_L2C
 static const struct edac_device_prv_data l2ecc_data;
+#endif
+#ifdef CONFIG_EDAC_ALTERA_OCRAM
 static const struct edac_device_prv_data a10_ocramecc_data;
+#endif
+#ifdef CONFIG_EDAC_ALTERA_L2C
 static const struct edac_device_prv_data a10_l2ecc_data;
+#endif
 
 static irqreturn_t altr_edac_device_handler(int irq, void *dev_id)
 {
@@ -569,9 +577,9 @@ static irqreturn_t altr_edac_device_handler(int irq, void *dev_id)
        return ret_value;
 }
 
-static ssize_t altr_edac_device_trig(struct file *file,
-                                    const char __user *user_buf,
-                                    size_t count, loff_t *ppos)
+static ssize_t __maybe_unused
+altr_edac_device_trig(struct file *file, const char __user *user_buf,
+                     size_t count, loff_t *ppos)
 
 {
        u32 *ptemp, i, error_mask;
@@ -640,27 +648,27 @@ static ssize_t altr_edac_device_trig(struct file *file,
        return count;
 }
 
-static const struct file_operations altr_edac_device_inject_fops = {
+static const struct file_operations altr_edac_device_inject_fops __maybe_unused = {
        .open = simple_open,
        .write = altr_edac_device_trig,
        .llseek = generic_file_llseek,
 };
 
-static ssize_t altr_edac_a10_device_trig(struct file *file,
-                                        const char __user *user_buf,
-                                        size_t count, loff_t *ppos);
+static ssize_t __maybe_unused
+altr_edac_a10_device_trig(struct file *file, const char __user *user_buf,
+                         size_t count, loff_t *ppos);
 
-static const struct file_operations altr_edac_a10_device_inject_fops = {
+static const struct file_operations altr_edac_a10_device_inject_fops __maybe_unused = {
        .open = simple_open,
        .write = altr_edac_a10_device_trig,
        .llseek = generic_file_llseek,
 };
 
-static ssize_t altr_edac_a10_device_trig2(struct file *file,
-                                         const char __user *user_buf,
-                                         size_t count, loff_t *ppos);
+static ssize_t __maybe_unused
+altr_edac_a10_device_trig2(struct file *file, const char __user *user_buf,
+                          size_t count, loff_t *ppos);
 
-static const struct file_operations altr_edac_a10_device_inject2_fops = {
+static const struct file_operations altr_edac_a10_device_inject2_fops __maybe_unused = {
        .open = simple_open,
        .write = altr_edac_a10_device_trig2,
        .llseek = generic_file_llseek,
@@ -1697,9 +1705,9 @@ MODULE_DEVICE_TABLE(of, altr_edac_a10_device_of_match);
  * Based on xgene_edac.c peripheral code.
  */
 
-static ssize_t altr_edac_a10_device_trig(struct file *file,
-                                        const char __user *user_buf,
-                                        size_t count, loff_t *ppos)
+static ssize_t __maybe_unused
+altr_edac_a10_device_trig(struct file *file, const char __user *user_buf,
+                         size_t count, loff_t *ppos)
 {
        struct edac_device_ctl_info *edac_dci = file->private_data;
        struct altr_edac_device_dev *drvdata = edac_dci->pvt_info;
@@ -1729,9 +1737,9 @@ static ssize_t altr_edac_a10_device_trig(struct file *file,
  * slightly. A few Arria10 peripherals can use this injection function.
  * Inject the error into the memory and then readback to trigger the IRQ.
  */
-static ssize_t altr_edac_a10_device_trig2(struct file *file,
-                                         const char __user *user_buf,
-                                         size_t count, loff_t *ppos)
+static ssize_t __maybe_unused
+altr_edac_a10_device_trig2(struct file *file, const char __user *user_buf,
+                          size_t count, loff_t *ppos)
 {
        struct edac_device_ctl_info *edac_dci = file->private_data;
        struct altr_edac_device_dev *drvdata = edac_dci->pvt_info;
@@ -1804,11 +1812,8 @@ static void altr_edac_a10_irq_handler(struct irq_desc *desc)
        regmap_read(edac->ecc_mgr_map, sm_offset, &irq_status);
 
        bits = irq_status;
-       for_each_set_bit(bit, &bits, 32) {
-               irq = irq_linear_revmap(edac->domain, dberr * 32 + bit);
-               if (irq)
-                       generic_handle_irq(irq);
-       }
+       for_each_set_bit(bit, &bits, 32)
+               generic_handle_domain_irq(edac->domain, dberr * 32 + bit);
 
        chained_irq_exit(chip, desc);
 }
index f0d8f60..99b06a3 100644 (file)
@@ -571,8 +571,8 @@ EDAC_DCT_ATTR_SHOW(dbam0);
 EDAC_DCT_ATTR_SHOW(top_mem);
 EDAC_DCT_ATTR_SHOW(top_mem2);
 
-static ssize_t hole_show(struct device *dev, struct device_attribute *mattr,
-                        char *data)
+static ssize_t dram_hole_show(struct device *dev, struct device_attribute *mattr,
+                             char *data)
 {
        struct mem_ctl_info *mci = to_mci(dev);
 
@@ -593,7 +593,7 @@ static DEVICE_ATTR(dhar, S_IRUGO, dhar_show, NULL);
 static DEVICE_ATTR(dbam, S_IRUGO, dbam0_show, NULL);
 static DEVICE_ATTR(topmem, S_IRUGO, top_mem_show, NULL);
 static DEVICE_ATTR(topmem2, S_IRUGO, top_mem2_show, NULL);
-static DEVICE_ATTR(dram_hole, S_IRUGO, hole_show, NULL);
+static DEVICE_ATTR_RO(dram_hole);
 
 static struct attribute *dbg_attrs[] = {
        &dev_attr_dhar.attr,
@@ -802,16 +802,11 @@ static ssize_t inject_write_store(struct device *dev,
  * update NUM_INJ_ATTRS in case you add new members
  */
 
-static DEVICE_ATTR(inject_section, S_IRUGO | S_IWUSR,
-                  inject_section_show, inject_section_store);
-static DEVICE_ATTR(inject_word, S_IRUGO | S_IWUSR,
-                  inject_word_show, inject_word_store);
-static DEVICE_ATTR(inject_ecc_vector, S_IRUGO | S_IWUSR,
-                  inject_ecc_vector_show, inject_ecc_vector_store);
-static DEVICE_ATTR(inject_write, S_IWUSR,
-                  NULL, inject_write_store);
-static DEVICE_ATTR(inject_read,  S_IWUSR,
-                  NULL, inject_read_store);
+static DEVICE_ATTR_RW(inject_section);
+static DEVICE_ATTR_RW(inject_word);
+static DEVICE_ATTR_RW(inject_ecc_vector);
+static DEVICE_ATTR_WO(inject_write);
+static DEVICE_ATTR_WO(inject_read);
 
 static struct attribute *inj_attrs[] = {
        &dev_attr_inject_section.attr,
index f6d462d..2c59756 100644 (file)
@@ -166,6 +166,7 @@ const char * const edac_mem_types[] = {
        [MEM_DDR5]      = "Unbuffered-DDR5",
        [MEM_NVDIMM]    = "Non-volatile-RAM",
        [MEM_WIO2]      = "Wide-IO-2",
+       [MEM_HBM2]      = "High-bandwidth-memory-Gen2",
 };
 EXPORT_SYMBOL_GPL(edac_mem_types);
 
index 6ce0ed2..83345bf 100644 (file)
 #define I10NM_GET_DIMMMTR(m, i, j)     \
        readl((m)->mbase + ((m)->hbm_mc ? 0x80c : 0x2080c) + \
        (i) * (m)->chan_mmio_sz + (j) * 4)
-#define I10NM_GET_MCDDRTCFG(m, i, j)   \
+#define I10NM_GET_MCDDRTCFG(m, i)      \
        readl((m)->mbase + ((m)->hbm_mc ? 0x970 : 0x20970) + \
-       (i) * (m)->chan_mmio_sz + (j) * 4)
+       (i) * (m)->chan_mmio_sz)
 #define I10NM_GET_MCMTR(m, i)          \
        readl((m)->mbase + ((m)->hbm_mc ? 0xef8 : 0x20ef8) + \
        (i) * (m)->chan_mmio_sz)
 #define I10NM_GET_AMAP(m, i)           \
        readl((m)->mbase + ((m)->hbm_mc ? 0x814 : 0x20814) + \
        (i) * (m)->chan_mmio_sz)
+#define I10NM_GET_REG32(m, i, offset)  \
+       readl((m)->mbase + (i) * (m)->chan_mmio_sz + (offset))
+#define I10NM_GET_REG64(m, i, offset)  \
+       readq((m)->mbase + (i) * (m)->chan_mmio_sz + (offset))
+#define I10NM_SET_REG32(m, i, offset, v)       \
+       writel(v, (m)->mbase + (i) * (m)->chan_mmio_sz + (offset))
 
 #define I10NM_GET_SCK_MMIO_BASE(reg)   (GET_BITFIELD(reg, 0, 28) << 23)
 #define I10NM_GET_IMC_MMIO_OFFSET(reg) (GET_BITFIELD(reg, 0, 10) << 12)
 #define I10NM_SAD_ENABLE(reg)          GET_BITFIELD(reg, 0, 0)
 #define I10NM_SAD_NM_CACHEABLE(reg)    GET_BITFIELD(reg, 5, 5)
 
+#define RETRY_RD_ERR_LOG_UC            BIT(1)
+#define RETRY_RD_ERR_LOG_NOOVER                BIT(14)
+#define RETRY_RD_ERR_LOG_EN            BIT(15)
+#define RETRY_RD_ERR_LOG_NOOVER_UC     (BIT(14) | BIT(1))
+#define RETRY_RD_ERR_LOG_OVER_UC_V     (BIT(2) | BIT(1) | BIT(0))
+
 static struct list_head *i10nm_edac_list;
 
+static struct res_config *res_cfg;
+static int retry_rd_err_log;
+
+static u32 offsets_scrub_icx[]  = {0x22c60, 0x22c54, 0x22c5c, 0x22c58, 0x22c28, 0x20ed8};
+static u32 offsets_scrub_spr[]  = {0x22c60, 0x22c54, 0x22f08, 0x22c58, 0x22c28, 0x20ed8};
+static u32 offsets_demand_icx[] = {0x22e54, 0x22e60, 0x22e64, 0x22e58, 0x22e5c, 0x20ee0};
+static u32 offsets_demand_spr[] = {0x22e54, 0x22e60, 0x22f10, 0x22e58, 0x22e5c, 0x20ee0};
+
+static void __enable_retry_rd_err_log(struct skx_imc *imc, int chan, bool enable)
+{
+       u32 s, d;
+
+       if (!imc->mbase)
+               return;
+
+       s = I10NM_GET_REG32(imc, chan, res_cfg->offsets_scrub[0]);
+       d = I10NM_GET_REG32(imc, chan, res_cfg->offsets_demand[0]);
+
+       if (enable) {
+               /* Save default configurations */
+               imc->chan[chan].retry_rd_err_log_s = s;
+               imc->chan[chan].retry_rd_err_log_d = d;
+
+               s &= ~RETRY_RD_ERR_LOG_NOOVER_UC;
+               s |=  RETRY_RD_ERR_LOG_EN;
+               d &= ~RETRY_RD_ERR_LOG_NOOVER_UC;
+               d |=  RETRY_RD_ERR_LOG_EN;
+       } else {
+               /* Restore default configurations */
+               if (imc->chan[chan].retry_rd_err_log_s & RETRY_RD_ERR_LOG_UC)
+                       s |=  RETRY_RD_ERR_LOG_UC;
+               if (imc->chan[chan].retry_rd_err_log_s & RETRY_RD_ERR_LOG_NOOVER)
+                       s |=  RETRY_RD_ERR_LOG_NOOVER;
+               if (!(imc->chan[chan].retry_rd_err_log_s & RETRY_RD_ERR_LOG_EN))
+                       s &= ~RETRY_RD_ERR_LOG_EN;
+               if (imc->chan[chan].retry_rd_err_log_d & RETRY_RD_ERR_LOG_UC)
+                       d |=  RETRY_RD_ERR_LOG_UC;
+               if (imc->chan[chan].retry_rd_err_log_d & RETRY_RD_ERR_LOG_NOOVER)
+                       d |=  RETRY_RD_ERR_LOG_NOOVER;
+               if (!(imc->chan[chan].retry_rd_err_log_d & RETRY_RD_ERR_LOG_EN))
+                       d &= ~RETRY_RD_ERR_LOG_EN;
+       }
+
+       I10NM_SET_REG32(imc, chan, res_cfg->offsets_scrub[0], s);
+       I10NM_SET_REG32(imc, chan, res_cfg->offsets_demand[0], d);
+}
+
+static void enable_retry_rd_err_log(bool enable)
+{
+       struct skx_dev *d;
+       int i, j;
+
+       edac_dbg(2, "\n");
+
+       list_for_each_entry(d, i10nm_edac_list, list)
+               for (i = 0; i < I10NM_NUM_IMC; i++)
+                       for (j = 0; j < I10NM_NUM_CHANNELS; j++)
+                               __enable_retry_rd_err_log(&d->imc[i], j, enable);
+}
+
+static void show_retry_rd_err_log(struct decoded_addr *res, char *msg,
+                                 int len, bool scrub_err)
+{
+       struct skx_imc *imc = &res->dev->imc[res->imc];
+       u32 log0, log1, log2, log3, log4;
+       u32 corr0, corr1, corr2, corr3;
+       u64 log2a, log5;
+       u32 *offsets;
+       int n;
+
+       if (!imc->mbase)
+               return;
+
+       offsets = scrub_err ? res_cfg->offsets_scrub : res_cfg->offsets_demand;
+
+       log0 = I10NM_GET_REG32(imc, res->channel, offsets[0]);
+       log1 = I10NM_GET_REG32(imc, res->channel, offsets[1]);
+       log3 = I10NM_GET_REG32(imc, res->channel, offsets[3]);
+       log4 = I10NM_GET_REG32(imc, res->channel, offsets[4]);
+       log5 = I10NM_GET_REG64(imc, res->channel, offsets[5]);
+
+       if (res_cfg->type == SPR) {
+               log2a = I10NM_GET_REG64(imc, res->channel, offsets[2]);
+               n = snprintf(msg, len, " retry_rd_err_log[%.8x %.8x %.16llx %.8x %.8x %.16llx]",
+                            log0, log1, log2a, log3, log4, log5);
+       } else {
+               log2 = I10NM_GET_REG32(imc, res->channel, offsets[2]);
+               n = snprintf(msg, len, " retry_rd_err_log[%.8x %.8x %.8x %.8x %.8x %.16llx]",
+                            log0, log1, log2, log3, log4, log5);
+       }
+
+       corr0 = I10NM_GET_REG32(imc, res->channel, 0x22c18);
+       corr1 = I10NM_GET_REG32(imc, res->channel, 0x22c1c);
+       corr2 = I10NM_GET_REG32(imc, res->channel, 0x22c20);
+       corr3 = I10NM_GET_REG32(imc, res->channel, 0x22c24);
+
+       if (len - n > 0)
+               snprintf(msg + n, len - n,
+                        " correrrcnt[%.4x %.4x %.4x %.4x %.4x %.4x %.4x %.4x]",
+                        corr0 & 0xffff, corr0 >> 16,
+                        corr1 & 0xffff, corr1 >> 16,
+                        corr2 & 0xffff, corr2 >> 16,
+                        corr3 & 0xffff, corr3 >> 16);
+
+       /* Clear status bits */
+       if (retry_rd_err_log == 2 && (log0 & RETRY_RD_ERR_LOG_OVER_UC_V)) {
+               log0 &= ~RETRY_RD_ERR_LOG_OVER_UC_V;
+               I10NM_SET_REG32(imc, res->channel, offsets[0], log0);
+       }
+}
+
 static struct pci_dev *pci_get_dev_wrapper(int dom, unsigned int bus,
                                           unsigned int dev, unsigned int fun)
 {
@@ -263,6 +386,8 @@ static struct res_config i10nm_cfg0 = {
        .ddr_chan_mmio_sz       = 0x4000,
        .sad_all_devfn          = PCI_DEVFN(29, 0),
        .sad_all_offset         = 0x108,
+       .offsets_scrub          = offsets_scrub_icx,
+       .offsets_demand         = offsets_demand_icx,
 };
 
 static struct res_config i10nm_cfg1 = {
@@ -272,6 +397,8 @@ static struct res_config i10nm_cfg1 = {
        .ddr_chan_mmio_sz       = 0x4000,
        .sad_all_devfn          = PCI_DEVFN(29, 0),
        .sad_all_offset         = 0x108,
+       .offsets_scrub          = offsets_scrub_icx,
+       .offsets_demand         = offsets_demand_icx,
 };
 
 static struct res_config spr_cfg = {
@@ -283,6 +410,8 @@ static struct res_config spr_cfg = {
        .support_ddr5           = true,
        .sad_all_devfn          = PCI_DEVFN(10, 0),
        .sad_all_offset         = 0x300,
+       .offsets_scrub          = offsets_scrub_spr,
+       .offsets_demand         = offsets_demand_spr,
 };
 
 static const struct x86_cpu_id i10nm_cpuids[] = {
@@ -321,10 +450,10 @@ static int i10nm_get_dimm_config(struct mem_ctl_info *mci,
 
                ndimms = 0;
                amap = I10NM_GET_AMAP(imc, i);
+               mcddrtcfg = I10NM_GET_MCDDRTCFG(imc, i);
                for (j = 0; j < imc->num_dimms; j++) {
                        dimm = edac_get_dimm(mci, i, j, 0);
                        mtr = I10NM_GET_DIMMMTR(imc, i, j);
-                       mcddrtcfg = I10NM_GET_MCDDRTCFG(imc, i, j);
                        edac_dbg(1, "dimmmtr 0x%x mcddrtcfg 0x%x (mc%d ch%d dimm%d)\n",
                                 mtr, mcddrtcfg, imc->mc, i, j);
 
@@ -422,6 +551,7 @@ static int __init i10nm_init(void)
                return -ENODEV;
 
        cfg = (struct res_config *)id->driver_data;
+       res_cfg = cfg;
 
        rc = skx_get_hi_lo(0x09a2, off, &tolm, &tohm);
        if (rc)
@@ -486,6 +616,12 @@ static int __init i10nm_init(void)
        mce_register_decode_chain(&i10nm_mce_dec);
        setup_i10nm_debug();
 
+       if (retry_rd_err_log && res_cfg->offsets_scrub && res_cfg->offsets_demand) {
+               skx_set_decode(NULL, show_retry_rd_err_log);
+               if (retry_rd_err_log == 2)
+                       enable_retry_rd_err_log(true);
+       }
+
        i10nm_printk(KERN_INFO, "%s\n", I10NM_REVISION);
 
        return 0;
@@ -497,6 +633,13 @@ fail:
 static void __exit i10nm_exit(void)
 {
        edac_dbg(2, "\n");
+
+       if (retry_rd_err_log && res_cfg->offsets_scrub && res_cfg->offsets_demand) {
+               skx_set_decode(NULL, NULL);
+               if (retry_rd_err_log == 2)
+                       enable_retry_rd_err_log(false);
+       }
+
        teardown_i10nm_debug();
        mce_unregister_decode_chain(&i10nm_mce_dec);
        skx_adxl_put();
@@ -506,5 +649,8 @@ static void __exit i10nm_exit(void)
 module_init(i10nm_init);
 module_exit(i10nm_exit);
 
+module_param(retry_rd_err_log, int, 0444);
+MODULE_PARM_DESC(retry_rd_err_log, "retry_rd_err_log: 0=off(default), 1=bios(Linux doesn't reset any control bits, but just reports values.), 2=linux(Linux tries to take control and resets mode bits, clear valid/UC bits after reading.)");
+
 MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("MC Driver for Intel 10nm server processors");
index 27d5692..67dbf4c 100644 (file)
@@ -1246,6 +1246,9 @@ static int __init mce_amd_init(void)
            c->x86_vendor != X86_VENDOR_HYGON)
                return -ENODEV;
 
+       if (cpu_feature_enabled(X86_FEATURE_HYPERVISOR))
+               return -ENODEV;
+
        if (boot_cpu_has(X86_FEATURE_SMCA)) {
                xec_mask = 0x3f;
                goto out;
index 4dbd465..1abc020 100644 (file)
@@ -230,7 +230,8 @@ static int skx_get_dimm_config(struct mem_ctl_info *mci, struct res_config *cfg)
 #define SKX_ILV_TARGET(tgt)    ((tgt) & 7)
 
 static void skx_show_retry_rd_err_log(struct decoded_addr *res,
-                                     char *msg, int len)
+                                     char *msg, int len,
+                                     bool scrub_err)
 {
        u32 log0, log1, log2, log3, log4;
        u32 corr0, corr1, corr2, corr3;
index 5e83f59..19c17c5 100644 (file)
@@ -345,7 +345,10 @@ int skx_get_dimm_info(u32 mtr, u32 mcmtr, u32 amap, struct dimm_info *dimm,
        rows = numrow(mtr);
        cols = imc->hbm_mc ? 6 : numcol(mtr);
 
-       if (cfg->support_ddr5 && ((amap & 0x8) || imc->hbm_mc)) {
+       if (imc->hbm_mc) {
+               banks = 32;
+               mtype = MEM_HBM2;
+       } else if (cfg->support_ddr5 && (amap & 0x8)) {
                banks = 32;
                mtype = MEM_DDR5;
        } else {
@@ -529,6 +532,7 @@ static void skx_mce_output_error(struct mem_ctl_info *mci,
        bool ripv = GET_BITFIELD(m->mcgstatus, 0, 0);
        bool overflow = GET_BITFIELD(m->status, 62, 62);
        bool uncorrected_error = GET_BITFIELD(m->status, 61, 61);
+       bool scrub_err = false;
        bool recoverable;
        int len;
        u32 core_err_cnt = GET_BITFIELD(m->status, 38, 52);
@@ -580,6 +584,7 @@ static void skx_mce_output_error(struct mem_ctl_info *mci,
                        break;
                case 4:
                        optype = "memory scrubbing error";
+                       scrub_err = true;
                        break;
                default:
                        optype = "reserved";
@@ -602,7 +607,7 @@ static void skx_mce_output_error(struct mem_ctl_info *mci,
        }
 
        if (skx_show_retry_rd_err_log)
-               skx_show_retry_rd_err_log(res, skx_msg + len, MSG_SIZE - len);
+               skx_show_retry_rd_err_log(res, skx_msg + len, MSG_SIZE - len, scrub_err);
 
        edac_dbg(0, "%s\n", skx_msg);
 
index 01f67e7..03ac067 100644 (file)
@@ -80,6 +80,8 @@ struct skx_dev {
                struct skx_channel {
                        struct pci_dev  *cdev;
                        struct pci_dev  *edev;
+                       u32 retry_rd_err_log_s;
+                       u32 retry_rd_err_log_d;
                        struct skx_dimm {
                                u8 close_pg;
                                u8 bank_xor_enable;
@@ -150,12 +152,15 @@ struct res_config {
        /* SAD device number and function number */
        unsigned int sad_all_devfn;
        int sad_all_offset;
+       /* Offsets of retry_rd_err_log registers */
+       u32 *offsets_scrub;
+       u32 *offsets_demand;
 };
 
 typedef int (*get_dimm_config_f)(struct mem_ctl_info *mci,
                                 struct res_config *cfg);
 typedef bool (*skx_decode_f)(struct decoded_addr *res);
-typedef void (*skx_show_retry_log_f)(struct decoded_addr *res, char *msg, int len);
+typedef void (*skx_show_retry_log_f)(struct decoded_addr *res, char *msg, int len, bool scrub_err);
 
 int __init skx_adxl_get(void);
 void __exit skx_adxl_put(void);
index ed10da5..a5bf4c3 100644 (file)
@@ -212,10 +212,9 @@ static int tee_bnxt_fw_probe(struct device *dev)
 
        pvt_data.dev = dev;
 
-       fw_shm_pool = tee_shm_alloc(pvt_data.ctx, MAX_SHM_MEM_SZ,
-                                   TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
+       fw_shm_pool = tee_shm_alloc_kernel_buf(pvt_data.ctx, MAX_SHM_MEM_SZ);
        if (IS_ERR(fw_shm_pool)) {
-               dev_err(pvt_data.dev, "tee_shm_alloc failed\n");
+               dev_err(pvt_data.dev, "tee_shm_alloc_kernel_buf failed\n");
                err = PTR_ERR(fw_shm_pool);
                goto out_sess;
        }
@@ -242,6 +241,14 @@ static int tee_bnxt_fw_remove(struct device *dev)
        return 0;
 }
 
+static void tee_bnxt_fw_shutdown(struct device *dev)
+{
+       tee_shm_free(pvt_data.fw_shm_pool);
+       tee_client_close_session(pvt_data.ctx, pvt_data.session_id);
+       tee_client_close_context(pvt_data.ctx);
+       pvt_data.ctx = NULL;
+}
+
 static const struct tee_client_device_id tee_bnxt_fw_id_table[] = {
        {UUID_INIT(0x6272636D, 0x2019, 0x0716,
                    0x42, 0x43, 0x4D, 0x5F, 0x53, 0x43, 0x48, 0x49)},
@@ -257,6 +264,7 @@ static struct tee_client_driver tee_bnxt_fw_driver = {
                .bus            = &tee_bus_type,
                .probe          = tee_bnxt_fw_probe,
                .remove         = tee_bnxt_fw_remove,
+               .shutdown       = tee_bnxt_fw_shutdown,
        },
 };
 
index ea7ca74..73bdbd2 100644 (file)
@@ -221,7 +221,7 @@ static int cper_mem_err_location(struct cper_mem_err_compact *mem, char *msg)
                return 0;
 
        n = 0;
-       len = CPER_REC_LEN - 1;
+       len = CPER_REC_LEN;
        if (mem->validation_bits & CPER_MEM_VALID_NODE)
                n += scnprintf(msg + n, len - n, "node: %d ", mem->node);
        if (mem->validation_bits & CPER_MEM_VALID_CARD)
@@ -258,13 +258,12 @@ static int cper_mem_err_location(struct cper_mem_err_compact *mem, char *msg)
                n += scnprintf(msg + n, len - n, "responder_id: 0x%016llx ",
                               mem->responder_id);
        if (mem->validation_bits & CPER_MEM_VALID_TARGET_ID)
-               scnprintf(msg + n, len - n, "target_id: 0x%016llx ",
-                         mem->target_id);
+               n += scnprintf(msg + n, len - n, "target_id: 0x%016llx ",
+                              mem->target_id);
        if (mem->validation_bits & CPER_MEM_VALID_CHIP_ID)
-               scnprintf(msg + n, len - n, "chip_id: %d ",
-                         mem->extended >> CPER_MEM_CHIP_ID_SHIFT);
+               n += scnprintf(msg + n, len - n, "chip_id: %d ",
+                              mem->extended >> CPER_MEM_CHIP_ID_SHIFT);
 
-       msg[n] = '\0';
        return n;
 }
 
@@ -633,7 +632,7 @@ int cper_estatus_check(const struct acpi_hest_generic_status *estatus)
        data_len = estatus->data_length;
 
        apei_estatus_for_each_section(estatus, gdata) {
-               if (sizeof(struct acpi_hest_generic_data) > data_len)
+               if (acpi_hest_get_size(gdata) > data_len)
                        return -EINVAL;
 
                record_size = acpi_hest_get_record_size(gdata);
index 7bf0a7a..2363fee 100644 (file)
@@ -35,15 +35,48 @@ efi_status_t check_platform_features(void)
 }
 
 /*
- * Although relocatable kernels can fix up the misalignment with respect to
- * MIN_KIMG_ALIGN, the resulting virtual text addresses are subtly out of
- * sync with those recorded in the vmlinux when kaslr is disabled but the
- * image required relocation anyway. Therefore retain 2M alignment unless
- * KASLR is in use.
+ * Distro versions of GRUB may ignore the BSS allocation entirely (i.e., fail
+ * to provide space, and fail to zero it). Check for this condition by double
+ * checking that the first and the last byte of the image are covered by the
+ * same EFI memory map entry.
  */
-static u64 min_kimg_align(void)
+static bool check_image_region(u64 base, u64 size)
 {
-       return efi_nokaslr ? MIN_KIMG_ALIGN : EFI_KIMG_ALIGN;
+       unsigned long map_size, desc_size, buff_size;
+       efi_memory_desc_t *memory_map;
+       struct efi_boot_memmap map;
+       efi_status_t status;
+       bool ret = false;
+       int map_offset;
+
+       map.map =       &memory_map;
+       map.map_size =  &map_size;
+       map.desc_size = &desc_size;
+       map.desc_ver =  NULL;
+       map.key_ptr =   NULL;
+       map.buff_size = &buff_size;
+
+       status = efi_get_memory_map(&map);
+       if (status != EFI_SUCCESS)
+               return false;
+
+       for (map_offset = 0; map_offset < map_size; map_offset += desc_size) {
+               efi_memory_desc_t *md = (void *)memory_map + map_offset;
+               u64 end = md->phys_addr + md->num_pages * EFI_PAGE_SIZE;
+
+               /*
+                * Find the region that covers base, and return whether
+                * it covers base+size bytes.
+                */
+               if (base >= md->phys_addr && base < end) {
+                       ret = (base + size) <= end;
+                       break;
+               }
+       }
+
+       efi_bs_call(free_pool, memory_map);
+
+       return ret;
 }
 
 efi_status_t handle_kernel_image(unsigned long *image_addr,
@@ -56,6 +89,16 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
        unsigned long kernel_size, kernel_memsize = 0;
        u32 phys_seed = 0;
 
+       /*
+        * Although relocatable kernels can fix up the misalignment with
+        * respect to MIN_KIMG_ALIGN, the resulting virtual text addresses are
+        * subtly out of sync with those recorded in the vmlinux when kaslr is
+        * disabled but the image required relocation anyway. Therefore retain
+        * 2M alignment if KASLR was explicitly disabled, even if it was not
+        * going to be activated to begin with.
+        */
+       u64 min_kimg_align = efi_nokaslr ? MIN_KIMG_ALIGN : EFI_KIMG_ALIGN;
+
        if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
                if (!efi_nokaslr) {
                        status = efi_get_random_bytes(sizeof(phys_seed),
@@ -76,6 +119,10 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
        if (image->image_base != _text)
                efi_err("FIRMWARE BUG: efi_loaded_image_t::image_base has bogus value\n");
 
+       if (!IS_ALIGNED((u64)_text, EFI_KIMG_ALIGN))
+               efi_err("FIRMWARE BUG: kernel image not aligned on %ldk boundary\n",
+                       EFI_KIMG_ALIGN >> 10);
+
        kernel_size = _edata - _text;
        kernel_memsize = kernel_size + (_end - _edata);
        *reserve_size = kernel_memsize;
@@ -85,14 +132,18 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
                 * If KASLR is enabled, and we have some randomness available,
                 * locate the kernel at a randomized offset in physical memory.
                 */
-               status = efi_random_alloc(*reserve_size, min_kimg_align(),
+               status = efi_random_alloc(*reserve_size, min_kimg_align,
                                          reserve_addr, phys_seed);
+               if (status != EFI_SUCCESS)
+                       efi_warn("efi_random_alloc() failed: 0x%lx\n", status);
        } else {
                status = EFI_OUT_OF_RESOURCES;
        }
 
        if (status != EFI_SUCCESS) {
-               if (IS_ALIGNED((u64)_text, min_kimg_align())) {
+               if (!check_image_region((u64)_text, kernel_memsize)) {
+                       efi_err("FIRMWARE BUG: Image BSS overlaps adjacent EFI memory region\n");
+               } else if (IS_ALIGNED((u64)_text, min_kimg_align)) {
                        /*
                         * Just execute from wherever we were loaded by the
                         * UEFI PE/COFF loader if the alignment is suitable.
@@ -103,7 +154,7 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
                }
 
                status = efi_allocate_pages_aligned(*reserve_size, reserve_addr,
-                                                   ULONG_MAX, min_kimg_align());
+                                                   ULONG_MAX, min_kimg_align);
 
                if (status != EFI_SUCCESS) {
                        efi_err("Failed to relocate kernel\n");
index a408df4..724155b 100644 (file)
@@ -30,6 +30,8 @@ static unsigned long get_entry_num_slots(efi_memory_desc_t *md,
 
        region_end = min(md->phys_addr + md->num_pages * EFI_PAGE_SIZE - 1,
                         (u64)ULONG_MAX);
+       if (region_end < size)
+               return 0;
 
        first_slot = round_up(md->phys_addr, align);
        last_slot = round_down(region_end - size + 1, align);
index 9f937b1..60ccf3e 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/init.h>
 #include <linux/arm-smccc.h>
 #include <linux/kernel.h>
+#include <linux/platform_device.h>
 #include <asm/archrandom.h>
 
 static u32 smccc_version = ARM_SMCCC_VERSION_1_0;
@@ -42,3 +43,19 @@ u32 arm_smccc_get_version(void)
        return smccc_version;
 }
 EXPORT_SYMBOL_GPL(arm_smccc_get_version);
+
+static int __init smccc_devices_init(void)
+{
+       struct platform_device *pdev;
+
+       if (smccc_trng_available) {
+               pdev = platform_device_register_simple("smccc_trng", -1,
+                                                      NULL, 0);
+               if (IS_ERR(pdev))
+                       pr_err("smccc_trng: could not register device: %ld\n",
+                              PTR_ERR(pdev));
+       }
+
+       return 0;
+}
+device_initcall(smccc_devices_init);
index 4299145..587c82b 100644 (file)
@@ -953,6 +953,8 @@ static int fme_perf_offline_cpu(unsigned int cpu, struct hlist_node *node)
                return 0;
 
        priv->cpu = target;
+       perf_pmu_migrate_context(&priv->pmu, cpu, target);
+
        return 0;
 }
 
index fab5710..81abd89 100644 (file)
@@ -520,6 +520,14 @@ config GPIO_REG
          A 32-bit single register GPIO fixed in/out implementation.  This
          can be used to represent any register as a set of GPIO signals.
 
+config GPIO_ROCKCHIP
+       tristate "Rockchip GPIO support"
+       depends on ARCH_ROCKCHIP || COMPILE_TEST
+       select GPIOLIB_IRQCHIP
+       default ARCH_ROCKCHIP
+       help
+         Say yes here to support GPIO on Rockchip SoCs.
+
 config GPIO_SAMA5D2_PIOBU
        tristate "SAMA5D2 PIOBU GPIO support"
        depends on MFD_SYSCON
index 32a3265..5243e2d 100644 (file)
@@ -128,6 +128,7 @@ obj-$(CONFIG_GPIO_RDA)                      += gpio-rda.o
 obj-$(CONFIG_GPIO_RDC321X)             += gpio-rdc321x.o
 obj-$(CONFIG_GPIO_REALTEK_OTTO)                += gpio-realtek-otto.o
 obj-$(CONFIG_GPIO_REG)                 += gpio-reg.o
+obj-$(CONFIG_GPIO_ROCKCHIP)    += gpio-rockchip.o
 obj-$(CONFIG_ARCH_SA1100)              += gpio-sa1100.o
 obj-$(CONFIG_GPIO_SAMA5D2_PIOBU)       += gpio-sama5d2-piobu.o
 obj-$(CONFIG_GPIO_SCH311X)             += gpio-sch311x.o
index 71c0bea..6bf4104 100644 (file)
@@ -336,8 +336,8 @@ static irqreturn_t dio48e_irq_handler(int irq, void *dev_id)
        unsigned long gpio;
 
        for_each_set_bit(gpio, &irq_mask, 2)
-               generic_handle_irq(irq_find_mapping(chip->irq.domain,
-                       19 + gpio*24));
+               generic_handle_domain_irq(chip->irq.domain,
+                                         19 + gpio*24);
 
        raw_spin_lock(&dio48egpio->lock);
 
index b132afa..34be7dd 100644 (file)
@@ -223,8 +223,8 @@ static irqreturn_t idi_48_irq_handler(int irq, void *dev_id)
                for_each_set_bit(bit_num, &irq_mask, 8) {
                        gpio = bit_num + boundary * 8;
 
-                       generic_handle_irq(irq_find_mapping(chip->irq.domain,
-                               gpio));
+                       generic_handle_domain_irq(chip->irq.domain,
+                                                 gpio);
                }
        }
 
index 55b4029..c68ed1a 100644 (file)
@@ -208,7 +208,7 @@ static irqreturn_t idio_16_irq_handler(int irq, void *dev_id)
        int gpio;
 
        for_each_set_bit(gpio, &idio16gpio->irq_mask, chip->ngpio)
-               generic_handle_irq(irq_find_mapping(chip->irq.domain, gpio));
+               generic_handle_domain_irq(chip->irq.domain, gpio);
 
        raw_spin_lock(&idio16gpio->lock);
 
index b7932ec..b59fae9 100644 (file)
@@ -201,9 +201,8 @@ static void altera_gpio_irq_edge_handler(struct irq_desc *desc)
              (readl(mm_gc->regs + ALTERA_GPIO_EDGE_CAP) &
              readl(mm_gc->regs + ALTERA_GPIO_IRQ_MASK)))) {
                writel(status, mm_gc->regs + ALTERA_GPIO_EDGE_CAP);
-               for_each_set_bit(i, &status, mm_gc->gc.ngpio) {
-                       generic_handle_irq(irq_find_mapping(irqdomain, i));
-               }
+               for_each_set_bit(i, &status, mm_gc->gc.ngpio)
+                       generic_handle_domain_irq(irqdomain, i);
        }
 
        chained_irq_exit(chip, desc);
@@ -228,9 +227,9 @@ static void altera_gpio_irq_leveL_high_handler(struct irq_desc *desc)
        status = readl(mm_gc->regs + ALTERA_GPIO_DATA);
        status &= readl(mm_gc->regs + ALTERA_GPIO_IRQ_MASK);
 
-       for_each_set_bit(i, &status, mm_gc->gc.ngpio) {
-               generic_handle_irq(irq_find_mapping(irqdomain, i));
-       }
+       for_each_set_bit(i, &status, mm_gc->gc.ngpio)
+               generic_handle_domain_irq(irqdomain, i);
+
        chained_irq_exit(chip, desc);
 }
 
index 64e54f8..a99ece1 100644 (file)
@@ -392,7 +392,7 @@ static void aspeed_sgpio_irq_handler(struct irq_desc *desc)
        struct gpio_chip *gc = irq_desc_get_handler_data(desc);
        struct irq_chip *ic = irq_desc_get_chip(desc);
        struct aspeed_sgpio *data = gpiochip_get_data(gc);
-       unsigned int i, p, girq;
+       unsigned int i, p;
        unsigned long reg;
 
        chained_irq_enter(ic, desc);
@@ -402,11 +402,8 @@ static void aspeed_sgpio_irq_handler(struct irq_desc *desc)
 
                reg = ioread32(bank_reg(data, bank, reg_irq_status));
 
-               for_each_set_bit(p, &reg, 32) {
-                       girq = irq_find_mapping(gc->irq.domain, i * 32 + p);
-                       generic_handle_irq(girq);
-               }
-
+               for_each_set_bit(p, &reg, 32)
+                       generic_handle_domain_irq(gc->irq.domain, i * 32 + p);
        }
 
        chained_irq_exit(ic, desc);
index b966f5e..3c8f20c 100644 (file)
@@ -661,7 +661,7 @@ static void aspeed_gpio_irq_handler(struct irq_desc *desc)
        struct gpio_chip *gc = irq_desc_get_handler_data(desc);
        struct irq_chip *ic = irq_desc_get_chip(desc);
        struct aspeed_gpio *data = gpiochip_get_data(gc);
-       unsigned int i, p, girq, banks;
+       unsigned int i, p, banks;
        unsigned long reg;
        struct aspeed_gpio *gpio = gpiochip_get_data(gc);
 
@@ -673,11 +673,8 @@ static void aspeed_gpio_irq_handler(struct irq_desc *desc)
 
                reg = ioread32(bank_reg(data, bank, reg_irq_status));
 
-               for_each_set_bit(p, &reg, 32) {
-                       girq = irq_find_mapping(gc->irq.domain, i * 32 + p);
-                       generic_handle_irq(girq);
-               }
-
+               for_each_set_bit(p, &reg, 32)
+                       generic_handle_domain_irq(gc->irq.domain, i * 32 + p);
        }
 
        chained_irq_exit(ic, desc);
index 9b780dc..3958c6d 100644 (file)
@@ -204,11 +204,8 @@ static void ath79_gpio_irq_handler(struct irq_desc *desc)
 
        raw_spin_unlock_irqrestore(&ctrl->lock, flags);
 
-       if (pending) {
-               for_each_set_bit(irq, &pending, gc->ngpio)
-                       generic_handle_irq(
-                               irq_linear_revmap(gc->irq.domain, irq));
-       }
+       for_each_set_bit(irq, &pending, gc->ngpio)
+               generic_handle_domain_irq(gc->irq.domain, irq);
 
        chained_irq_exit(irqchip, desc);
 }
index 1e6b427..d329a14 100644 (file)
@@ -466,9 +466,6 @@ static void bcm_kona_gpio_irq_handler(struct irq_desc *desc)
                    (~(readl(reg_base + GPIO_INT_MASK(bank_id)))))) {
                for_each_set_bit(bit, &sta, 32) {
                        int hwirq = GPIO_PER_BANK * bank_id + bit;
-                       int child_irq =
-                               irq_find_mapping(bank->kona_gpio->irq_domain,
-                                                hwirq);
                        /*
                         * Clear interrupt before handler is called so we don't
                         * miss any interrupt occurred during executing them.
@@ -476,7 +473,8 @@ static void bcm_kona_gpio_irq_handler(struct irq_desc *desc)
                        writel(readl(reg_base + GPIO_INT_STATUS(bank_id)) |
                               BIT(bit), reg_base + GPIO_INT_STATUS(bank_id));
                        /* Invoke interrupt handler */
-                       generic_handle_irq(child_irq);
+                       generic_handle_domain_irq(bank->kona_gpio->irq_domain,
+                                                 hwirq);
                }
        }
 
index fcfc1a1..74b7c91 100644 (file)
@@ -277,15 +277,14 @@ static void brcmstb_gpio_irq_bank_handler(struct brcmstb_gpio_bank *bank)
        unsigned long status;
 
        while ((status = brcmstb_gpio_get_active_irqs(bank))) {
-               unsigned int irq, offset;
+               unsigned int offset;
 
                for_each_set_bit(offset, &status, 32) {
                        if (offset >= bank->width)
                                dev_warn(&priv->pdev->dev,
                                         "IRQ for invalid GPIO (bank=%d, offset=%d)\n",
                                         bank->id, offset);
-                       irq = irq_linear_revmap(domain, hwbase + offset);
-                       generic_handle_irq(irq);
+                       generic_handle_domain_irq(domain, hwbase + offset);
                }
        }
 }
index 4ab3fcd..562f8f7 100644 (file)
@@ -133,7 +133,7 @@ static void cdns_gpio_irq_handler(struct irq_desc *desc)
                ~ioread32(cgpio->regs + CDNS_GPIO_IRQ_MASK);
 
        for_each_set_bit(hwirq, &status, chip->ngpio)
-               generic_handle_irq(irq_find_mapping(chip->irq.domain, hwirq));
+               generic_handle_domain_irq(chip->irq.domain, hwirq);
 
        chained_irq_exit(irqchip, desc);
 }
index 6f21385..cb5afaa 100644 (file)
@@ -369,8 +369,7 @@ static void gpio_irq_handler(struct irq_desc *desc)
                         */
                        hw_irq = (bank_num / 2) * 32 + bit;
 
-                       generic_handle_irq(
-                               irq_find_mapping(d->irq_domain, hw_irq));
+                       generic_handle_domain_irq(d->irq_domain, hw_irq);
                }
        }
        chained_irq_exit(irq_desc_get_chip(desc), desc);
index 4c5f6d0..026903e 100644 (file)
@@ -395,7 +395,7 @@ static struct irq_chip dln2_gpio_irqchip = {
 static void dln2_gpio_event(struct platform_device *pdev, u16 echo,
                            const void *data, int len)
 {
-       int pin, irq;
+       int pin, ret;
 
        const struct {
                __le16 count;
@@ -416,24 +416,20 @@ static void dln2_gpio_event(struct platform_device *pdev, u16 echo,
                return;
        }
 
-       irq = irq_find_mapping(dln2->gpio.irq.domain, pin);
-       if (!irq) {
-               dev_err(dln2->gpio.parent, "pin %d not mapped to IRQ\n", pin);
-               return;
-       }
-
        switch (dln2->irq_type[pin]) {
        case DLN2_GPIO_EVENT_CHANGE_RISING:
-               if (event->value)
-                       generic_handle_irq(irq);
+               if (!event->value)
+                       return;
                break;
        case DLN2_GPIO_EVENT_CHANGE_FALLING:
-               if (!event->value)
-                       generic_handle_irq(irq);
+               if (event->value)
+                       return;
                break;
-       default:
-               generic_handle_irq(irq);
        }
+
+       ret = generic_handle_domain_irq(dln2->gpio.irq.domain, pin);
+       if (unlikely(ret))
+               dev_err(dln2->gpio.parent, "pin %d not mapped to IRQ\n", pin);
 }
 
 static int dln2_gpio_probe(struct platform_device *pdev)
index 17a243c..90b336e 100644 (file)
@@ -173,7 +173,7 @@ static irqreturn_t em_gio_irq_handler(int irq, void *dev_id)
        while ((pending = em_gio_read(p, GIO_MST))) {
                offset = __ffs(pending);
                em_gio_write(p, GIO_IIR, BIT(offset));
-               generic_handle_irq(irq_find_mapping(p->irq_domain, offset));
+               generic_handle_domain_irq(p->irq_domain, offset);
                irqs_handled++;
        }
 
index ef148b2..2e17797 100644 (file)
@@ -128,13 +128,13 @@ static void ep93xx_gpio_ab_irq_handler(struct irq_desc *desc)
         */
        stat = readb(epg->base + EP93XX_GPIO_A_INT_STATUS);
        for_each_set_bit(offset, &stat, 8)
-               generic_handle_irq(irq_find_mapping(epg->gc[0].gc.irq.domain,
-                                                   offset));
+               generic_handle_domain_irq(epg->gc[0].gc.irq.domain,
+                                         offset);
 
        stat = readb(epg->base + EP93XX_GPIO_B_INT_STATUS);
        for_each_set_bit(offset, &stat, 8)
-               generic_handle_irq(irq_find_mapping(epg->gc[1].gc.irq.domain,
-                                                   offset));
+               generic_handle_domain_irq(epg->gc[1].gc.irq.domain,
+                                         offset);
 
        chained_irq_exit(irqchip, desc);
 }
index 4031164..b90a45c 100644 (file)
@@ -149,8 +149,7 @@ static void ftgpio_gpio_irq_handler(struct irq_desc *desc)
        stat = readl(g->base + GPIO_INT_STAT_RAW);
        if (stat)
                for_each_set_bit(offset, &stat, gc->ngpio)
-                       generic_handle_irq(irq_find_mapping(gc->irq.domain,
-                                                           offset));
+                       generic_handle_domain_irq(gc->irq.domain, offset);
 
        chained_irq_exit(irqchip, desc);
 }
index ad3d4da..3caabef 100644 (file)
@@ -186,8 +186,8 @@ static void hisi_gpio_irq_handler(struct irq_desc *desc)
 
        chained_irq_enter(irq_c, desc);
        for_each_set_bit(hwirq, &irq_msk, HISI_GPIO_LINE_NUM_MAX)
-               generic_handle_irq(irq_find_mapping(hisi_gpio->chip.irq.domain,
-                                                   hwirq));
+               generic_handle_domain_irq(hisi_gpio->chip.irq.domain,
+                                         hwirq);
        chained_irq_exit(irq_c, desc);
 }
 
index 4a17599..641719a 100644 (file)
@@ -97,11 +97,8 @@ static void hlwd_gpio_irqhandler(struct irq_desc *desc)
 
        chained_irq_enter(chip, desc);
 
-       for_each_set_bit(hwirq, &pending, 32) {
-               int irq = irq_find_mapping(hlwd->gpioc.irq.domain, hwirq);
-
-               generic_handle_irq(irq);
-       }
+       for_each_set_bit(hwirq, &pending, 32)
+               generic_handle_domain_irq(hlwd->gpioc.irq.domain, hwirq);
 
        chained_irq_exit(chip, desc);
 }
index 22f3ce2..42c4d9d 100644 (file)
@@ -359,12 +359,8 @@ static void mrfld_irq_handler(struct irq_desc *desc)
                /* Only interrupts that are enabled */
                pending &= enabled;
 
-               for_each_set_bit(gpio, &pending, 32) {
-                       unsigned int irq;
-
-                       irq = irq_find_mapping(gc->irq.domain, base + gpio);
-                       generic_handle_irq(irq);
-               }
+               for_each_set_bit(gpio, &pending, 32)
+                       generic_handle_domain_irq(gc->irq.domain, base + gpio);
        }
 
        chained_irq_exit(irqchip, desc);
index 4b9157a..67dc389 100644 (file)
@@ -120,7 +120,7 @@ static irqreturn_t mpc8xxx_gpio_irq_cascade(int irq, void *data)
        mask = gc->read_reg(mpc8xxx_gc->regs + GPIO_IER)
                & gc->read_reg(mpc8xxx_gc->regs + GPIO_IMR);
        for_each_set_bit(i, &mask, 32)
-               generic_handle_irq(irq_linear_revmap(mpc8xxx_gc->irq, 31 - i));
+               generic_handle_domain_irq(mpc8xxx_gc->irq, 31 - i);
 
        return IRQ_HANDLED;
 }
@@ -405,7 +405,7 @@ static int mpc8xxx_probe(struct platform_device *pdev)
 
        ret = devm_request_irq(&pdev->dev, mpc8xxx_gc->irqn,
                               mpc8xxx_gpio_irq_cascade,
-                              IRQF_SHARED, "gpio-cascade",
+                              IRQF_NO_THREAD | IRQF_SHARED, "gpio-cascade",
                               mpc8xxx_gc);
        if (ret) {
                dev_err(&pdev->dev,
index 82fb20d..10c0a9b 100644 (file)
@@ -95,9 +95,7 @@ mediatek_gpio_irq_handler(int irq, void *data)
        pending = mtk_gpio_r32(rg, GPIO_REG_STAT);
 
        for_each_set_bit(bit, &pending, MTK_BANK_WIDTH) {
-               u32 map = irq_find_mapping(gc->irq.domain, bit);
-
-               generic_handle_irq(map);
+               generic_handle_domain_irq(gc->irq.domain, bit);
                mtk_gpio_w32(rg, GPIO_REG_STAT, BIT(bit));
                ret |= IRQ_HANDLED;
        }
index b9fdf05..c871602 100644 (file)
@@ -241,7 +241,7 @@ static void mxc_gpio_irq_handler(struct mxc_gpio_port *port, u32 irq_stat)
                if (port->both_edges & (1 << irqoffset))
                        mxc_flip_edge(port, irqoffset);
 
-               generic_handle_irq(irq_find_mapping(port->domain, irqoffset));
+               generic_handle_domain_irq(port->domain, irqoffset);
 
                irq_stat &= ~(1 << irqoffset);
        }
index 31a336b..c5166cd 100644 (file)
@@ -157,7 +157,7 @@ static void mxs_gpio_irq_handler(struct irq_desc *desc)
                if (port->both_edges & (1 << irqoffset))
                        mxs_flip_edge(port, irqoffset);
 
-               generic_handle_irq(irq_find_mapping(port->domain, irqoffset));
+               generic_handle_domain_irq(port->domain, irqoffset);
                irq_stat &= ~(1 << irqoffset);
        }
 }
index ca23f72..415e8df 100644 (file)
@@ -611,8 +611,7 @@ static irqreturn_t omap_gpio_irq_handler(int irq, void *gpiobank)
 
                        raw_spin_lock_irqsave(&bank->wa_lock, wa_lock_flags);
 
-                       generic_handle_irq(irq_find_mapping(bank->chip.irq.domain,
-                                                           bit));
+                       generic_handle_domain_irq(bank->chip.irq.domain, bit);
 
                        raw_spin_unlock_irqrestore(&bank->wa_lock,
                                                   wa_lock_flags);
index 9acec76..71a13a3 100644 (file)
@@ -260,7 +260,7 @@ static irqreturn_t idio_16_irq_handler(int irq, void *dev_id)
                return IRQ_NONE;
 
        for_each_set_bit(gpio, &idio16gpio->irq_mask, chip->ngpio)
-               generic_handle_irq(irq_find_mapping(chip->irq.domain, gpio));
+               generic_handle_domain_irq(chip->irq.domain, gpio);
 
        raw_spin_lock(&idio16gpio->lock);
 
index 2a07fd9..8a9b98f 100644 (file)
@@ -468,8 +468,7 @@ static irqreturn_t idio_24_irq_handler(int irq, void *dev_id)
        irq_mask = idio24gpio->irq_mask & irq_status;
 
        for_each_set_bit(gpio, &irq_mask, chip->ngpio - 24)
-               generic_handle_irq(irq_find_mapping(chip->irq.domain,
-                       gpio + 24));
+               generic_handle_domain_irq(chip->irq.domain, gpio + 24);
 
        raw_spin_lock(&idio24gpio->lock);
 
index f1b53dd..4ecab70 100644 (file)
@@ -223,8 +223,8 @@ static void pl061_irq_handler(struct irq_desc *desc)
        pending = readb(pl061->base + GPIOMIS);
        if (pending) {
                for_each_set_bit(offset, &pending, PL061_GPIO_NR)
-                       generic_handle_irq(irq_find_mapping(gc->irq.domain,
-                                                           offset));
+                       generic_handle_domain_irq(gc->irq.domain,
+                                                 offset);
        }
 
        chained_irq_exit(irqchip, desc);
index 0cb6600..382468e 100644 (file)
@@ -455,9 +455,8 @@ static irqreturn_t pxa_gpio_demux_handler(int in_irq, void *d)
                        for_each_set_bit(n, &gedr, BITS_PER_LONG) {
                                loop = 1;
 
-                               generic_handle_irq(
-                                       irq_find_mapping(pchip->irqdomain,
-                                                        gpio + n));
+                               generic_handle_domain_irq(pchip->irqdomain,
+                                                         gpio + n);
                        }
                }
                handled += loop;
@@ -471,9 +470,9 @@ static irqreturn_t pxa_gpio_direct_handler(int in_irq, void *d)
        struct pxa_gpio_chip *pchip = d;
 
        if (in_irq == pchip->irq0) {
-               generic_handle_irq(irq_find_mapping(pchip->irqdomain, 0));
+               generic_handle_domain_irq(pchip->irqdomain, 0);
        } else if (in_irq == pchip->irq1) {
-               generic_handle_irq(irq_find_mapping(pchip->irqdomain, 1));
+               generic_handle_domain_irq(pchip->irqdomain, 1);
        } else {
                pr_err("%s() unknown irq %d\n", __func__, in_irq);
                return IRQ_NONE;
index e7092d5..b378aba 100644 (file)
@@ -213,8 +213,8 @@ static irqreturn_t gpio_rcar_irq_handler(int irq, void *dev_id)
                          gpio_rcar_read(p, INTMSK))) {
                offset = __ffs(pending);
                gpio_rcar_write(p, INTCLR, BIT(offset));
-               generic_handle_irq(irq_find_mapping(p->gpio_chip.irq.domain,
-                                                   offset));
+               generic_handle_domain_irq(p->gpio_chip.irq.domain,
+                                         offset);
                irqs_handled++;
        }
 
index 28dcbb5..4638464 100644 (file)
@@ -181,7 +181,7 @@ static void rda_gpio_irq_handler(struct irq_desc *desc)
        struct irq_chip *ic = irq_desc_get_chip(desc);
        struct rda_gpio *rda_gpio = gpiochip_get_data(chip);
        unsigned long status;
-       u32 n, girq;
+       u32 n;
 
        chained_irq_enter(ic, desc);
 
@@ -189,10 +189,8 @@ static void rda_gpio_irq_handler(struct irq_desc *desc)
        /* Only lower 8 bits are capable of generating interrupts */
        status &= RDA_GPIO_IRQ_MASK;
 
-       for_each_set_bit(n, &status, RDA_GPIO_BANK_NR) {
-               girq = irq_find_mapping(chip->irq.domain, n);
-               generic_handle_irq(girq);
-       }
+       for_each_set_bit(n, &status, RDA_GPIO_BANK_NR)
+               generic_handle_domain_irq(chip->irq.domain, n);
 
        chained_irq_exit(ic, desc);
 }
index cb64fb5..eeeb39b 100644 (file)
@@ -196,7 +196,6 @@ static void realtek_gpio_irq_handler(struct irq_desc *desc)
        struct irq_chip *irq_chip = irq_desc_get_chip(desc);
        unsigned int lines_done;
        unsigned int port_pin_count;
-       unsigned int irq;
        unsigned long status;
        int offset;
 
@@ -205,10 +204,8 @@ static void realtek_gpio_irq_handler(struct irq_desc *desc)
        for (lines_done = 0; lines_done < gc->ngpio; lines_done += 8) {
                status = realtek_gpio_read_isr(ctrl, lines_done / 8);
                port_pin_count = min(gc->ngpio - lines_done, 8U);
-               for_each_set_bit(offset, &status, port_pin_count) {
-                       irq = irq_find_mapping(gc->irq.domain, offset);
-                       generic_handle_irq(irq);
-               }
+               for_each_set_bit(offset, &status, port_pin_count)
+                       generic_handle_domain_irq(gc->irq.domain, offset);
        }
 
        chained_irq_exit(irq_chip, desc);
diff --git a/drivers/gpio/gpio-rockchip.c b/drivers/gpio/gpio-rockchip.c
new file mode 100644 (file)
index 0000000..036b2d9
--- /dev/null
@@ -0,0 +1,771 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2013 MundoReader S.L.
+ * Author: Heiko Stuebner <heiko@sntech.de>
+ *
+ * Copyright (c) 2021 Rockchip Electronics Co. Ltd.
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/gpio/driver.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/regmap.h>
+
+#include "../pinctrl/core.h"
+#include "../pinctrl/pinctrl-rockchip.h"
+
+#define GPIO_TYPE_V1           (0)           /* GPIO Version ID reserved */
+#define GPIO_TYPE_V2           (0x01000C2B)  /* GPIO Version ID 0x01000C2B */
+
+static const struct rockchip_gpio_regs gpio_regs_v1 = {
+       .port_dr = 0x00,
+       .port_ddr = 0x04,
+       .int_en = 0x30,
+       .int_mask = 0x34,
+       .int_type = 0x38,
+       .int_polarity = 0x3c,
+       .int_status = 0x40,
+       .int_rawstatus = 0x44,
+       .debounce = 0x48,
+       .port_eoi = 0x4c,
+       .ext_port = 0x50,
+};
+
+static const struct rockchip_gpio_regs gpio_regs_v2 = {
+       .port_dr = 0x00,
+       .port_ddr = 0x08,
+       .int_en = 0x10,
+       .int_mask = 0x18,
+       .int_type = 0x20,
+       .int_polarity = 0x28,
+       .int_bothedge = 0x30,
+       .int_status = 0x50,
+       .int_rawstatus = 0x58,
+       .debounce = 0x38,
+       .dbclk_div_en = 0x40,
+       .dbclk_div_con = 0x48,
+       .port_eoi = 0x60,
+       .ext_port = 0x70,
+       .version_id = 0x78,
+};
+
+static inline void gpio_writel_v2(u32 val, void __iomem *reg)
+{
+       writel((val & 0xffff) | 0xffff0000, reg);
+       writel((val >> 16) | 0xffff0000, reg + 0x4);
+}
+
+static inline u32 gpio_readl_v2(void __iomem *reg)
+{
+       return readl(reg + 0x4) << 16 | readl(reg);
+}
+
+static inline void rockchip_gpio_writel(struct rockchip_pin_bank *bank,
+                                       u32 value, unsigned int offset)
+{
+       void __iomem *reg = bank->reg_base + offset;
+
+       if (bank->gpio_type == GPIO_TYPE_V2)
+               gpio_writel_v2(value, reg);
+       else
+               writel(value, reg);
+}
+
+static inline u32 rockchip_gpio_readl(struct rockchip_pin_bank *bank,
+                                     unsigned int offset)
+{
+       void __iomem *reg = bank->reg_base + offset;
+       u32 value;
+
+       if (bank->gpio_type == GPIO_TYPE_V2)
+               value = gpio_readl_v2(reg);
+       else
+               value = readl(reg);
+
+       return value;
+}
+
+static inline void rockchip_gpio_writel_bit(struct rockchip_pin_bank *bank,
+                                           u32 bit, u32 value,
+                                           unsigned int offset)
+{
+       void __iomem *reg = bank->reg_base + offset;
+       u32 data;
+
+       if (bank->gpio_type == GPIO_TYPE_V2) {
+               if (value)
+                       data = BIT(bit % 16) | BIT(bit % 16 + 16);
+               else
+                       data = BIT(bit % 16 + 16);
+               writel(data, bit >= 16 ? reg + 0x4 : reg);
+       } else {
+               data = readl(reg);
+               data &= ~BIT(bit);
+               if (value)
+                       data |= BIT(bit);
+               writel(data, reg);
+       }
+}
+
+static inline u32 rockchip_gpio_readl_bit(struct rockchip_pin_bank *bank,
+                                         u32 bit, unsigned int offset)
+{
+       void __iomem *reg = bank->reg_base + offset;
+       u32 data;
+
+       if (bank->gpio_type == GPIO_TYPE_V2) {
+               data = readl(bit >= 16 ? reg + 0x4 : reg);
+               data >>= bit % 16;
+       } else {
+               data = readl(reg);
+               data >>= bit;
+       }
+
+       return data & (0x1);
+}
+
+static int rockchip_gpio_get_direction(struct gpio_chip *chip,
+                                      unsigned int offset)
+{
+       struct rockchip_pin_bank *bank = gpiochip_get_data(chip);
+       u32 data;
+
+       data = rockchip_gpio_readl_bit(bank, offset, bank->gpio_regs->port_ddr);
+       if (data & BIT(offset))
+               return GPIO_LINE_DIRECTION_OUT;
+
+       return GPIO_LINE_DIRECTION_IN;
+}
+
+static int rockchip_gpio_set_direction(struct gpio_chip *chip,
+                                      unsigned int offset, bool input)
+{
+       struct rockchip_pin_bank *bank = gpiochip_get_data(chip);
+       unsigned long flags;
+       u32 data = input ? 0 : 1;
+
+       raw_spin_lock_irqsave(&bank->slock, flags);
+       rockchip_gpio_writel_bit(bank, offset, data, bank->gpio_regs->port_ddr);
+       raw_spin_unlock_irqrestore(&bank->slock, flags);
+
+       return 0;
+}
+
+static void rockchip_gpio_set(struct gpio_chip *gc, unsigned int offset,
+                             int value)
+{
+       struct rockchip_pin_bank *bank = gpiochip_get_data(gc);
+       unsigned long flags;
+
+       raw_spin_lock_irqsave(&bank->slock, flags);
+       rockchip_gpio_writel_bit(bank, offset, value, bank->gpio_regs->port_dr);
+       raw_spin_unlock_irqrestore(&bank->slock, flags);
+}
+
+static int rockchip_gpio_get(struct gpio_chip *gc, unsigned int offset)
+{
+       struct rockchip_pin_bank *bank = gpiochip_get_data(gc);
+       u32 data;
+
+       data = readl(bank->reg_base + bank->gpio_regs->ext_port);
+       data >>= offset;
+       data &= 1;
+
+       return data;
+}
+
+static int rockchip_gpio_set_debounce(struct gpio_chip *gc,
+                                     unsigned int offset,
+                                     unsigned int debounce)
+{
+       struct rockchip_pin_bank *bank = gpiochip_get_data(gc);
+       const struct rockchip_gpio_regs *reg = bank->gpio_regs;
+       unsigned long flags, div_reg, freq, max_debounce;
+       bool div_debounce_support;
+       unsigned int cur_div_reg;
+       u64 div;
+
+       if (!IS_ERR(bank->db_clk)) {
+               div_debounce_support = true;
+               freq = clk_get_rate(bank->db_clk);
+               max_debounce = (GENMASK(23, 0) + 1) * 2 * 1000000 / freq;
+               if (debounce > max_debounce)
+                       return -EINVAL;
+
+               div = debounce * freq;
+               div_reg = DIV_ROUND_CLOSEST_ULL(div, 2 * USEC_PER_SEC) - 1;
+       } else {
+               div_debounce_support = false;
+       }
+
+       raw_spin_lock_irqsave(&bank->slock, flags);
+
+       /* Only GPIO v2 needs to configure div_en and div_con for dbclk */
+       if (debounce) {
+               if (div_debounce_support) {
+                       /* Configure the max debounce from consumers */
+                       cur_div_reg = readl(bank->reg_base +
+                                           reg->dbclk_div_con);
+                       if (cur_div_reg < div_reg)
+                               writel(div_reg, bank->reg_base +
+                                      reg->dbclk_div_con);
+                       rockchip_gpio_writel_bit(bank, offset, 1,
+                                                reg->dbclk_div_en);
+               }
+
+               rockchip_gpio_writel_bit(bank, offset, 1, reg->debounce);
+       } else {
+               if (div_debounce_support)
+                       rockchip_gpio_writel_bit(bank, offset, 0,
+                                                reg->dbclk_div_en);
+
+               rockchip_gpio_writel_bit(bank, offset, 0, reg->debounce);
+       }
+
+       raw_spin_unlock_irqrestore(&bank->slock, flags);
+
+       /* Enable or disable dbclk at last */
+       if (div_debounce_support) {
+               if (debounce)
+                       clk_prepare_enable(bank->db_clk);
+               else
+                       clk_disable_unprepare(bank->db_clk);
+       }
+
+       return 0;
+}
+
+static int rockchip_gpio_direction_input(struct gpio_chip *gc,
+                                        unsigned int offset)
+{
+       return rockchip_gpio_set_direction(gc, offset, true);
+}
+
+static int rockchip_gpio_direction_output(struct gpio_chip *gc,
+                                         unsigned int offset, int value)
+{
+       rockchip_gpio_set(gc, offset, value);
+
+       return rockchip_gpio_set_direction(gc, offset, false);
+}
+
+/*
+ * gpiolib set_config callback function. The setting of the pin
+ * mux function as 'gpio output' will be handled by the pinctrl subsystem
+ * interface.
+ */
+static int rockchip_gpio_set_config(struct gpio_chip *gc, unsigned int offset,
+                                 unsigned long config)
+{
+       enum pin_config_param param = pinconf_to_config_param(config);
+
+       switch (param) {
+       case PIN_CONFIG_INPUT_DEBOUNCE:
+               rockchip_gpio_set_debounce(gc, offset, true);
+               /*
+                * Rockchip's gpio could only support up to one period
+                * of the debounce clock (pclk), which is far from
+                * satisfying the requirement, as pclk is usually near
+                * 100MHz shared by all peripherals. So in fact its
+                * crippled debounce capability could only be useful
+                * to prevent any spurious glitches from waking up the system
+                * if the gpio is configured as a wakeup interrupt source. Let's
+                * still return -ENOTSUPP as before, to make sure the caller
+                * of gpiod_set_debounce won't change its behaviour.
+                */
+               return -ENOTSUPP;
+       default:
+               return -ENOTSUPP;
+       }
+}
+
+/*
+ * gpiolib gpio_to_irq callback function. Creates a mapping between a GPIO pin
+ * and a virtual IRQ, if not already present.
+ */
+static int rockchip_gpio_to_irq(struct gpio_chip *gc, unsigned int offset)
+{
+       struct rockchip_pin_bank *bank = gpiochip_get_data(gc);
+       unsigned int virq;
+
+       if (!bank->domain)
+               return -ENXIO;
+
+       virq = irq_create_mapping(bank->domain, offset);
+
+       return (virq) ? : -ENXIO;
+}
+
+static const struct gpio_chip rockchip_gpiolib_chip = {
+       .request = gpiochip_generic_request,
+       .free = gpiochip_generic_free,
+       .set = rockchip_gpio_set,
+       .get = rockchip_gpio_get,
+       .get_direction  = rockchip_gpio_get_direction,
+       .direction_input = rockchip_gpio_direction_input,
+       .direction_output = rockchip_gpio_direction_output,
+       .set_config = rockchip_gpio_set_config,
+       .to_irq = rockchip_gpio_to_irq,
+       .owner = THIS_MODULE,
+};
+
+static void rockchip_irq_demux(struct irq_desc *desc)
+{
+       struct irq_chip *chip = irq_desc_get_chip(desc);
+       struct rockchip_pin_bank *bank = irq_desc_get_handler_data(desc);
+       u32 pend;
+
+       dev_dbg(bank->dev, "got irq for bank %s\n", bank->name);
+
+       chained_irq_enter(chip, desc);
+
+       pend = readl_relaxed(bank->reg_base + bank->gpio_regs->int_status);
+
+       while (pend) {
+               unsigned int irq, virq;
+
+               irq = __ffs(pend);
+               pend &= ~BIT(irq);
+               virq = irq_find_mapping(bank->domain, irq);
+
+               if (!virq) {
+                       dev_err(bank->dev, "unmapped irq %d\n", irq);
+                       continue;
+               }
+
+               dev_dbg(bank->dev, "handling irq %d\n", irq);
+
+               /*
+                * Triggering IRQ on both rising and falling edge
+                * needs manual intervention.
+                */
+               if (bank->toggle_edge_mode & BIT(irq)) {
+                       u32 data, data_old, polarity;
+                       unsigned long flags;
+
+                       data = readl_relaxed(bank->reg_base +
+                                            bank->gpio_regs->ext_port);
+                       do {
+                               raw_spin_lock_irqsave(&bank->slock, flags);
+
+                               polarity = readl_relaxed(bank->reg_base +
+                                                        bank->gpio_regs->int_polarity);
+                               if (data & BIT(irq))
+                                       polarity &= ~BIT(irq);
+                               else
+                                       polarity |= BIT(irq);
+                               writel(polarity,
+                                      bank->reg_base +
+                                      bank->gpio_regs->int_polarity);
+
+                               raw_spin_unlock_irqrestore(&bank->slock, flags);
+
+                               data_old = data;
+                               data = readl_relaxed(bank->reg_base +
+                                                    bank->gpio_regs->ext_port);
+                       } while ((data & BIT(irq)) != (data_old & BIT(irq)));
+               }
+
+               generic_handle_irq(virq);
+       }
+
+       chained_irq_exit(chip, desc);
+}
+
+static int rockchip_irq_set_type(struct irq_data *d, unsigned int type)
+{
+       struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+       struct rockchip_pin_bank *bank = gc->private;
+       u32 mask = BIT(d->hwirq);
+       u32 polarity;
+       u32 level;
+       u32 data;
+       unsigned long flags;
+       int ret = 0;
+
+       raw_spin_lock_irqsave(&bank->slock, flags);
+
+       rockchip_gpio_writel_bit(bank, d->hwirq, 0,
+                                bank->gpio_regs->port_ddr);
+
+       raw_spin_unlock_irqrestore(&bank->slock, flags);
+
+       if (type & IRQ_TYPE_EDGE_BOTH)
+               irq_set_handler_locked(d, handle_edge_irq);
+       else
+               irq_set_handler_locked(d, handle_level_irq);
+
+       raw_spin_lock_irqsave(&bank->slock, flags);
+
+       level = rockchip_gpio_readl(bank, bank->gpio_regs->int_type);
+       polarity = rockchip_gpio_readl(bank, bank->gpio_regs->int_polarity);
+
+       switch (type) {
+       case IRQ_TYPE_EDGE_BOTH:
+               if (bank->gpio_type == GPIO_TYPE_V2) {
+                       bank->toggle_edge_mode &= ~mask;
+                       rockchip_gpio_writel_bit(bank, d->hwirq, 1,
+                                                bank->gpio_regs->int_bothedge);
+                       goto out;
+               } else {
+                       bank->toggle_edge_mode |= mask;
+                       level |= mask;
+
+                       /*
+                        * Determine gpio state. If 1 next interrupt should be
+                        * falling otherwise rising.
+                        */
+                       data = readl(bank->reg_base + bank->gpio_regs->ext_port);
+                       if (data & mask)
+                               polarity &= ~mask;
+                       else
+                               polarity |= mask;
+               }
+               break;
+       case IRQ_TYPE_EDGE_RISING:
+               bank->toggle_edge_mode &= ~mask;
+               level |= mask;
+               polarity |= mask;
+               break;
+       case IRQ_TYPE_EDGE_FALLING:
+               bank->toggle_edge_mode &= ~mask;
+               level |= mask;
+               polarity &= ~mask;
+               break;
+       case IRQ_TYPE_LEVEL_HIGH:
+               bank->toggle_edge_mode &= ~mask;
+               level &= ~mask;
+               polarity |= mask;
+               break;
+       case IRQ_TYPE_LEVEL_LOW:
+               bank->toggle_edge_mode &= ~mask;
+               level &= ~mask;
+               polarity &= ~mask;
+               break;
+       default:
+               ret = -EINVAL;
+               goto out;
+       }
+
+       rockchip_gpio_writel(bank, level, bank->gpio_regs->int_type);
+       rockchip_gpio_writel(bank, polarity, bank->gpio_regs->int_polarity);
+out:
+       raw_spin_unlock_irqrestore(&bank->slock, flags);
+
+       return ret;
+}
+
+static void rockchip_irq_suspend(struct irq_data *d)
+{
+       struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+       struct rockchip_pin_bank *bank = gc->private;
+
+       bank->saved_masks = irq_reg_readl(gc, bank->gpio_regs->int_mask);
+       irq_reg_writel(gc, ~gc->wake_active, bank->gpio_regs->int_mask);
+}
+
+static void rockchip_irq_resume(struct irq_data *d)
+{
+       struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+       struct rockchip_pin_bank *bank = gc->private;
+
+       irq_reg_writel(gc, bank->saved_masks, bank->gpio_regs->int_mask);
+}
+
+static void rockchip_irq_enable(struct irq_data *d)
+{
+       irq_gc_mask_clr_bit(d);
+}
+
+static void rockchip_irq_disable(struct irq_data *d)
+{
+       irq_gc_mask_set_bit(d);
+}
+
+static int rockchip_interrupts_register(struct rockchip_pin_bank *bank)
+{
+       unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
+       struct irq_chip_generic *gc;
+       int ret;
+
+       bank->domain = irq_domain_add_linear(bank->of_node, 32,
+                                       &irq_generic_chip_ops, NULL);
+       if (!bank->domain) {
+               dev_warn(bank->dev, "could not init irq domain for bank %s\n",
+                        bank->name);
+               return -EINVAL;
+       }
+
+       ret = irq_alloc_domain_generic_chips(bank->domain, 32, 1,
+                                            "rockchip_gpio_irq",
+                                            handle_level_irq,
+                                            clr, 0, 0);
+       if (ret) {
+               dev_err(bank->dev, "could not alloc generic chips for bank %s\n",
+                       bank->name);
+               irq_domain_remove(bank->domain);
+               return -EINVAL;
+       }
+
+       gc = irq_get_domain_generic_chip(bank->domain, 0);
+       if (bank->gpio_type == GPIO_TYPE_V2) {
+               gc->reg_writel = gpio_writel_v2;
+               gc->reg_readl = gpio_readl_v2;
+       }
+
+       gc->reg_base = bank->reg_base;
+       gc->private = bank;
+       gc->chip_types[0].regs.mask = bank->gpio_regs->int_mask;
+       gc->chip_types[0].regs.ack = bank->gpio_regs->port_eoi;
+       gc->chip_types[0].chip.irq_ack = irq_gc_ack_set_bit;
+       gc->chip_types[0].chip.irq_mask = irq_gc_mask_set_bit;
+       gc->chip_types[0].chip.irq_unmask = irq_gc_mask_clr_bit;
+       gc->chip_types[0].chip.irq_enable = rockchip_irq_enable;
+       gc->chip_types[0].chip.irq_disable = rockchip_irq_disable;
+       gc->chip_types[0].chip.irq_set_wake = irq_gc_set_wake;
+       gc->chip_types[0].chip.irq_suspend = rockchip_irq_suspend;
+       gc->chip_types[0].chip.irq_resume = rockchip_irq_resume;
+       gc->chip_types[0].chip.irq_set_type = rockchip_irq_set_type;
+       gc->wake_enabled = IRQ_MSK(bank->nr_pins);
+
+       /*
+        * Linux assumes that all interrupts start out disabled/masked.
+        * Our driver only uses the concept of masked and always keeps
+        * things enabled, so for us that's all masked and all enabled.
+        */
+       rockchip_gpio_writel(bank, 0xffffffff, bank->gpio_regs->int_mask);
+       rockchip_gpio_writel(bank, 0xffffffff, bank->gpio_regs->port_eoi);
+       rockchip_gpio_writel(bank, 0xffffffff, bank->gpio_regs->int_en);
+       gc->mask_cache = 0xffffffff;
+
+       irq_set_chained_handler_and_data(bank->irq,
+                                        rockchip_irq_demux, bank);
+
+       return 0;
+}
+
+static int rockchip_gpiolib_register(struct rockchip_pin_bank *bank)
+{
+       struct gpio_chip *gc;
+       int ret;
+
+       bank->gpio_chip = rockchip_gpiolib_chip;
+
+       gc = &bank->gpio_chip;
+       gc->base = bank->pin_base;
+       gc->ngpio = bank->nr_pins;
+       gc->label = bank->name;
+       gc->parent = bank->dev;
+#ifdef CONFIG_OF_GPIO
+       gc->of_node = of_node_get(bank->of_node);
+#endif
+
+       ret = gpiochip_add_data(gc, bank);
+       if (ret) {
+               dev_err(bank->dev, "failed to add gpiochip %s, %d\n",
+                       gc->label, ret);
+               return ret;
+       }
+
+       /*
+        * For DeviceTree-supported systems, the gpio core checks the
+        * pinctrl's device node for the "gpio-ranges" property.
+        * If it is present, it takes care of adding the pin ranges
+        * for the driver. In this case the driver can skip ahead.
+        *
+        * In order to remain compatible with older, existing DeviceTree
+        * files which don't set the "gpio-ranges" property or systems that
+        * utilize ACPI the driver has to call gpiochip_add_pin_range().
+        */
+       if (!of_property_read_bool(bank->of_node, "gpio-ranges")) {
+               struct device_node *pctlnp = of_get_parent(bank->of_node);
+               struct pinctrl_dev *pctldev = NULL;
+
+               if (!pctlnp)
+                       return -ENODATA;
+
+               pctldev = of_pinctrl_get(pctlnp);
+               if (!pctldev)
+                       return -ENODEV;
+
+               ret = gpiochip_add_pin_range(gc, dev_name(pctldev->dev), 0,
+                                            gc->base, gc->ngpio);
+               if (ret) {
+                       dev_err(bank->dev, "Failed to add pin range\n");
+                       goto fail;
+               }
+       }
+
+       ret = rockchip_interrupts_register(bank);
+       if (ret) {
+               dev_err(bank->dev, "failed to register interrupt, %d\n", ret);
+               goto fail;
+       }
+
+       return 0;
+
+fail:
+       gpiochip_remove(&bank->gpio_chip);
+
+       return ret;
+}
+
+static int rockchip_get_bank_data(struct rockchip_pin_bank *bank)
+{
+       struct resource res;
+       int id = 0;
+
+       if (of_address_to_resource(bank->of_node, 0, &res)) {
+               dev_err(bank->dev, "cannot find IO resource for bank\n");
+               return -ENOENT;
+       }
+
+       bank->reg_base = devm_ioremap_resource(bank->dev, &res);
+       if (IS_ERR(bank->reg_base))
+               return PTR_ERR(bank->reg_base);
+
+       bank->irq = irq_of_parse_and_map(bank->of_node, 0);
+       if (!bank->irq)
+               return -EINVAL;
+
+       bank->clk = of_clk_get(bank->of_node, 0);
+       if (IS_ERR(bank->clk))
+               return PTR_ERR(bank->clk);
+
+       clk_prepare_enable(bank->clk);
+       id = readl(bank->reg_base + gpio_regs_v2.version_id);
+
+       /* If the version ID does not match GPIO v2, default to v1. */
+       if (id == GPIO_TYPE_V2) {
+               bank->gpio_regs = &gpio_regs_v2;
+               bank->gpio_type = GPIO_TYPE_V2;
+               bank->db_clk = of_clk_get(bank->of_node, 1);
+               if (IS_ERR(bank->db_clk)) {
+                       dev_err(bank->dev, "cannot find debounce clk\n");
+                       clk_disable_unprepare(bank->clk);
+                       return -EINVAL;
+               }
+       } else {
+               bank->gpio_regs = &gpio_regs_v1;
+               bank->gpio_type = GPIO_TYPE_V1;
+       }
+
+       return 0;
+}
+
+static struct rockchip_pin_bank *
+rockchip_gpio_find_bank(struct pinctrl_dev *pctldev, int id)
+{
+       struct rockchip_pinctrl *info;
+       struct rockchip_pin_bank *bank;
+       int i, found = 0;
+
+       info = pinctrl_dev_get_drvdata(pctldev);
+       bank = info->ctrl->pin_banks;
+       for (i = 0; i < info->ctrl->nr_banks; i++, bank++) {
+               if (bank->bank_num == id) {
+                       found = 1;
+                       break;
+               }
+       }
+
+       return found ? bank : NULL;
+}
+
+static int rockchip_gpio_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct device_node *np = dev->of_node;
+       struct device_node *pctlnp = of_get_parent(np);
+       struct pinctrl_dev *pctldev = NULL;
+       struct rockchip_pin_bank *bank = NULL;
+       static int gpio;
+       int id, ret;
+
+       if (!np || !pctlnp)
+               return -ENODEV;
+
+       pctldev = of_pinctrl_get(pctlnp);
+       if (!pctldev)
+               return -EPROBE_DEFER;
+
+       id = of_alias_get_id(np, "gpio");
+       if (id < 0)
+               id = gpio++;
+
+       bank = rockchip_gpio_find_bank(pctldev, id);
+       if (!bank)
+               return -EINVAL;
+
+       bank->dev = dev;
+       bank->of_node = np;
+
+       raw_spin_lock_init(&bank->slock);
+
+       ret = rockchip_get_bank_data(bank);
+       if (ret)
+               return ret;
+
+       ret = rockchip_gpiolib_register(bank);
+       if (ret) {
+               clk_disable_unprepare(bank->clk);
+               return ret;
+       }
+
+       platform_set_drvdata(pdev, bank);
+       dev_info(dev, "probed %pOF\n", np);
+
+       return 0;
+}
+
+static int rockchip_gpio_remove(struct platform_device *pdev)
+{
+       struct rockchip_pin_bank *bank = platform_get_drvdata(pdev);
+
+       clk_disable_unprepare(bank->clk);
+       gpiochip_remove(&bank->gpio_chip);
+
+       return 0;
+}
+
+static const struct of_device_id rockchip_gpio_match[] = {
+       { .compatible = "rockchip,gpio-bank", },
+       { .compatible = "rockchip,rk3188-gpio-bank0" },
+       { },
+};
+
+static struct platform_driver rockchip_gpio_driver = {
+       .probe          = rockchip_gpio_probe,
+       .remove         = rockchip_gpio_remove,
+       .driver         = {
+               .name   = "rockchip-gpio",
+               .of_match_table = rockchip_gpio_match,
+       },
+};
+
+static int __init rockchip_gpio_init(void)
+{
+       return platform_driver_register(&rockchip_gpio_driver);
+}
+postcore_initcall(rockchip_gpio_init);
+
+static void __exit rockchip_gpio_exit(void)
+{
+       platform_driver_unregister(&rockchip_gpio_driver);
+}
+module_exit(rockchip_gpio_exit);
+
+MODULE_DESCRIPTION("Rockchip gpio driver");
+MODULE_ALIAS("platform:rockchip-gpio");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(of, rockchip_gpio_match);
index a6f0421..0600f71 100644 (file)
@@ -259,7 +259,7 @@ static u32 sch_gpio_gpe_handler(acpi_handle gpe_device, u32 gpe, void *context)
 
        pending = (resume_status << sch->resume_base) | core_status;
        for_each_set_bit(offset, &pending, sch->chip.ngpio)
-               generic_handle_irq(irq_find_mapping(gc->irq.domain, offset));
+               generic_handle_domain_irq(gc->irq.domain, offset);
 
        /* Set returning value depending on whether we handled an interrupt */
        ret = pending ? ACPI_INTERRUPT_HANDLED : ACPI_INTERRUPT_NOT_HANDLED;
index aed988e..c2a2c76 100644 (file)
@@ -84,7 +84,7 @@ static irqreturn_t sdv_gpio_pub_irq_handler(int irq, void *data)
                return IRQ_NONE;
 
        for_each_set_bit(irq_bit, &irq_stat, 32)
-               generic_handle_irq(irq_find_mapping(sd->id, irq_bit));
+               generic_handle_domain_irq(sd->id, irq_bit);
 
        return IRQ_HANDLED;
 }
index 25c37ed..9dd9dab 100644 (file)
@@ -189,7 +189,7 @@ static void sprd_gpio_irq_handler(struct irq_desc *desc)
        struct gpio_chip *chip = irq_desc_get_handler_data(desc);
        struct irq_chip *ic = irq_desc_get_chip(desc);
        struct sprd_gpio *sprd_gpio = gpiochip_get_data(chip);
-       u32 bank, n, girq;
+       u32 bank, n;
 
        chained_irq_enter(ic, desc);
 
@@ -198,13 +198,9 @@ static void sprd_gpio_irq_handler(struct irq_desc *desc)
                unsigned long reg = readl_relaxed(base + SPRD_GPIO_MIS) &
                        SPRD_GPIO_BANK_MASK;
 
-               for_each_set_bit(n, &reg, SPRD_GPIO_BANK_NR) {
-                       girq = irq_find_mapping(chip->irq.domain,
-                                               bank * SPRD_GPIO_BANK_NR + n);
-
-                       generic_handle_irq(girq);
-               }
-
+               for_each_set_bit(n, &reg, SPRD_GPIO_BANK_NR)
+                       generic_handle_domain_irq(chip->irq.domain,
+                                                 bank * SPRD_GPIO_BANK_NR + n);
        }
        chained_irq_exit(ic, desc);
 }
index 866201c..718a508 100644 (file)
@@ -100,7 +100,7 @@ static irqreturn_t tb10x_gpio_irq_cascade(int irq, void *data)
        int i;
 
        for_each_set_bit(i, &bits, 32)
-               generic_handle_irq(irq_find_mapping(tb10x_gpio->domain, i));
+               generic_handle_domain_irq(tb10x_gpio->domain, i);
 
        return IRQ_HANDLED;
 }
index 0025f61..7f5bc10 100644 (file)
@@ -408,6 +408,8 @@ static void tegra_gpio_irq_handler(struct irq_desc *desc)
                lvl = tegra_gpio_readl(tgi, GPIO_INT_LVL(tgi, gpio));
 
                for_each_set_bit(pin, &sta, 8) {
+                       int ret;
+
                        tegra_gpio_writel(tgi, 1 << pin,
                                          GPIO_INT_CLR(tgi, gpio));
 
@@ -420,11 +422,8 @@ static void tegra_gpio_irq_handler(struct irq_desc *desc)
                                chained_irq_exit(chip, desc);
                        }
 
-                       irq = irq_find_mapping(domain, gpio + pin);
-                       if (WARN_ON(irq == 0))
-                               continue;
-
-                       generic_handle_irq(irq);
+                       ret = generic_handle_domain_irq(domain, gpio + pin);
+                       WARN_RATELIMIT(ret, "hwirq = %d", gpio + pin);
                }
        }
 
index d38980b..05c90d7 100644 (file)
@@ -456,7 +456,7 @@ static void tegra186_gpio_irq(struct irq_desc *desc)
 
        for (i = 0; i < gpio->soc->num_ports; i++) {
                const struct tegra_gpio_port *port = &gpio->soc->ports[i];
-               unsigned int pin, irq;
+               unsigned int pin;
                unsigned long value;
                void __iomem *base;
 
@@ -469,11 +469,8 @@ static void tegra186_gpio_irq(struct irq_desc *desc)
                value = readl(base + TEGRA186_GPIO_INTERRUPT_STATUS(1));
 
                for_each_set_bit(pin, &value, port->pins) {
-                       irq = irq_find_mapping(domain, offset + pin);
-                       if (WARN_ON(irq == 0))
-                               continue;
-
-                       generic_handle_irq(irq);
+                       int ret = generic_handle_domain_irq(domain, offset + pin);
+                       WARN_RATELIMIT(ret, "hwirq = %d", offset + pin);
                }
 
 skip:
index 5022e0a..5b10322 100644 (file)
@@ -183,7 +183,7 @@ static void tqmx86_gpio_irq_handler(struct irq_desc *desc)
        struct tqmx86_gpio_data *gpio = gpiochip_get_data(chip);
        struct irq_chip *irq_chip = irq_desc_get_chip(desc);
        unsigned long irq_bits;
-       int i = 0, child_irq;
+       int i = 0;
        u8 irq_status;
 
        chained_irq_enter(irq_chip, desc);
@@ -192,11 +192,9 @@ static void tqmx86_gpio_irq_handler(struct irq_desc *desc)
        tqmx86_gpio_write(gpio, irq_status, TQMX86_GPIIS);
 
        irq_bits = irq_status;
-       for_each_set_bit(i, &irq_bits, TQMX86_NGPI) {
-               child_irq = irq_find_mapping(gpio->chip.irq.domain,
-                                            i + TQMX86_NGPO);
-               generic_handle_irq(child_irq);
-       }
+       for_each_set_bit(i, &irq_bits, TQMX86_NGPI)
+               generic_handle_domain_irq(gpio->chip.irq.domain,
+                                         i + TQMX86_NGPO);
 
        chained_irq_exit(irq_chip, desc);
 }
@@ -238,8 +236,8 @@ static int tqmx86_gpio_probe(struct platform_device *pdev)
        struct resource *res;
        int ret, irq;
 
-       irq = platform_get_irq(pdev, 0);
-       if (irq < 0)
+       irq = platform_get_irq_optional(pdev, 0);
+       if (irq < 0 && irq != -ENXIO)
                return irq;
 
        res = platform_get_resource(pdev, IORESOURCE_IO, 0);
@@ -278,7 +276,7 @@ static int tqmx86_gpio_probe(struct platform_device *pdev)
 
        pm_runtime_enable(&pdev->dev);
 
-       if (irq) {
+       if (irq > 0) {
                struct irq_chip *irq_chip = &gpio->irq_chip;
                u8 irq_status;
 
index 58776f2..e0f2b67 100644 (file)
@@ -149,7 +149,7 @@ static void vf610_gpio_irq_handler(struct irq_desc *desc)
        for_each_set_bit(pin, &irq_isfr, VF610_GPIO_PER_PORT) {
                vf610_gpio_writel(BIT(pin), port->base + PORT_ISFR);
 
-               generic_handle_irq(irq_find_mapping(port->gc.irq.domain, pin));
+               generic_handle_domain_irq(port->gc.irq.domain, pin);
        }
 
        chained_irq_exit(chip, desc);
index 2d89d05..bb02a82 100644 (file)
@@ -339,8 +339,8 @@ static irqreturn_t ws16c48_irq_handler(int irq, void *dev_id)
                for_each_set_bit(port, &int_pending, 3) {
                        int_id = inb(ws16c48gpio->base + 8 + port);
                        for_each_set_bit(gpio, &int_id, 8)
-                               generic_handle_irq(irq_find_mapping(
-                                       chip->irq.domain, gpio + 8*port));
+                               generic_handle_domain_irq(chip->irq.domain,
+                                                         gpio + 8*port);
                }
 
                int_pending = inb(ws16c48gpio->base + 6) & 0x7;
index ad5489a..fa9b4d8 100644 (file)
@@ -185,7 +185,7 @@ static irqreturn_t iproc_gpio_irq_handler(int irq, void *data)
                int_bits = level | event;
 
                for_each_set_bit(bit, &int_bits, gc->ngpio)
-                       generic_handle_irq(irq_linear_revmap(gc->irq.domain, bit));
+                       generic_handle_domain_irq(gc->irq.domain, bit);
        }
 
        return int_bits ? IRQ_HANDLED : IRQ_NONE;
index c329c3a..a1b6633 100644 (file)
@@ -538,7 +538,7 @@ static void xgpio_irqhandler(struct irq_desc *desc)
 
        for_each_set_bit(bit, all, 64) {
                irq_offset = xgpio_from_bit(chip, bit);
-               generic_handle_irq(irq_find_mapping(gc->irq.domain, irq_offset));
+               generic_handle_domain_irq(gc->irq.domain, irq_offset);
        }
 
        chained_irq_exit(irqchip, desc);
index d7b16bb..0d94d3a 100644 (file)
@@ -216,8 +216,7 @@ static void xlp_gpio_generic_handler(struct irq_desc *desc)
                }
 
                if (gpio_stat & BIT(gpio % XLP_GPIO_REGSZ))
-                       generic_handle_irq(irq_find_mapping(
-                                               priv->chip.irq.domain, gpio));
+                       generic_handle_domain_irq(priv->chip.irq.domain, gpio);
        }
        chained_irq_exit(irqchip, desc);
 }
index f0cb8cc..06c6401 100644 (file)
@@ -628,12 +628,8 @@ static void zynq_gpio_handle_bank_irq(struct zynq_gpio *gpio,
        if (!pending)
                return;
 
-       for_each_set_bit(offset, &pending, 32) {
-               unsigned int gpio_irq;
-
-               gpio_irq = irq_find_mapping(irqdomain, offset + bank_offset);
-               generic_handle_irq(gpio_irq);
-       }
+       for_each_set_bit(offset, &pending, 32)
+               generic_handle_domain_irq(irqdomain, offset + bank_offset);
 }
 
 /**
index 84a1b4b..a9ce3b2 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/slab.h>
 #include <linux/power_supply.h>
 #include <linux/pm_runtime.h>
+#include <linux/suspend.h>
 #include <acpi/video.h>
 #include <acpi/actbl.h>
 
@@ -1039,10 +1040,10 @@ void amdgpu_acpi_detect(void)
  */
 bool amdgpu_acpi_is_s0ix_supported(struct amdgpu_device *adev)
 {
-#if defined(CONFIG_AMD_PMC) || defined(CONFIG_AMD_PMC_MODULE)
+#if IS_ENABLED(CONFIG_AMD_PMC) && IS_ENABLED(CONFIG_SUSPEND)
        if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0) {
                if (adev->flags & AMD_IS_APU)
-                       return true;
+                       return pm_suspend_target_state == PM_SUSPEND_TO_IDLE;
        }
 #endif
        return false;
index 3b5d131..8f53837 100644 (file)
@@ -468,6 +468,46 @@ bool amdgpu_atomfirmware_dynamic_boot_config_supported(struct amdgpu_device *ade
        return (fw_cap & ATOM_FIRMWARE_CAP_DYNAMIC_BOOT_CFG_ENABLE) ? true : false;
 }
 
+/*
+ * Helper function to query RAS EEPROM address
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Return true if vbios supports ras rom address reporting
+ */
+bool amdgpu_atomfirmware_ras_rom_addr(struct amdgpu_device *adev, uint8_t* i2c_address)
+{
+       struct amdgpu_mode_info *mode_info = &adev->mode_info;
+       int index;
+       u16 data_offset, size;
+       union firmware_info *firmware_info;
+       u8 frev, crev;
+
+       if (i2c_address == NULL)
+               return false;
+
+       *i2c_address = 0;
+
+       index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+                       firmwareinfo);
+
+       if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context,
+                               index, &size, &frev, &crev, &data_offset)) {
+               /* support firmware_info 3.4 + */
+               if ((frev == 3 && crev >=4) || (frev > 3)) {
+                       firmware_info = (union firmware_info *)
+                               (mode_info->atom_context->bios + data_offset);
+                       *i2c_address = firmware_info->v34.ras_rom_i2c_slave_addr;
+               }
+       }
+
+       if (*i2c_address != 0)
+               return true;
+
+       return false;
+}
+
+
 union smu_info {
        struct atom_smu_info_v3_1 v31;
 };
index 1bbbb19..751248b 100644 (file)
@@ -36,6 +36,7 @@ int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev);
 int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev);
 bool amdgpu_atomfirmware_mem_ecc_supported(struct amdgpu_device *adev);
 bool amdgpu_atomfirmware_sram_ecc_supported(struct amdgpu_device *adev);
+bool amdgpu_atomfirmware_ras_rom_addr(struct amdgpu_device *adev, uint8_t* i2c_address);
 bool amdgpu_atomfirmware_mem_training_supported(struct amdgpu_device *adev);
 bool amdgpu_atomfirmware_dynamic_boot_config_supported(struct amdgpu_device *adev);
 int amdgpu_atomfirmware_get_fw_reserved_fb_size(struct amdgpu_device *adev);
index d303e88..f944ed8 100644 (file)
@@ -2777,12 +2777,11 @@ static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
        struct amdgpu_device *adev =
                container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);
 
-       mutex_lock(&adev->gfx.gfx_off_mutex);
-       if (!adev->gfx.gfx_off_state && !adev->gfx.gfx_off_req_count) {
-               if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
-                       adev->gfx.gfx_off_state = true;
-       }
-       mutex_unlock(&adev->gfx.gfx_off_mutex);
+       WARN_ON_ONCE(adev->gfx.gfx_off_state);
+       WARN_ON_ONCE(adev->gfx.gfx_off_req_count);
+
+       if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
+               adev->gfx.gfx_off_state = true;
 }
 
 /**
@@ -3504,13 +3503,13 @@ int amdgpu_device_init(struct amdgpu_device *adev,
        r = amdgpu_device_get_job_timeout_settings(adev);
        if (r) {
                dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
-               goto failed_unmap;
+               return r;
        }
 
        /* early init functions */
        r = amdgpu_device_ip_early_init(adev);
        if (r)
-               goto failed_unmap;
+               return r;
 
        /* doorbell bar mapping and doorbell index init*/
        amdgpu_device_doorbell_init(adev);
@@ -3736,10 +3735,6 @@ release_ras_con:
 failed:
        amdgpu_vf_error_trans_all(adev);
 
-failed_unmap:
-       iounmap(adev->rmmio);
-       adev->rmmio = NULL;
-
        return r;
 }
 
index 43e7b61..ada7bc1 100644 (file)
@@ -299,6 +299,9 @@ int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
                                  ip->major, ip->minor,
                                  ip->revision);
 
+                       if (le16_to_cpu(ip->hw_id) == VCN_HWID)
+                               adev->vcn.num_vcn_inst++;
+
                        for (k = 0; k < num_base_address; k++) {
                                /*
                                 * convert the endianness of base addresses in place,
@@ -385,7 +388,7 @@ void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
 {
        struct binary_header *bhdr;
        struct harvest_table *harvest_info;
-       int i;
+       int i, vcn_harvest_count = 0;
 
        bhdr = (struct binary_header *)adev->mman.discovery_bin;
        harvest_info = (struct harvest_table *)(adev->mman.discovery_bin +
@@ -397,8 +400,7 @@ void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
 
                switch (le32_to_cpu(harvest_info->list[i].hw_id)) {
                case VCN_HWID:
-                       adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK;
-                       adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK;
+                       vcn_harvest_count++;
                        break;
                case DMU_HWID:
                        adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
@@ -407,6 +409,10 @@ void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
                        break;
                }
        }
+       if (vcn_harvest_count == adev->vcn.num_vcn_inst) {
+               adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK;
+               adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK;
+       }
 }
 
 int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
index 361b86b..971c5b8 100644 (file)
@@ -1213,6 +1213,13 @@ static const struct pci_device_id pciidlist[] = {
        {0x1002, 0x740F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ALDEBARAN|AMD_EXP_HW_SUPPORT},
        {0x1002, 0x7410, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ALDEBARAN|AMD_EXP_HW_SUPPORT},
 
+       /* BEIGE_GOBY */
+       {0x1002, 0x7420, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY},
+       {0x1002, 0x7421, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY},
+       {0x1002, 0x7422, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY},
+       {0x1002, 0x7423, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY},
+       {0x1002, 0x743F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY},
+
        {0, 0, 0}
 };
 
@@ -1564,6 +1571,8 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
                pci_ignore_hotplug(pdev);
                pci_set_power_state(pdev, PCI_D3cold);
                drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF;
+       } else if (amdgpu_device_supports_boco(drm_dev)) {
+               /* nothing to do */
        } else if (amdgpu_device_supports_baco(drm_dev)) {
                amdgpu_device_baco_enter(drm_dev);
        }
index a0be077..b4ced45 100644 (file)
@@ -563,24 +563,38 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
 
        mutex_lock(&adev->gfx.gfx_off_mutex);
 
-       if (!enable)
-               adev->gfx.gfx_off_req_count++;
-       else if (adev->gfx.gfx_off_req_count > 0)
+       if (enable) {
+               /* If the count is already 0, it means there's an imbalance bug somewhere.
+                * Note that the bug may be in a different caller than the one which triggers the
+                * WARN_ON_ONCE.
+                */
+               if (WARN_ON_ONCE(adev->gfx.gfx_off_req_count == 0))
+                       goto unlock;
+
                adev->gfx.gfx_off_req_count--;
 
-       if (enable && !adev->gfx.gfx_off_state && !adev->gfx.gfx_off_req_count) {
-               schedule_delayed_work(&adev->gfx.gfx_off_delay_work, GFX_OFF_DELAY_ENABLE);
-       } else if (!enable && adev->gfx.gfx_off_state) {
-               if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false)) {
-                       adev->gfx.gfx_off_state = false;
+               if (adev->gfx.gfx_off_req_count == 0 && !adev->gfx.gfx_off_state)
+                       schedule_delayed_work(&adev->gfx.gfx_off_delay_work, GFX_OFF_DELAY_ENABLE);
+       } else {
+               if (adev->gfx.gfx_off_req_count == 0) {
+                       cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
+
+                       if (adev->gfx.gfx_off_state &&
+                           !amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false)) {
+                               adev->gfx.gfx_off_state = false;
 
-                       if (adev->gfx.funcs->init_spm_golden) {
-                               dev_dbg(adev->dev, "GFXOFF is disabled, re-init SPM golden settings\n");
-                               amdgpu_gfx_init_spm_golden(adev);
+                               if (adev->gfx.funcs->init_spm_golden) {
+                                       dev_dbg(adev->dev,
+                                               "GFXOFF is disabled, re-init SPM golden settings\n");
+                                       amdgpu_gfx_init_spm_golden(adev);
+                               }
                        }
                }
+
+               adev->gfx.gfx_off_req_count++;
        }
 
+unlock:
        mutex_unlock(&adev->gfx.gfx_off_mutex);
 }
 
index 83af307..cd2e18f 100644 (file)
@@ -502,7 +502,7 @@ void amdgpu_irq_dispatch(struct amdgpu_device *adev,
 
        } else if ((client_id == AMDGPU_IRQ_CLIENTID_LEGACY) &&
                   adev->irq.virq[src_id]) {
-               generic_handle_irq(irq_find_mapping(adev->irq.domain, src_id));
+               generic_handle_domain_irq(adev->irq.domain, src_id);
 
        } else if (!adev->irq.client[client_id].sources) {
                DRM_DEBUG("Unregistered interrupt client_id: %d src_id: %d\n",
index 795fa74..92c8e6e 100644 (file)
@@ -920,11 +920,6 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
                        return -EINVAL;
        }
 
-       /* This assumes only APU display buffers are pinned with (VRAM|GTT).
-        * See function amdgpu_display_supported_domains()
-        */
-       domain = amdgpu_bo_get_preferred_pin_domain(adev, domain);
-
        if (bo->tbo.pin_count) {
                uint32_t mem_type = bo->tbo.resource->mem_type;
                uint32_t mem_flags = bo->tbo.resource->placement;
@@ -949,6 +944,11 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
                return 0;
        }
 
+       /* This assumes only APU display buffers are pinned with (VRAM|GTT).
+        * See function amdgpu_display_supported_domains()
+        */
+       domain = amdgpu_bo_get_preferred_pin_domain(adev, domain);
+
        if (bo->tbo.base.import_attach)
                dma_buf_pin(bo->tbo.base.import_attach);
 
index f40c871..38222de 100644 (file)
@@ -26,6 +26,7 @@
 #include "amdgpu_ras.h"
 #include <linux/bits.h>
 #include "atom.h"
+#include "amdgpu_atomfirmware.h"
 
 #define EEPROM_I2C_TARGET_ADDR_VEGA20          0xA0
 #define EEPROM_I2C_TARGET_ADDR_ARCTURUS                0xA8
@@ -96,6 +97,9 @@ static bool __get_eeprom_i2c_addr(struct amdgpu_device *adev,
        if (!i2c_addr)
                return false;
 
+       if (amdgpu_atomfirmware_ras_rom_addr(adev, (uint8_t*)i2c_addr))
+               return true;
+
        switch (adev->asic_type) {
        case CHIP_VEGA20:
                *i2c_addr = EEPROM_I2C_TARGET_ADDR_VEGA20;
index 59e0fef..acfa207 100644 (file)
@@ -54,11 +54,12 @@ static inline void amdgpu_res_first(struct ttm_resource *res,
 {
        struct drm_mm_node *node;
 
-       if (!res) {
+       if (!res || res->mem_type == TTM_PL_SYSTEM) {
                cur->start = start;
                cur->size = size;
                cur->remaining = size;
                cur->node = NULL;
+               WARN_ON(res && start + size > res->num_pages << PAGE_SHIFT);
                return;
        }
 
index 044076e..6a23c68 100644 (file)
@@ -1295,6 +1295,16 @@ static bool is_raven_kicker(struct amdgpu_device *adev)
                return false;
 }
 
+static bool check_if_enlarge_doorbell_range(struct amdgpu_device *adev)
+{
+       if ((adev->asic_type == CHIP_RENOIR) &&
+           (adev->gfx.me_fw_version >= 0x000000a5) &&
+           (adev->gfx.me_feature_version >= 52))
+               return true;
+       else
+               return false;
+}
+
 static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
 {
        if (gfx_v9_0_should_disable_gfxoff(adev->pdev))
@@ -3675,7 +3685,16 @@ static int gfx_v9_0_kiq_init_register(struct amdgpu_ring *ring)
        if (ring->use_doorbell) {
                WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
                                        (adev->doorbell_index.kiq * 2) << 2);
-               WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
+               /* If GC has entered CGPG, ringing doorbell > first page
+                * doesn't wakeup GC. Enlarge CP_MEC_DOORBELL_RANGE_UPPER to
+                * workaround this issue. And this change has to align with firmware
+                * update.
+                */
+               if (check_if_enlarge_doorbell_range(adev))
+                       WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
+                                       (adev->doorbell.size - 4));
+               else
+                       WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
                                        (adev->doorbell_index.userqueue_end * 2) << 2);
        }
 
index 618e5b6..536d41f 100644 (file)
@@ -67,7 +67,7 @@ static int psp_v12_0_init_microcode(struct psp_context *psp)
 
        err = psp_init_asd_microcode(psp, chip_name);
        if (err)
-               goto out;
+               return err;
 
        snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name);
        err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev);
@@ -80,7 +80,7 @@ static int psp_v12_0_init_microcode(struct psp_context *psp)
        } else {
                err = amdgpu_ucode_validate(adev->psp.ta_fw);
                if (err)
-                       goto out2;
+                       goto out;
 
                ta_hdr = (const struct ta_firmware_header_v1_0 *)
                                 adev->psp.ta_fw->data;
@@ -105,10 +105,9 @@ static int psp_v12_0_init_microcode(struct psp_context *psp)
 
        return 0;
 
-out2:
+out:
        release_firmware(adev->psp.ta_fw);
        adev->psp.ta_fw = NULL;
-out:
        if (err) {
                dev_err(adev->dev,
                        "psp v12.0: Failed to load firmware \"%s\"\n",
index c7b364e..e883731 100644 (file)
@@ -3026,6 +3026,14 @@ svm_range_get_attr(struct kfd_process *p, uint64_t start, uint64_t size,
        pr_debug("svms 0x%p [0x%llx 0x%llx] nattr 0x%x\n", &p->svms, start,
                 start + size - 1, nattr);
 
+       /* Flush pending deferred work to avoid racing with deferred actions from
+        * previous memory map changes (e.g. munmap). Concurrent memory map changes
+        * can still race with get_attr because we don't hold the mmap lock. But that
+        * would be a race condition in the application anyway, and undefined
+        * behaviour is acceptable in that case.
+        */
+       flush_work(&p->svms.deferred_list_work);
+
        mmap_read_lock(mm);
        if (!svm_range_is_valid(mm, start, size)) {
                pr_debug("invalid range\n");
index d3a2a5f..afa96c8 100644 (file)
@@ -1548,6 +1548,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
        }
 
        hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
+       adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
 
        if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
                adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
@@ -1561,7 +1562,6 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
                         adev->dm.dmcub_fw_version);
        }
 
-       adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
 
        adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
        dmub_srv = adev->dm.dmub_srv;
@@ -2429,9 +2429,9 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
        max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
        min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
 
-       if (caps->ext_caps->bits.oled == 1 ||
+       if (caps->ext_caps->bits.oled == 1 /*||
            caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
-           caps->ext_caps->bits.hdr_aux_backlight_control == 1)
+           caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
                caps->aux_support = true;
 
        if (amdgpu_backlight == 0)
@@ -9605,7 +9605,12 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
                } else if (amdgpu_freesync_vid_mode && aconnector &&
                           is_freesync_video_mode(&new_crtc_state->mode,
                                                  aconnector)) {
-                       set_freesync_fixed_config(dm_new_crtc_state);
+                       struct drm_display_mode *high_mode;
+
+                       high_mode = get_highest_refresh_rate_mode(aconnector, false);
+                       if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
+                               set_freesync_fixed_config(dm_new_crtc_state);
+                       }
                }
 
                ret = dm_atomic_get_state(state, &dm_state);
index 40f617b..4aba0e8 100644 (file)
@@ -584,7 +584,7 @@ static void amdgpu_dm_irq_schedule_work(struct amdgpu_device *adev,
                handler_data = container_of(handler_list->next, struct amdgpu_dm_irq_handler_data, list);
 
                /*allocate a new amdgpu_dm_irq_handler_data*/
-               handler_data_add = kzalloc(sizeof(*handler_data), GFP_KERNEL);
+               handler_data_add = kzalloc(sizeof(*handler_data), GFP_ATOMIC);
                if (!handler_data_add) {
                        DRM_ERROR("DM_IRQ: failed to allocate irq handler!\n");
                        return;
index 6e0c5c6..a5331b9 100644 (file)
@@ -197,7 +197,7 @@ void dcn20_update_clocks_update_dentist(struct clk_mgr_internal *clk_mgr, struct
 
        REG_UPDATE(DENTIST_DISPCLK_CNTL,
                        DENTIST_DISPCLK_WDIVIDER, dispclk_wdivider);
-//     REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, 1, 5, 100);
+       REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, 1, 50, 1000);
        REG_UPDATE(DENTIST_DISPCLK_CNTL,
                        DENTIST_DPPCLK_WDIVIDER, dppclk_wdivider);
        REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DPPCLK_CHG_DONE, 1, 5, 100);
index c6f494f..6185f94 100644 (file)
@@ -66,9 +66,11 @@ int rn_get_active_display_cnt_wa(
        for (i = 0; i < context->stream_count; i++) {
                const struct dc_stream_state *stream = context->streams[i];
 
+               /* Extend the WA to DP for Linux*/
                if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A ||
                                stream->signal == SIGNAL_TYPE_DVI_SINGLE_LINK ||
-                               stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK)
+                               stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK ||
+                               stream->signal == SIGNAL_TYPE_DISPLAY_PORT)
                        tmds_present = true;
        }
 
index 605e297..a30283f 100644 (file)
@@ -1530,6 +1530,12 @@ void dc_z10_restore(struct dc *dc)
        if (dc->hwss.z10_restore)
                dc->hwss.z10_restore(dc);
 }
+
+void dc_z10_save_init(struct dc *dc)
+{
+       if (dc->hwss.z10_save_init)
+               dc->hwss.z10_save_init(dc);
+}
 #endif
 /*
  * Applies given context to HW and copy it into current context.
index 9fb8c46..a6d0fd2 100644 (file)
@@ -3602,29 +3602,12 @@ static bool dpcd_read_sink_ext_caps(struct dc_link *link)
 bool dp_retrieve_lttpr_cap(struct dc_link *link)
 {
        uint8_t lttpr_dpcd_data[6];
-       bool vbios_lttpr_enable = false;
-       bool vbios_lttpr_interop = false;
-       struct dc_bios *bios = link->dc->ctx->dc_bios;
+       bool vbios_lttpr_enable = link->dc->caps.vbios_lttpr_enable;
+       bool vbios_lttpr_interop = link->dc->caps.vbios_lttpr_aware;
        enum dc_status status = DC_ERROR_UNEXPECTED;
        bool is_lttpr_present = false;
 
        memset(lttpr_dpcd_data, '\0', sizeof(lttpr_dpcd_data));
-       /* Query BIOS to determine if LTTPR functionality is forced on by system */
-       if (bios->funcs->get_lttpr_caps) {
-               enum bp_result bp_query_result;
-               uint8_t is_vbios_lttpr_enable = 0;
-
-               bp_query_result = bios->funcs->get_lttpr_caps(bios, &is_vbios_lttpr_enable);
-               vbios_lttpr_enable = (bp_query_result == BP_RESULT_OK) && !!is_vbios_lttpr_enable;
-       }
-
-       if (bios->funcs->get_lttpr_interop) {
-               enum bp_result bp_query_result;
-               uint8_t is_vbios_interop_enabled = 0;
-
-               bp_query_result = bios->funcs->get_lttpr_interop(bios, &is_vbios_interop_enabled);
-               vbios_lttpr_interop = (bp_query_result == BP_RESULT_OK) && !!is_vbios_interop_enabled;
-       }
 
        /*
         * Logic to determine LTTPR mode
index f2b39ec..cde8ed2 100644 (file)
@@ -47,6 +47,9 @@ int dc_setup_system_context(struct dc *dc, struct dc_phy_addr_space_config *pa_c
                 */
                memcpy(&dc->vm_pa_config, pa_config, sizeof(struct dc_phy_addr_space_config));
                dc->vm_pa_config.valid = true;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+               dc_z10_save_init(dc);
+#endif
        }
 
        return num_vmids;
index 8dcea8f..21d7828 100644 (file)
@@ -183,6 +183,8 @@ struct dc_caps {
        unsigned int cursor_cache_size;
        struct dc_plane_cap planes[MAX_PLANES];
        struct dc_color_caps color;
+       bool vbios_lttpr_aware;
+       bool vbios_lttpr_enable;
 };
 
 struct dc_bug_wa {
@@ -1336,6 +1338,7 @@ void dc_hardware_release(struct dc *dc);
 bool dc_set_psr_allow_active(struct dc *dc, bool enable);
 #if defined(CONFIG_DRM_AMD_DC_DCN)
 void dc_z10_restore(struct dc *dc);
+void dc_z10_save_init(struct dc *dc);
 #endif
 
 bool dc_enable_dmub_notifications(struct dc *dc);
index 7fa9fc6..f6e747f 100644 (file)
@@ -464,7 +464,7 @@ void optc2_lock_doublebuffer_enable(struct timing_generator *optc)
 
        REG_UPDATE_2(OTG_GLOBAL_CONTROL1,
                        MASTER_UPDATE_LOCK_DB_X,
-                       h_blank_start - 200 - 1,
+                       (h_blank_start - 200 - 1) / optc1->opp_count,
                        MASTER_UPDATE_LOCK_DB_Y,
                        v_blank_start - 1);
 }
index f3d98e3..bf0a198 100644 (file)
@@ -109,6 +109,7 @@ struct _vcs_dpi_ip_params_st dcn2_1_ip = {
        .max_page_table_levels = 4,
        .pte_chunk_size_kbytes = 2,
        .meta_chunk_size_kbytes = 2,
+       .min_meta_chunk_size_bytes = 256,
        .writeback_chunk_size_kbytes = 2,
        .line_buffer_size_bits = 789504,
        .is_line_buffer_bpp_fixed = 0,
index 596c97d..28e15eb 100644 (file)
@@ -1788,7 +1788,6 @@ static bool dcn30_split_stream_for_mpc_or_odm(
                }
                pri_pipe->next_odm_pipe = sec_pipe;
                sec_pipe->prev_odm_pipe = pri_pipe;
-               ASSERT(sec_pipe->top_pipe == NULL);
 
                if (!sec_pipe->top_pipe)
                        sec_pipe->stream_res.opp = pool->opps[pipe_idx];
@@ -2617,6 +2616,26 @@ static bool dcn30_resource_construct(
        dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
        dc->caps.color.mpc.ocsc = 1;
 
+       /* read VBIOS LTTPR caps */
+       {
+               if (ctx->dc_bios->funcs->get_lttpr_caps) {
+                       enum bp_result bp_query_result;
+                       uint8_t is_vbios_lttpr_enable = 0;
+
+                       bp_query_result = ctx->dc_bios->funcs->get_lttpr_caps(ctx->dc_bios, &is_vbios_lttpr_enable);
+                       dc->caps.vbios_lttpr_enable = (bp_query_result == BP_RESULT_OK) && !!is_vbios_lttpr_enable;
+               }
+
+               if (ctx->dc_bios->funcs->get_lttpr_interop) {
+                       enum bp_result bp_query_result;
+                       uint8_t is_vbios_interop_enabled = 0;
+
+                       bp_query_result = ctx->dc_bios->funcs->get_lttpr_interop(ctx->dc_bios,
+                                       &is_vbios_interop_enabled);
+                       dc->caps.vbios_lttpr_aware = (bp_query_result == BP_RESULT_OK) && !!is_vbios_interop_enabled;
+               }
+       }
+
        if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
                dc->debug = debug_defaults_drv;
        else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) {
index 9776d17..912285f 100644 (file)
@@ -1622,106 +1622,12 @@ static void dcn301_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *b
        dml_init_instance(&dc->dml, &dcn3_01_soc, &dcn3_01_ip, DML_PROJECT_DCN30);
 }
 
-static void calculate_wm_set_for_vlevel(
-               int vlevel,
-               struct wm_range_table_entry *table_entry,
-               struct dcn_watermarks *wm_set,
-               struct display_mode_lib *dml,
-               display_e2e_pipe_params_st *pipes,
-               int pipe_cnt)
-{
-       double dram_clock_change_latency_cached = dml->soc.dram_clock_change_latency_us;
-
-       ASSERT(vlevel < dml->soc.num_states);
-       /* only pipe 0 is read for voltage and dcf/soc clocks */
-       pipes[0].clks_cfg.voltage = vlevel;
-       pipes[0].clks_cfg.dcfclk_mhz = dml->soc.clock_limits[vlevel].dcfclk_mhz;
-       pipes[0].clks_cfg.socclk_mhz = dml->soc.clock_limits[vlevel].socclk_mhz;
-
-       dml->soc.dram_clock_change_latency_us = table_entry->pstate_latency_us;
-       dml->soc.sr_exit_time_us = table_entry->sr_exit_time_us;
-       dml->soc.sr_enter_plus_exit_time_us = table_entry->sr_enter_plus_exit_time_us;
-
-       wm_set->urgent_ns = get_wm_urgent(dml, pipes, pipe_cnt) * 1000;
-       wm_set->cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(dml, pipes, pipe_cnt) * 1000;
-       wm_set->cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(dml, pipes, pipe_cnt) * 1000;
-       wm_set->cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(dml, pipes, pipe_cnt) * 1000;
-       wm_set->pte_meta_urgent_ns = get_wm_memory_trip(dml, pipes, pipe_cnt) * 1000;
-       wm_set->frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(dml, pipes, pipe_cnt) * 1000;
-       wm_set->frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(dml, pipes, pipe_cnt) * 1000;
-       wm_set->urgent_latency_ns = get_urgent_latency(dml, pipes, pipe_cnt) * 1000;
-       dml->soc.dram_clock_change_latency_us = dram_clock_change_latency_cached;
-
-}
-
-static void dcn301_calculate_wm_and_dlg(
-               struct dc *dc, struct dc_state *context,
-               display_e2e_pipe_params_st *pipes,
-               int pipe_cnt,
-               int vlevel_req)
-{
-       int i, pipe_idx;
-       int vlevel, vlevel_max;
-       struct wm_range_table_entry *table_entry;
-       struct clk_bw_params *bw_params = dc->clk_mgr->bw_params;
-
-       ASSERT(bw_params);
-
-       vlevel_max = bw_params->clk_table.num_entries - 1;
-
-       /* WM Set D */
-       table_entry = &bw_params->wm_table.entries[WM_D];
-       if (table_entry->wm_type == WM_TYPE_RETRAINING)
-               vlevel = 0;
-       else
-               vlevel = vlevel_max;
-       calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.d,
-                                               &context->bw_ctx.dml, pipes, pipe_cnt);
-       /* WM Set C */
-       table_entry = &bw_params->wm_table.entries[WM_C];
-       vlevel = min(max(vlevel_req, 2), vlevel_max);
-       calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.c,
-                                               &context->bw_ctx.dml, pipes, pipe_cnt);
-       /* WM Set B */
-       table_entry = &bw_params->wm_table.entries[WM_B];
-       vlevel = min(max(vlevel_req, 1), vlevel_max);
-       calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.b,
-                                               &context->bw_ctx.dml, pipes, pipe_cnt);
-
-       /* WM Set A */
-       table_entry = &bw_params->wm_table.entries[WM_A];
-       vlevel = min(vlevel_req, vlevel_max);
-       calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.a,
-                                               &context->bw_ctx.dml, pipes, pipe_cnt);
-
-       for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
-               if (!context->res_ctx.pipe_ctx[i].stream)
-                       continue;
-
-               pipes[pipe_idx].clks_cfg.dispclk_mhz = get_dispclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt);
-               pipes[pipe_idx].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
-
-               if (dc->config.forced_clocks) {
-                       pipes[pipe_idx].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz;
-                       pipes[pipe_idx].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz;
-               }
-               if (dc->debug.min_disp_clk_khz > pipes[pipe_idx].clks_cfg.dispclk_mhz * 1000)
-                       pipes[pipe_idx].clks_cfg.dispclk_mhz = dc->debug.min_disp_clk_khz / 1000.0;
-               if (dc->debug.min_dpp_clk_khz > pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000)
-                       pipes[pipe_idx].clks_cfg.dppclk_mhz = dc->debug.min_dpp_clk_khz / 1000.0;
-
-               pipe_idx++;
-       }
-
-       dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
-}
-
 static struct resource_funcs dcn301_res_pool_funcs = {
        .destroy = dcn301_destroy_resource_pool,
        .link_enc_create = dcn301_link_encoder_create,
        .panel_cntl_create = dcn301_panel_cntl_create,
        .validate_bandwidth = dcn30_validate_bandwidth,
-       .calculate_wm_and_dlg = dcn301_calculate_wm_and_dlg,
+       .calculate_wm_and_dlg = dcn30_calculate_wm_and_dlg,
        .update_soc_for_wm_a = dcn30_update_soc_for_wm_a,
        .populate_dml_pipes = dcn30_populate_dml_pipes_from_context,
        .acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
index 833ab13..dc7823d 100644 (file)
@@ -146,8 +146,8 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_03_soc = {
 
                .min_dcfclk = 500.0, /* TODO: set this to actual min DCFCLK */
                .num_states = 1,
-               .sr_exit_time_us = 26.5,
-               .sr_enter_plus_exit_time_us = 31,
+               .sr_exit_time_us = 35.5,
+               .sr_enter_plus_exit_time_us = 40,
                .urgent_latency_us = 4.0,
                .urgent_latency_pixel_data_only_us = 4.0,
                .urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
index 6ac6faf..8a2119d 100644 (file)
@@ -404,6 +404,18 @@ void dcn31_update_info_frame(struct pipe_ctx *pipe_ctx)
                        &pipe_ctx->stream_res.encoder_info_frame);
        }
 }
+void dcn31_z10_save_init(struct dc *dc)
+{
+       union dmub_rb_cmd cmd;
+
+       memset(&cmd, 0, sizeof(cmd));
+       cmd.dcn_restore.header.type = DMUB_CMD__IDLE_OPT;
+       cmd.dcn_restore.header.sub_type = DMUB_CMD__IDLE_OPT_DCN_SAVE_INIT;
+
+       dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd);
+       dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv);
+       dc_dmub_srv_wait_idle(dc->ctx->dmub_srv);
+}
 
 void dcn31_z10_restore(struct dc *dc)
 {
index 40dfebe..140435e 100644 (file)
@@ -44,6 +44,7 @@ void dcn31_enable_power_gating_plane(
 void dcn31_update_info_frame(struct pipe_ctx *pipe_ctx);
 
 void dcn31_z10_restore(struct dc *dc);
+void dcn31_z10_save_init(struct dc *dc);
 
 void dcn31_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool power_on);
 int dcn31_init_sys_ctx(struct dce_hwseq *hws, struct dc *dc, struct dc_phy_addr_space_config *pa_config);
index aaf2dbd..b30d923 100644 (file)
@@ -97,6 +97,7 @@ static const struct hw_sequencer_funcs dcn31_funcs = {
        .set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
        .set_pipe = dcn21_set_pipe,
        .z10_restore = dcn31_z10_restore,
+       .z10_save_init = dcn31_z10_save_init,
        .is_abm_supported = dcn31_is_abm_supported,
        .set_disp_pattern_generator = dcn30_set_disp_pattern_generator,
        .update_visual_confirm_color = dcn20_update_visual_confirm_color,
index 38c010a..cd3248d 100644 (file)
@@ -1968,6 +1968,22 @@ static bool dcn31_resource_construct(
        dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
        dc->caps.color.mpc.ocsc = 1;
 
+       /* read VBIOS LTTPR caps */
+       {
+               if (ctx->dc_bios->funcs->get_lttpr_caps) {
+                       enum bp_result bp_query_result;
+                       uint8_t is_vbios_lttpr_enable = 0;
+
+                       bp_query_result = ctx->dc_bios->funcs->get_lttpr_caps(ctx->dc_bios, &is_vbios_lttpr_enable);
+                       dc->caps.vbios_lttpr_enable = (bp_query_result == BP_RESULT_OK) && !!is_vbios_lttpr_enable;
+               }
+
+               /* interop bit is implicit */
+               {
+                       dc->caps.vbios_lttpr_aware = true;
+               }
+       }
+
        if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
                dc->debug = debug_defaults_drv;
        else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) {
index d25a7d3..6655bb9 100644 (file)
@@ -841,6 +841,9 @@ static bool CalculatePrefetchSchedule(
        else
                *DestinationLinesForPrefetch = dst_y_prefetch_equ;
 
+       // Limit to prevent overflow in DST_Y_PREFETCH register
+       *DestinationLinesForPrefetch = dml_min(*DestinationLinesForPrefetch, 63.75);
+
        dml_print("DML: VStartup: %d\n", VStartup);
        dml_print("DML: TCalc: %f\n", TCalc);
        dml_print("DML: TWait: %f\n", TWait);
index 5ab008e..ad5f2ad 100644 (file)
@@ -237,6 +237,7 @@ struct hw_sequencer_funcs {
                        int width, int height, int offset);
 
        void (*z10_restore)(struct dc *dc);
+       void (*z10_save_init)(struct dc *dc);
 
        void (*update_visual_confirm_color)(struct dc *dc,
                        struct pipe_ctx *pipe_ctx,
index 7c4734f..7fafb8d 100644 (file)
@@ -856,6 +856,11 @@ enum dmub_cmd_idle_opt_type {
         * DCN hardware restore.
         */
        DMUB_CMD__IDLE_OPT_DCN_RESTORE = 0,
+
+       /**
+        * DCN hardware save.
+        */
+       DMUB_CMD__IDLE_OPT_DCN_SAVE_INIT = 1
 };
 
 /**
index 973de34..27c7fa3 100644 (file)
@@ -267,11 +267,13 @@ void dmub_dcn31_set_outbox1_rptr(struct dmub_srv *dmub, uint32_t rptr_offset)
 
 bool dmub_dcn31_is_hw_init(struct dmub_srv *dmub)
 {
-       uint32_t is_hw_init;
+       union dmub_fw_boot_status status;
+       uint32_t is_enable;
 
-       REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_hw_init);
+       status.all = REG_READ(DMCUB_SCRATCH0);
+       REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_enable);
 
-       return is_hw_init != 0;
+       return is_enable != 0 && status.bits.dal_fw;
 }
 
 bool dmub_dcn31_is_supported(struct dmub_srv *dmub)
index 3811e58..4495545 100644 (file)
@@ -590,7 +590,7 @@ struct atom_firmware_info_v3_4 {
        uint8_t  board_i2c_feature_id;            // enum of atom_board_i2c_feature_id_def
        uint8_t  board_i2c_feature_gpio_id;       // i2c id find in gpio_lut data table gpio_id
        uint8_t  board_i2c_feature_slave_addr;
-       uint8_t  reserved3;
+       uint8_t  ras_rom_i2c_slave_addr;
        uint16_t bootup_mvddq_mv;
        uint16_t bootup_mvpp_mv;
        uint32_t zfbstartaddrin16mb;
index 3fea243..dc91eb6 100644 (file)
@@ -26,7 +26,7 @@
 #include "amdgpu_smu.h"
 
 #define SMU13_DRIVER_IF_VERSION_INV 0xFFFFFFFF
-#define SMU13_DRIVER_IF_VERSION_YELLOW_CARP 0x03
+#define SMU13_DRIVER_IF_VERSION_YELLOW_CARP 0x04
 #define SMU13_DRIVER_IF_VERSION_ALDE 0x07
 
 /* MP Apertures */
index 5627de7..c5e26d6 100644 (file)
@@ -111,7 +111,9 @@ typedef struct {
   uint32_t InWhisperMode        : 1;
   uint32_t spare0               : 1;
   uint32_t ZstateStatus         : 4;
-  uint32_t spare1               :12;
+  uint32_t spare1               : 4;
+  uint32_t DstateFun            : 4;
+  uint32_t DstateDev            : 4;
   // MP1_EXT_SCRATCH2
   uint32_t P2JobHandler         :24;
   uint32_t RsmuPmiP2FinishedCnt : 8;
index 2597910..02e8c6e 100644 (file)
@@ -5127,6 +5127,13 @@ static int vega10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
        return size;
 }
 
+static bool vega10_get_power_profile_mode_quirks(struct pp_hwmgr *hwmgr)
+{
+       struct amdgpu_device *adev = hwmgr->adev;
+
+       return (adev->pdev->device == 0x6860);
+}
+
 static int vega10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint32_t size)
 {
        struct vega10_hwmgr *data = hwmgr->backend;
@@ -5163,9 +5170,15 @@ static int vega10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, ui
        }
 
 out:
-       smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask,
+       if (vega10_get_power_profile_mode_quirks(hwmgr))
+               smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask,
+                                               1 << power_profile_mode,
+                                               NULL);
+       else
+               smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask,
                                                (!power_profile_mode) ? 0 : 1 << (power_profile_mode - 1),
                                                NULL);
+
        hwmgr->power_profile_mode = power_profile_mode;
 
        return 0;
index c751f71..d92dd2c 100644 (file)
@@ -353,8 +353,7 @@ static void sienna_cichlid_check_bxco_support(struct smu_context *smu)
        struct amdgpu_device *adev = smu->adev;
        uint32_t val;
 
-       if (powerplay_table->platform_caps & SMU_11_0_7_PP_PLATFORM_CAP_BACO ||
-           powerplay_table->platform_caps & SMU_11_0_7_PP_PLATFORM_CAP_MACO) {
+       if (powerplay_table->platform_caps & SMU_11_0_7_PP_PLATFORM_CAP_BACO) {
                val = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP0);
                smu_baco->platform_support =
                        (val & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK) ? true :
index 18681dc..bcaaa08 100644 (file)
@@ -256,7 +256,7 @@ static int vangogh_tables_init(struct smu_context *smu)
        return 0;
 
 err3_out:
-       kfree(smu_table->clocks_table);
+       kfree(smu_table->watermarks_table);
 err2_out:
        kfree(smu_table->gpu_metrics_table);
 err1_out:
index d299079..5d82891 100644 (file)
@@ -855,8 +855,6 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
        req.request.sequence = req32.request.sequence;
        req.request.signal = req32.request.signal;
        err = drm_ioctl_kernel(file, drm_wait_vblank_ioctl, &req, DRM_UNLOCKED);
-       if (err)
-               return err;
 
        req32.reply.type = req.reply.type;
        req32.reply.sequence = req.reply.sequence;
@@ -865,7 +863,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
        if (copy_to_user(argp, &req32, sizeof(req32)))
                return -EFAULT;
 
-       return 0;
+       return err;
 }
 
 #if defined(CONFIG_X86)
index 5b6922e..aa667fa 100644 (file)
@@ -2166,7 +2166,8 @@ static void
 init_vbt_missing_defaults(struct drm_i915_private *i915)
 {
        enum port port;
-       int ports = PORT_A | PORT_B | PORT_C | PORT_D | PORT_E | PORT_F;
+       int ports = BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_C) |
+                   BIT(PORT_D) | BIT(PORT_E) | BIT(PORT_F);
 
        if (!HAS_DDI(i915) && !IS_CHERRYVIEW(i915))
                return;
index be716b5..00dade4 100644 (file)
@@ -2463,6 +2463,15 @@ static void intel_ddi_power_up_lanes(struct intel_encoder *encoder,
        }
 }
 
+/* Splitter enable for eDP MSO is limited to certain pipes. */
+static u8 intel_ddi_splitter_pipe_mask(struct drm_i915_private *i915)
+{
+       if (IS_ALDERLAKE_P(i915))
+               return BIT(PIPE_A) | BIT(PIPE_B);
+       else
+               return BIT(PIPE_A);
+}
+
 static void intel_ddi_mso_get_config(struct intel_encoder *encoder,
                                     struct intel_crtc_state *pipe_config)
 {
@@ -2480,8 +2489,7 @@ static void intel_ddi_mso_get_config(struct intel_encoder *encoder,
        if (!pipe_config->splitter.enable)
                return;
 
-       /* Splitter enable is supported for pipe A only. */
-       if (drm_WARN_ON(&i915->drm, pipe != PIPE_A)) {
+       if (drm_WARN_ON(&i915->drm, !(intel_ddi_splitter_pipe_mask(i915) & BIT(pipe)))) {
                pipe_config->splitter.enable = false;
                return;
        }
@@ -2513,10 +2521,6 @@ static void intel_ddi_mso_configure(const struct intel_crtc_state *crtc_state)
                return;
 
        if (crtc_state->splitter.enable) {
-               /* Splitter enable is supported for pipe A only. */
-               if (drm_WARN_ON(&i915->drm, pipe != PIPE_A))
-                       return;
-
                dss1 |= SPLITTER_ENABLE;
                dss1 |= OVERLAP_PIXELS(crtc_state->splitter.pixel_overlap);
                if (crtc_state->splitter.link_count == 2)
@@ -4743,12 +4747,8 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
 
                dig_port->hpd_pulse = intel_dp_hpd_pulse;
 
-               /* Splitter enable for eDP MSO is limited to certain pipes. */
-               if (dig_port->dp.mso_link_count) {
-                       encoder->pipe_mask = BIT(PIPE_A);
-                       if (IS_ALDERLAKE_P(dev_priv))
-                               encoder->pipe_mask |= BIT(PIPE_B);
-               }
+               if (dig_port->dp.mso_link_count)
+                       encoder->pipe_mask = intel_ddi_splitter_pipe_mask(dev_priv);
        }
 
        /* In theory we don't need the encoder->type check, but leave it just in
index 3bad4e0..0a8a239 100644 (file)
@@ -5746,16 +5746,18 @@ static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
 
        switch (crtc_state->pipe_bpp) {
        case 18:
-               val |= PIPEMISC_DITHER_6_BPC;
+               val |= PIPEMISC_6_BPC;
                break;
        case 24:
-               val |= PIPEMISC_DITHER_8_BPC;
+               val |= PIPEMISC_8_BPC;
                break;
        case 30:
-               val |= PIPEMISC_DITHER_10_BPC;
+               val |= PIPEMISC_10_BPC;
                break;
        case 36:
-               val |= PIPEMISC_DITHER_12_BPC;
+               /* Port output 12BPC defined for ADLP+ */
+               if (DISPLAY_VER(dev_priv) > 12)
+                       val |= PIPEMISC_12_BPC_ADLP;
                break;
        default:
                MISSING_CASE(crtc_state->pipe_bpp);
@@ -5808,15 +5810,27 @@ int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
 
        tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));
 
-       switch (tmp & PIPEMISC_DITHER_BPC_MASK) {
-       case PIPEMISC_DITHER_6_BPC:
+       switch (tmp & PIPEMISC_BPC_MASK) {
+       case PIPEMISC_6_BPC:
                return 18;
-       case PIPEMISC_DITHER_8_BPC:
+       case PIPEMISC_8_BPC:
                return 24;
-       case PIPEMISC_DITHER_10_BPC:
+       case PIPEMISC_10_BPC:
                return 30;
-       case PIPEMISC_DITHER_12_BPC:
-               return 36;
+       /*
+        * PORT OUTPUT 12 BPC defined for ADLP+.
+        *
+        * TODO:
+        * For previous platforms with DSI interface, bits 5:7
+        * are used for storing pipe_bpp irrespective of dithering.
+        * Since the value of 12 BPC is not defined for these bits
+        * on older platforms, need to find a workaround for 12 BPC
+        * MIPI DSI HW readout.
+        */
+       case PIPEMISC_12_BPC_ADLP:
+               if (DISPLAY_VER(dev_priv) > 12)
+                       return 36;
+               fallthrough;
        default:
                MISSING_CASE(tmp);
                return 0;
@@ -11361,13 +11375,19 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
                intel_ddi_init(dev_priv, PORT_B);
                intel_ddi_init(dev_priv, PORT_C);
                vlv_dsi_init(dev_priv);
-       } else if (DISPLAY_VER(dev_priv) >= 9) {
+       } else if (DISPLAY_VER(dev_priv) == 10) {
                intel_ddi_init(dev_priv, PORT_A);
                intel_ddi_init(dev_priv, PORT_B);
                intel_ddi_init(dev_priv, PORT_C);
                intel_ddi_init(dev_priv, PORT_D);
                intel_ddi_init(dev_priv, PORT_E);
                intel_ddi_init(dev_priv, PORT_F);
+       } else if (DISPLAY_VER(dev_priv) >= 9) {
+               intel_ddi_init(dev_priv, PORT_A);
+               intel_ddi_init(dev_priv, PORT_B);
+               intel_ddi_init(dev_priv, PORT_C);
+               intel_ddi_init(dev_priv, PORT_D);
+               intel_ddi_init(dev_priv, PORT_E);
        } else if (HAS_DDI(dev_priv)) {
                u32 found;
 
index 4298ae6..86b7ac7 100644 (file)
@@ -6387,13 +6387,13 @@ void intel_display_power_suspend_late(struct drm_i915_private *i915)
        if (DISPLAY_VER(i915) >= 11 || IS_GEMINILAKE(i915) ||
            IS_BROXTON(i915)) {
                bxt_enable_dc9(i915);
-               /* Tweaked Wa_14010685332:icp,jsp,mcc */
-               if (INTEL_PCH_TYPE(i915) >= PCH_ICP && INTEL_PCH_TYPE(i915) <= PCH_MCC)
-                       intel_de_rmw(i915, SOUTH_CHICKEN1,
-                                    SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS);
        } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
                hsw_enable_pc8(i915);
        }
+
+       /* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */
+       if (INTEL_PCH_TYPE(i915) >= PCH_CNP && INTEL_PCH_TYPE(i915) < PCH_DG1)
+               intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS);
 }
 
 void intel_display_power_resume_early(struct drm_i915_private *i915)
@@ -6402,13 +6402,13 @@ void intel_display_power_resume_early(struct drm_i915_private *i915)
            IS_BROXTON(i915)) {
                gen9_sanitize_dc_state(i915);
                bxt_disable_dc9(i915);
-               /* Tweaked Wa_14010685332:icp,jsp,mcc */
-               if (INTEL_PCH_TYPE(i915) >= PCH_ICP && INTEL_PCH_TYPE(i915) <= PCH_MCC)
-                       intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, 0);
-
        } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
                hsw_disable_pc8(i915);
        }
+
+       /* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */
+       if (INTEL_PCH_TYPE(i915) >= PCH_CNP && INTEL_PCH_TYPE(i915) < PCH_DG1)
+               intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, 0);
 }
 
 void intel_display_power_suspend(struct drm_i915_private *i915)
index 6cc03b9..862c1df 100644 (file)
@@ -3850,23 +3850,18 @@ static void intel_dp_check_device_service_irq(struct intel_dp *intel_dp)
 
 static void intel_dp_check_link_service_irq(struct intel_dp *intel_dp)
 {
-       struct drm_i915_private *i915 = dp_to_i915(intel_dp);
        u8 val;
 
        if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
                return;
 
        if (drm_dp_dpcd_readb(&intel_dp->aux,
-                             DP_LINK_SERVICE_IRQ_VECTOR_ESI0, &val) != 1 || !val) {
-               drm_dbg_kms(&i915->drm, "Error in reading link service irq vector\n");
+                             DP_LINK_SERVICE_IRQ_VECTOR_ESI0, &val) != 1 || !val)
                return;
-       }
 
        if (drm_dp_dpcd_writeb(&intel_dp->aux,
-                              DP_LINK_SERVICE_IRQ_VECTOR_ESI0, val) != 1) {
-               drm_dbg_kms(&i915->drm, "Error in writing link service irq vector\n");
+                              DP_LINK_SERVICE_IRQ_VECTOR_ESI0, val) != 1)
                return;
-       }
 
        if (val & HDMI_LINK_STATUS_CHANGED)
                intel_dp_handle_hdmi_link_status_change(intel_dp);
index 08bceae..053a3c2 100644 (file)
@@ -206,7 +206,6 @@ int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp)
 
        return lttpr_count;
 }
-EXPORT_SYMBOL(intel_dp_init_lttpr_and_dprx_caps);
 
 static u8 dp_voltage_max(u8 preemph)
 {
index c4a126c..1257f4f 100644 (file)
@@ -127,6 +127,15 @@ static void intel_timeline_fini(struct rcu_head *rcu)
 
        i915_vma_put(timeline->hwsp_ggtt);
        i915_active_fini(&timeline->active);
+
+       /*
+        * A small race exists between intel_gt_retire_requests_timeout and
+        * intel_timeline_exit which could result in the syncmap not getting
+        * free'd. Rather than work to hard to seal this race, simply cleanup
+        * the syncmap on fini.
+        */
+       i915_syncmap_free(&timeline->sync);
+
        kfree(timeline);
 }
 
index 06024d3..cde0a47 100644 (file)
@@ -3149,6 +3149,7 @@ static int init_bdw_mmio_info(struct intel_gvt *gvt)
        MMIO_DFH(_MMIO(0xb100), D_BDW, F_CMD_ACCESS, NULL, NULL);
        MMIO_DFH(_MMIO(0xb10c), D_BDW, F_CMD_ACCESS, NULL, NULL);
        MMIO_D(_MMIO(0xb110), D_BDW);
+       MMIO_D(GEN9_SCRATCH_LNCF1, D_BDW_PLUS);
 
        MMIO_F(_MMIO(0x24d0), 48, F_CMD_ACCESS | F_CMD_WRITE_PATCH, 0, 0,
                D_BDW_PLUS, NULL, force_nonpriv_write);
index b8ac807..f776c47 100644 (file)
@@ -105,6 +105,8 @@ static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
        {RCS0, COMMON_SLICE_CHICKEN2, 0xffff, true}, /* 0x7014 */
        {RCS0, GEN9_CS_DEBUG_MODE1, 0xffff, false}, /* 0x20ec */
        {RCS0, GEN8_L3SQCREG4, 0, false}, /* 0xb118 */
+       {RCS0, GEN9_SCRATCH1, 0, false}, /* 0xb11c */
+       {RCS0, GEN9_SCRATCH_LNCF1, 0, false}, /* 0xb008 */
        {RCS0, GEN7_HALF_SLICE_CHICKEN1, 0xffff, true}, /* 0xe100 */
        {RCS0, HALF_SLICE_CHICKEN2, 0xffff, true}, /* 0xe180 */
        {RCS0, HALF_SLICE_CHICKEN3, 0xffff, true}, /* 0xe184 */
index 77f1911..3acb0b6 100644 (file)
@@ -138,7 +138,7 @@ void i915_globals_unpark(void)
        atomic_inc(&active);
 }
 
-static void __exit __i915_globals_flush(void)
+static void  __i915_globals_flush(void)
 {
        atomic_inc(&active); /* skip shrinking */
 
@@ -148,7 +148,7 @@ static void __exit __i915_globals_flush(void)
        atomic_dec(&active);
 }
 
-void __exit i915_globals_exit(void)
+void i915_globals_exit(void)
 {
        GEM_BUG_ON(atomic_read(&active));
 
index 35c97c3..9666646 100644 (file)
@@ -727,9 +727,18 @@ static void err_print_gt(struct drm_i915_error_state_buf *m,
        if (GRAPHICS_VER(m->i915) >= 12) {
                int i;
 
-               for (i = 0; i < GEN12_SFC_DONE_MAX; i++)
+               for (i = 0; i < GEN12_SFC_DONE_MAX; i++) {
+                       /*
+                        * SFC_DONE resides in the VD forcewake domain, so it
+                        * only exists if the corresponding VCS engine is
+                        * present.
+                        */
+                       if (!HAS_ENGINE(gt->_gt, _VCS(i * 2)))
+                               continue;
+
                        err_printf(m, "  SFC_DONE[%d]: 0x%08x\n", i,
                                   gt->sfc_done[i]);
+               }
 
                err_printf(m, "  GAM_DONE: 0x%08x\n", gt->gam_done);
        }
@@ -1581,6 +1590,14 @@ static void gt_record_regs(struct intel_gt_coredump *gt)
 
        if (GRAPHICS_VER(i915) >= 12) {
                for (i = 0; i < GEN12_SFC_DONE_MAX; i++) {
+                       /*
+                        * SFC_DONE resides in the VD forcewake domain, so it
+                        * only exists if the corresponding VCS engine is
+                        * present.
+                        */
+                       if (!HAS_ENGINE(gt->_gt, _VCS(i * 2)))
+                               continue;
+
                        gt->sfc_done[i] =
                                intel_uncore_read(uncore, GEN12_SFC_DONE(i));
                }
index c039431..c3816f5 100644 (file)
@@ -3064,24 +3064,6 @@ static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
        spin_unlock_irq(&dev_priv->irq_lock);
 }
 
-static void cnp_display_clock_wa(struct drm_i915_private *dev_priv)
-{
-       struct intel_uncore *uncore = &dev_priv->uncore;
-
-       /*
-        * Wa_14010685332:cnp/cmp,tgp,adp
-        * TODO: Clarify which platforms this applies to
-        * TODO: Figure out if this workaround can be applied in the s0ix suspend/resume handlers as
-        * on earlier platforms and whether the workaround is also needed for runtime suspend/resume
-        */
-       if (INTEL_PCH_TYPE(dev_priv) == PCH_CNP ||
-           (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP && INTEL_PCH_TYPE(dev_priv) < PCH_DG1)) {
-               intel_uncore_rmw(uncore, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS,
-                                SBCLK_RUN_REFCLK_DIS);
-               intel_uncore_rmw(uncore, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, 0);
-       }
-}
-
 static void gen8_display_irq_reset(struct drm_i915_private *dev_priv)
 {
        struct intel_uncore *uncore = &dev_priv->uncore;
@@ -3115,7 +3097,6 @@ static void gen8_irq_reset(struct drm_i915_private *dev_priv)
        if (HAS_PCH_SPLIT(dev_priv))
                ibx_irq_reset(dev_priv);
 
-       cnp_display_clock_wa(dev_priv);
 }
 
 static void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
@@ -3159,8 +3140,6 @@ static void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
 
        if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
                GEN3_IRQ_RESET(uncore, SDE);
-
-       cnp_display_clock_wa(dev_priv);
 }
 
 static void gen11_irq_reset(struct drm_i915_private *dev_priv)
index 83b500b..2880ec5 100644 (file)
@@ -1195,6 +1195,7 @@ static int __init i915_init(void)
        err = pci_register_driver(&i915_pci_driver);
        if (err) {
                i915_pmu_exit();
+               i915_globals_exit();
                return err;
        }
 
index 94fde5c..476bb3b 100644 (file)
@@ -422,7 +422,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define   GEN12_HCP_SFC_LOCK_ACK_BIT           REG_BIT(1)
 #define   GEN12_HCP_SFC_USAGE_BIT                      REG_BIT(0)
 
-#define GEN12_SFC_DONE(n)              _MMIO(0x1cc00 + (n) * 0x100)
+#define GEN12_SFC_DONE(n)              _MMIO(0x1cc000 + (n) * 0x1000)
 #define GEN12_SFC_DONE_MAX             4
 
 #define RING_PP_DIR_BASE(base)         _MMIO((base) + 0x228)
@@ -6163,11 +6163,17 @@ enum {
 #define   PIPEMISC_HDR_MODE_PRECISION  (1 << 23) /* icl+ */
 #define   PIPEMISC_OUTPUT_COLORSPACE_YUV  (1 << 11)
 #define   PIPEMISC_PIXEL_ROUNDING_TRUNC        REG_BIT(8) /* tgl+ */
-#define   PIPEMISC_DITHER_BPC_MASK     (7 << 5)
-#define   PIPEMISC_DITHER_8_BPC                (0 << 5)
-#define   PIPEMISC_DITHER_10_BPC       (1 << 5)
-#define   PIPEMISC_DITHER_6_BPC                (2 << 5)
-#define   PIPEMISC_DITHER_12_BPC       (3 << 5)
+/*
+ * For Display < 13, Bits 5-7 of PIPE MISC represent DITHER BPC with
+ * valid values of: 6, 8, 10 BPC.
+ * ADLP+, the bits 5-7 represent PORT OUTPUT BPC with valid values of:
+ * 6, 8, 10, 12 BPC.
+ */
+#define   PIPEMISC_BPC_MASK            (7 << 5)
+#define   PIPEMISC_8_BPC               (0 << 5)
+#define   PIPEMISC_10_BPC              (1 << 5)
+#define   PIPEMISC_6_BPC               (2 << 5)
+#define   PIPEMISC_12_BPC_ADLP         (4 << 5) /* adlp+ */
 #define   PIPEMISC_DITHER_ENABLE       (1 << 4)
 #define   PIPEMISC_DITHER_TYPE_MASK    (3 << 2)
 #define   PIPEMISC_DITHER_TYPE_SP      (0 << 2)
index 7eaa92f..e0a10f3 100644 (file)
@@ -325,7 +325,7 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
                        info->pipe_mask &= ~BIT(PIPE_C);
                        info->cpu_transcoder_mask &= ~BIT(TRANSCODER_C);
                }
-       } else if (HAS_DISPLAY(dev_priv) && GRAPHICS_VER(dev_priv) >= 9) {
+       } else if (HAS_DISPLAY(dev_priv) && DISPLAY_VER(dev_priv) >= 9) {
                u32 dfsm = intel_de_read(dev_priv, SKL_DFSM);
 
                if (dfsm & SKL_DFSM_PIPE_A_DISABLE) {
@@ -340,7 +340,8 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
                        info->pipe_mask &= ~BIT(PIPE_C);
                        info->cpu_transcoder_mask &= ~BIT(TRANSCODER_C);
                }
-               if (GRAPHICS_VER(dev_priv) >= 12 &&
+
+               if (DISPLAY_VER(dev_priv) >= 12 &&
                    (dfsm & TGL_DFSM_PIPE_D_DISABLE)) {
                        info->pipe_mask &= ~BIT(PIPE_D);
                        info->cpu_transcoder_mask &= ~BIT(TRANSCODER_D);
@@ -352,10 +353,10 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
                if (dfsm & SKL_DFSM_DISPLAY_PM_DISABLE)
                        info->display.has_fbc = 0;
 
-               if (GRAPHICS_VER(dev_priv) >= 11 && (dfsm & ICL_DFSM_DMC_DISABLE))
+               if (DISPLAY_VER(dev_priv) >= 11 && (dfsm & ICL_DFSM_DMC_DISABLE))
                        info->display.has_dmc = 0;
 
-               if (GRAPHICS_VER(dev_priv) >= 10 &&
+               if (DISPLAY_VER(dev_priv) >= 10 &&
                    (dfsm & CNL_DFSM_DISPLAY_DSC_DISABLE))
                        info->display.has_dsc = 0;
        }
index 8710f55..bd1f9f0 100644 (file)
@@ -683,7 +683,7 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
                break;
        }
 
-       ipu_dmfc_config_wait4eot(ipu_plane->dmfc, drm_rect_width(dst));
+       ipu_dmfc_config_wait4eot(ipu_plane->dmfc, ALIGN(drm_rect_width(dst), 8));
 
        width = ipu_src_rect_width(new_state);
        height = drm_rect_height(&new_state->src) >> 16;
index 96ea1a2..f54392e 100644 (file)
@@ -203,6 +203,7 @@ static irqreturn_t handle_lcd_irq(struct drm_device *dev)
        unsigned long status, val, val1;
        int plane_id, dma0_state, dma1_state;
        struct kmb_drm_private *kmb = to_kmb(dev);
+       u32 ctrl = 0;
 
        status = kmb_read_lcd(kmb, LCD_INT_STATUS);
 
@@ -227,6 +228,19 @@ static irqreturn_t handle_lcd_irq(struct drm_device *dev)
                                kmb_clr_bitmask_lcd(kmb, LCD_CONTROL,
                                                    kmb->plane_status[plane_id].ctrl);
 
+                               ctrl = kmb_read_lcd(kmb, LCD_CONTROL);
+                               if (!(ctrl & (LCD_CTRL_VL1_ENABLE |
+                                   LCD_CTRL_VL2_ENABLE |
+                                   LCD_CTRL_GL1_ENABLE |
+                                   LCD_CTRL_GL2_ENABLE))) {
+                                       /* If no LCD layers are using DMA,
+                                        * then disable DMA pipelined AXI read
+                                        * transactions.
+                                        */
+                                       kmb_clr_bitmask_lcd(kmb, LCD_CONTROL,
+                                                           LCD_CTRL_PIPELINE_DMA);
+                               }
+
                                kmb->plane_status[plane_id].disable = false;
                        }
                }
@@ -411,10 +425,10 @@ static const struct drm_driver kmb_driver = {
        .fops = &fops,
        DRM_GEM_CMA_DRIVER_OPS_VMAP,
        .name = "kmb-drm",
-       .desc = "KEEMBAY DISPLAY DRIVER ",
-       .date = "20201008",
-       .major = 1,
-       .minor = 0,
+       .desc = "KEEMBAY DISPLAY DRIVER",
+       .date = DRIVER_DATE,
+       .major = DRIVER_MAJOR,
+       .minor = DRIVER_MINOR,
 };
 
 static int kmb_remove(struct platform_device *pdev)
index 02e8067..ebbaa5f 100644 (file)
 #define KMB_MAX_HEIGHT                 1080 /*Max height in pixels */
 #define KMB_MIN_WIDTH                   1920 /*Max width in pixels */
 #define KMB_MIN_HEIGHT                  1080 /*Max height in pixels */
+
+#define DRIVER_DATE                    "20210223"
+#define DRIVER_MAJOR                   1
+#define DRIVER_MINOR                   1
+
 #define KMB_LCD_DEFAULT_CLK            200000000
 #define KMB_SYS_CLK_MHZ                        500
 
index d5b6195..ecee678 100644 (file)
@@ -427,8 +427,14 @@ static void kmb_plane_atomic_update(struct drm_plane *plane,
 
        kmb_set_bitmask_lcd(kmb, LCD_CONTROL, ctrl);
 
-       /* FIXME no doc on how to set output format,these values are
-        * taken from the Myriadx tests
+       /* Enable pipeline AXI read transactions for the DMA
+        * after setting graphics layers. This must be done
+        * in a separate write cycle.
+        */
+       kmb_set_bitmask_lcd(kmb, LCD_CONTROL, LCD_CTRL_PIPELINE_DMA);
+
+       /* FIXME no doc on how to set output format, these values are taken
+        * from the Myriadx tests
         */
        out_format |= LCD_OUTF_FORMAT_RGB888;
 
@@ -526,6 +532,11 @@ struct kmb_plane *kmb_plane_init(struct drm_device *drm)
                plane->id = i;
        }
 
+       /* Disable pipeline AXI read transactions for the DMA
+        * prior to setting graphics layers
+        */
+       kmb_clr_bitmask_lcd(kmb, LCD_CONTROL, LCD_CTRL_PIPELINE_DMA);
+
        return primary;
 cleanup:
        drmm_kfree(drm, plane);
index 6f4c80b..473f5bb 100644 (file)
@@ -133,6 +133,8 @@ static int mtk_disp_color_probe(struct platform_device *pdev)
 
 static int mtk_disp_color_remove(struct platform_device *pdev)
 {
+       component_del(&pdev->dev, &mtk_disp_color_component_ops);
+
        return 0;
 }
 
index fa9d799..5326989 100644 (file)
@@ -423,6 +423,8 @@ static int mtk_disp_ovl_probe(struct platform_device *pdev)
 
 static int mtk_disp_ovl_remove(struct platform_device *pdev)
 {
+       component_del(&pdev->dev, &mtk_disp_ovl_component_ops);
+
        return 0;
 }
 
index bced555..e94738f 100644 (file)
@@ -605,11 +605,15 @@ static int mtk_dpi_bridge_atomic_check(struct drm_bridge *bridge,
                                       struct drm_crtc_state *crtc_state,
                                       struct drm_connector_state *conn_state)
 {
-       struct mtk_dpi *dpi = bridge->driver_private;
+       struct mtk_dpi *dpi = bridge_to_dpi(bridge);
        unsigned int out_bus_format;
 
        out_bus_format = bridge_state->output_bus_cfg.format;
 
+       if (out_bus_format == MEDIA_BUS_FMT_FIXED)
+               if (dpi->conf->num_output_fmts)
+                       out_bus_format = dpi->conf->output_fmts[0];
+
        dev_dbg(dpi->dev, "input format 0x%04x, output format 0x%04x\n",
                bridge_state->input_bus_cfg.format,
                bridge_state->output_bus_cfg.format);
index 474efb8..735efe7 100644 (file)
@@ -532,13 +532,10 @@ void mtk_drm_crtc_async_update(struct drm_crtc *crtc, struct drm_plane *plane,
                               struct drm_atomic_state *state)
 {
        struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
-       const struct drm_plane_helper_funcs *plane_helper_funcs =
-                       plane->helper_private;
 
        if (!mtk_crtc->enabled)
                return;
 
-       plane_helper_funcs->atomic_update(plane, state);
        mtk_drm_crtc_update_config(mtk_crtc, false);
 }
 
index 75bc00e..50d2056 100644 (file)
@@ -34,6 +34,7 @@
 
 #define DISP_AAL_EN                            0x0000
 #define DISP_AAL_SIZE                          0x0030
+#define DISP_AAL_OUTPUT_SIZE                   0x04d8
 
 #define DISP_DITHER_EN                         0x0000
 #define DITHER_EN                              BIT(0)
@@ -197,6 +198,7 @@ static void mtk_aal_config(struct device *dev, unsigned int w,
        struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
 
        mtk_ddp_write(cmdq_pkt, w << 16 | h, &priv->cmdq_reg, priv->regs, DISP_AAL_SIZE);
+       mtk_ddp_write(cmdq_pkt, w << 16 | h, &priv->cmdq_reg, priv->regs, DISP_AAL_OUTPUT_SIZE);
 }
 
 static void mtk_aal_gamma_set(struct device *dev, struct drm_crtc_state *state)
index b5582dc..e6dcb34 100644 (file)
@@ -110,6 +110,35 @@ static int mtk_plane_atomic_async_check(struct drm_plane *plane,
                                                   true, true);
 }
 
+static void mtk_plane_update_new_state(struct drm_plane_state *new_state,
+                                      struct mtk_plane_state *mtk_plane_state)
+{
+       struct drm_framebuffer *fb = new_state->fb;
+       struct drm_gem_object *gem;
+       struct mtk_drm_gem_obj *mtk_gem;
+       unsigned int pitch, format;
+       dma_addr_t addr;
+
+       gem = fb->obj[0];
+       mtk_gem = to_mtk_gem_obj(gem);
+       addr = mtk_gem->dma_addr;
+       pitch = fb->pitches[0];
+       format = fb->format->format;
+
+       addr += (new_state->src.x1 >> 16) * fb->format->cpp[0];
+       addr += (new_state->src.y1 >> 16) * pitch;
+
+       mtk_plane_state->pending.enable = true;
+       mtk_plane_state->pending.pitch = pitch;
+       mtk_plane_state->pending.format = format;
+       mtk_plane_state->pending.addr = addr;
+       mtk_plane_state->pending.x = new_state->dst.x1;
+       mtk_plane_state->pending.y = new_state->dst.y1;
+       mtk_plane_state->pending.width = drm_rect_width(&new_state->dst);
+       mtk_plane_state->pending.height = drm_rect_height(&new_state->dst);
+       mtk_plane_state->pending.rotation = new_state->rotation;
+}
+
 static void mtk_plane_atomic_async_update(struct drm_plane *plane,
                                          struct drm_atomic_state *state)
 {
@@ -126,8 +155,10 @@ static void mtk_plane_atomic_async_update(struct drm_plane *plane,
        plane->state->src_h = new_state->src_h;
        plane->state->src_w = new_state->src_w;
        swap(plane->state->fb, new_state->fb);
-       new_plane_state->pending.async_dirty = true;
 
+       mtk_plane_update_new_state(new_state, new_plane_state);
+       wmb(); /* Make sure the above parameters are set before update */
+       new_plane_state->pending.async_dirty = true;
        mtk_drm_crtc_async_update(new_state->crtc, plane, state);
 }
 
@@ -189,14 +220,8 @@ static void mtk_plane_atomic_update(struct drm_plane *plane,
        struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
                                                                           plane);
        struct mtk_plane_state *mtk_plane_state = to_mtk_plane_state(new_state);
-       struct drm_crtc *crtc = new_state->crtc;
-       struct drm_framebuffer *fb = new_state->fb;
-       struct drm_gem_object *gem;
-       struct mtk_drm_gem_obj *mtk_gem;
-       unsigned int pitch, format;
-       dma_addr_t addr;
 
-       if (!crtc || WARN_ON(!fb))
+       if (!new_state->crtc || WARN_ON(!new_state->fb))
                return;
 
        if (!new_state->visible) {
@@ -204,24 +229,7 @@ static void mtk_plane_atomic_update(struct drm_plane *plane,
                return;
        }
 
-       gem = fb->obj[0];
-       mtk_gem = to_mtk_gem_obj(gem);
-       addr = mtk_gem->dma_addr;
-       pitch = fb->pitches[0];
-       format = fb->format->format;
-
-       addr += (new_state->src.x1 >> 16) * fb->format->cpp[0];
-       addr += (new_state->src.y1 >> 16) * pitch;
-
-       mtk_plane_state->pending.enable = true;
-       mtk_plane_state->pending.pitch = pitch;
-       mtk_plane_state->pending.format = format;
-       mtk_plane_state->pending.addr = addr;
-       mtk_plane_state->pending.x = new_state->dst.x1;
-       mtk_plane_state->pending.y = new_state->dst.y1;
-       mtk_plane_state->pending.width = drm_rect_width(&new_state->dst);
-       mtk_plane_state->pending.height = drm_rect_height(&new_state->dst);
-       mtk_plane_state->pending.rotation = new_state->rotation;
+       mtk_plane_update_new_state(new_state, mtk_plane_state);
        wmb(); /* Make sure the above parameters are set before update */
        mtk_plane_state->pending.dirty = true;
 }
index 446e796..0f3cafa 100644 (file)
 #define VPP_WRAP_OSD3_MATRIX_PRE_OFFSET2 0x3dbc
 #define VPP_WRAP_OSD3_MATRIX_EN_CTRL 0x3dbd
 
+/* osd1 HDR */
+#define OSD1_HDR2_CTRL 0x38a0
+#define OSD1_HDR2_CTRL_VDIN0_HDR2_TOP_EN       BIT(13)
+#define OSD1_HDR2_CTRL_REG_ONLY_MAT            BIT(16)
+
 /* osd2 scaler */
 #define OSD2_VSC_PHASE_STEP 0x3d00
 #define OSD2_VSC_INI_PHASE 0x3d01
index aede0c6..259f3e6 100644 (file)
@@ -425,9 +425,14 @@ void meson_viu_init(struct meson_drm *priv)
        if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM) ||
            meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXL))
                meson_viu_load_matrix(priv);
-       else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A))
+       else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
                meson_viu_set_g12a_osd1_matrix(priv, RGB709_to_YUV709l_coeff,
                                               true);
+               /* fix green/pink color distortion from vendor u-boot */
+               writel_bits_relaxed(OSD1_HDR2_CTRL_REG_ONLY_MAT |
+                               OSD1_HDR2_CTRL_VDIN0_HDR2_TOP_EN, 0,
+                               priv->io_base + _REG(OSD1_HDR2_CTRL));
+       }
 
        /* Initialize OSD1 fifo control register */
        reg = VIU_OSD_DDR_PRIORITY_URGENT |
index d01c4c9..704dace 100644 (file)
@@ -296,7 +296,7 @@ static const struct dpu_mdp_cfg sc7180_mdp[] = {
 static const struct dpu_mdp_cfg sm8250_mdp[] = {
        {
        .name = "top_0", .id = MDP_TOP,
-       .base = 0x0, .len = 0x45C,
+       .base = 0x0, .len = 0x494,
        .features = 0,
        .highest_bank_bit = 0x3, /* TODO: 2 for LP_DDR4 */
        .clk_ctrls[DPU_CLK_CTRL_VIG0] = {
index 6b0a7bc..b466784 100644 (file)
@@ -45,20 +45,13 @@ static void dpu_mdss_irq(struct irq_desc *desc)
 
        while (interrupts) {
                irq_hw_number_t hwirq = fls(interrupts) - 1;
-               unsigned int mapping;
                int rc;
 
-               mapping = irq_find_mapping(dpu_mdss->irq_controller.domain,
-                                          hwirq);
-               if (mapping == 0) {
-                       DRM_ERROR("couldn't find irq mapping for %lu\n", hwirq);
-                       break;
-               }
-
-               rc = generic_handle_irq(mapping);
+               rc = generic_handle_domain_irq(dpu_mdss->irq_controller.domain,
+                                              hwirq);
                if (rc < 0) {
-                       DRM_ERROR("handle irq fail: irq=%lu mapping=%u rc=%d\n",
-                                 hwirq, mapping, rc);
+                       DRM_ERROR("handle irq fail: irq=%lu rc=%d\n",
+                                 hwirq, rc);
                        break;
                }
 
index 09bd46a..2f4895b 100644 (file)
@@ -50,8 +50,7 @@ static irqreturn_t mdss_irq(int irq, void *arg)
        while (intr) {
                irq_hw_number_t hwirq = fls(intr) - 1;
 
-               generic_handle_irq(irq_find_mapping(
-                               mdp5_mdss->irqcontroller.domain, hwirq));
+               generic_handle_domain_irq(mdp5_mdss->irqcontroller.domain, hwirq);
                intr &= ~(1 << hwirq);
        }
 
index ca96e35..c0423e7 100644 (file)
@@ -771,6 +771,7 @@ int dp_catalog_panel_timing_cfg(struct dp_catalog *dp_catalog)
        dp_write_link(catalog, REG_DP_HSYNC_VSYNC_WIDTH_POLARITY,
                                dp_catalog->width_blanking);
        dp_write_link(catalog, REG_DP_ACTIVE_HOR_VER, dp_catalog->dp_active);
+       dp_write_p0(catalog, MMSS_DP_INTF_CONFIG, 0);
        return 0;
 }
 
index ee221d8..eaddfd7 100644 (file)
@@ -1526,7 +1526,7 @@ static int dp_ctrl_process_phy_test_request(struct dp_ctrl_private *ctrl)
         * running. Add the global reset just before disabling the
         * link clocks and core clocks.
         */
-       ret = dp_ctrl_off(&ctrl->dp_ctrl);
+       ret = dp_ctrl_off_link_stream(&ctrl->dp_ctrl);
        if (ret) {
                DRM_ERROR("failed to disable DP controller\n");
                return ret;
index 051c1be..867388a 100644 (file)
@@ -219,6 +219,7 @@ static int dp_display_bind(struct device *dev, struct device *master,
                goto end;
        }
 
+       dp->aux->drm_dev = drm;
        rc = dp_aux_register(dp->aux);
        if (rc) {
                DRM_ERROR("DRM DP AUX register failed\n");
@@ -1311,6 +1312,10 @@ static int dp_pm_resume(struct device *dev)
        else
                dp->dp_display.is_connected = false;
 
+       dp_display_handle_plugged_change(g_dp_display,
+                               dp->dp_display.is_connected);
+
+
        mutex_unlock(&dp->event_mutex);
 
        return 0;
index eed2a76..bcaddbb 100644 (file)
@@ -142,6 +142,9 @@ static const struct iommu_flush_ops null_tlb_ops = {
        .tlb_add_page = msm_iommu_tlb_add_page,
 };
 
+static int msm_fault_handler(struct iommu_domain *domain, struct device *dev,
+               unsigned long iova, int flags, void *arg);
+
 struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent)
 {
        struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(parent->dev);
@@ -157,6 +160,13 @@ struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent)
        if (!ttbr1_cfg)
                return ERR_PTR(-ENODEV);
 
+       /*
+        * Defer setting the fault handler until we have a valid adreno_smmu
+        * to avoid accidentially installing a GPU specific fault handler for
+        * the display's iommu
+        */
+       iommu_set_fault_handler(iommu->domain, msm_fault_handler, iommu);
+
        pagetable = kzalloc(sizeof(*pagetable), GFP_KERNEL);
        if (!pagetable)
                return ERR_PTR(-ENOMEM);
@@ -300,7 +310,6 @@ struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain)
 
        iommu->domain = domain;
        msm_mmu_init(&iommu->base, dev, &funcs, MSM_MMU_IOMMU);
-       iommu_set_fault_handler(domain, msm_fault_handler, iommu);
 
        atomic_set(&iommu->pagetables, 0);
 
index f949767..bcb0310 100644 (file)
@@ -2237,6 +2237,33 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
                interlock[NV50_DISP_INTERLOCK_CORE] = 0;
        }
 
+       /* Finish updating head(s)...
+        *
+        * NVD is rather picky about both where window assignments can change,
+        * *and* about certain core and window channel states matching.
+        *
+        * The EFI GOP driver on newer GPUs configures window channels with a
+        * different output format to what we do, and the core channel update
+        * in the assign_windows case above would result in a state mismatch.
+        *
+        * Delay some of the head update until after that point to workaround
+        * the issue.  This only affects the initial modeset.
+        *
+        * TODO: handle this better when adding flexible window mapping
+        */
+       for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+               struct nv50_head_atom *asyh = nv50_head_atom(new_crtc_state);
+               struct nv50_head *head = nv50_head(crtc);
+
+               NV_ATOMIC(drm, "%s: set %04x (clr %04x)\n", crtc->name,
+                         asyh->set.mask, asyh->clr.mask);
+
+               if (asyh->set.mask) {
+                       nv50_head_flush_set_wndw(head, asyh);
+                       interlock[NV50_DISP_INTERLOCK_CORE] = 1;
+               }
+       }
+
        /* Update plane(s). */
        for_each_new_plane_in_state(state, plane, new_plane_state, i) {
                struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state);
index ec361d1..d66f972 100644 (file)
@@ -50,11 +50,8 @@ nv50_head_flush_clr(struct nv50_head *head,
 }
 
 void
-nv50_head_flush_set(struct nv50_head *head, struct nv50_head_atom *asyh)
+nv50_head_flush_set_wndw(struct nv50_head *head, struct nv50_head_atom *asyh)
 {
-       if (asyh->set.view   ) head->func->view    (head, asyh);
-       if (asyh->set.mode   ) head->func->mode    (head, asyh);
-       if (asyh->set.core   ) head->func->core_set(head, asyh);
        if (asyh->set.olut   ) {
                asyh->olut.offset = nv50_lut_load(&head->olut,
                                                  asyh->olut.buffer,
@@ -62,6 +59,14 @@ nv50_head_flush_set(struct nv50_head *head, struct nv50_head_atom *asyh)
                                                  asyh->olut.load);
                head->func->olut_set(head, asyh);
        }
+}
+
+void
+nv50_head_flush_set(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+       if (asyh->set.view   ) head->func->view    (head, asyh);
+       if (asyh->set.mode   ) head->func->mode    (head, asyh);
+       if (asyh->set.core   ) head->func->core_set(head, asyh);
        if (asyh->set.curs   ) head->func->curs_set(head, asyh);
        if (asyh->set.base   ) head->func->base    (head, asyh);
        if (asyh->set.ovly   ) head->func->ovly    (head, asyh);
index dae841d..0bac6be 100644 (file)
@@ -21,6 +21,7 @@ struct nv50_head {
 
 struct nv50_head *nv50_head_create(struct drm_device *, int index);
 void nv50_head_flush_set(struct nv50_head *head, struct nv50_head_atom *asyh);
+void nv50_head_flush_set_wndw(struct nv50_head *head, struct nv50_head_atom *asyh);
 void nv50_head_flush_clr(struct nv50_head *head,
                         struct nv50_head_atom *asyh, bool flush);
 
index 0b86c44..59759c4 100644 (file)
@@ -4,7 +4,8 @@
 
 struct nv_device_v0 {
        __u8  version;
-       __u8  pad01[7];
+       __u8  priv;
+       __u8  pad02[6];
        __u64 device;   /* device identifier, ~0 for client default */
 };
 
index ba2c28e..c68cc95 100644 (file)
@@ -61,8 +61,6 @@
 #define NV10_CHANNEL_DMA                              /* cl506b.h */ 0x0000006e
 #define NV17_CHANNEL_DMA                              /* cl506b.h */ 0x0000176e
 #define NV40_CHANNEL_DMA                              /* cl506b.h */ 0x0000406e
-#define NV50_CHANNEL_DMA                              /* cl506e.h */ 0x0000506e
-#define G82_CHANNEL_DMA                               /* cl826e.h */ 0x0000826e
 
 #define NV50_CHANNEL_GPFIFO                           /* cl506f.h */ 0x0000506f
 #define G82_CHANNEL_GPFIFO                            /* cl826f.h */ 0x0000826f
index 347d2c0..5d9395e 100644 (file)
@@ -9,7 +9,6 @@ struct nvif_client {
        const struct nvif_driver *driver;
        u64 version;
        u8 route;
-       bool super;
 };
 
 int  nvif_client_ctor(struct nvif_client *parent, const char *name, u64 device,
index 8e85b93..7a3af05 100644 (file)
@@ -11,7 +11,7 @@ struct nvif_driver {
        void (*fini)(void *priv);
        int (*suspend)(void *priv);
        int (*resume)(void *priv);
-       int (*ioctl)(void *priv, bool super, void *data, u32 size, void **hack);
+       int (*ioctl)(void *priv, void *data, u32 size, void **hack);
        void __iomem *(*map)(void *priv, u64 handle, u32 size);
        void (*unmap)(void *priv, void __iomem *ptr, u32 size);
        bool keep;
index 5d7017f..2f86606 100644 (file)
@@ -13,7 +13,6 @@ struct nvkm_client {
        struct nvkm_client_notify *notify[32];
        struct rb_root objroot;
 
-       bool super;
        void *data;
        int (*ntfy)(const void *, u32, const void *, u32);
 
index 71ed147..f52918a 100644 (file)
@@ -4,5 +4,5 @@
 #include <core/os.h>
 struct nvkm_client;
 
-int nvkm_ioctl(struct nvkm_client *, bool, void *, u32, void **);
+int nvkm_ioctl(struct nvkm_client *, void *, u32, void **);
 #endif
index 0911e73..70e7887 100644 (file)
@@ -15,7 +15,6 @@ struct nvkm_vma {
        u8   refd:3; /* Current page type (index, or NONE for unreferenced). */
        bool used:1; /* Region allocated. */
        bool part:1; /* Region was split from an allocated region by map(). */
-       bool user:1; /* Region user-allocated. */
        bool busy:1; /* Region busy (for temporarily preventing user access). */
        bool mapped:1; /* Region contains valid pages. */
        struct nvkm_memory *memory; /* Memory currently mapped into VMA. */
index b45ec30..4107b70 100644 (file)
@@ -570,11 +570,9 @@ nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
        }
 
        client->route = NVDRM_OBJECT_ABI16;
-       client->super = true;
        ret = nvif_object_ctor(&chan->chan->user, "abi16Ntfy", info->handle,
                               NV_DMA_IN_MEMORY, &args, sizeof(args),
                               &ntfy->object);
-       client->super = false;
        client->route = NVDRM_OBJECT_NVIF;
        if (ret)
                goto done;
index 4036260..80099ef 100644 (file)
@@ -86,12 +86,6 @@ nouveau_channel_del(struct nouveau_channel **pchan)
        struct nouveau_channel *chan = *pchan;
        if (chan) {
                struct nouveau_cli *cli = (void *)chan->user.client;
-               bool super;
-
-               if (cli) {
-                       super = cli->base.super;
-                       cli->base.super = true;
-               }
 
                if (chan->fence)
                        nouveau_fence(chan->drm)->context_del(chan);
@@ -111,9 +105,6 @@ nouveau_channel_del(struct nouveau_channel **pchan)
                        nouveau_bo_unpin(chan->push.buffer);
                nouveau_bo_ref(NULL, &chan->push.buffer);
                kfree(chan);
-
-               if (cli)
-                       cli->base.super = super;
        }
        *pchan = NULL;
 }
@@ -512,20 +503,16 @@ nouveau_channel_new(struct nouveau_drm *drm, struct nvif_device *device,
                    struct nouveau_channel **pchan)
 {
        struct nouveau_cli *cli = (void *)device->object.client;
-       bool super;
        int ret;
 
        /* hack until fencenv50 is fixed, and agp access relaxed */
-       super = cli->base.super;
-       cli->base.super = true;
-
        ret = nouveau_channel_ind(drm, device, arg0, priv, pchan);
        if (ret) {
                NV_PRINTK(dbg, cli, "ib channel create, %d\n", ret);
                ret = nouveau_channel_dma(drm, device, pchan);
                if (ret) {
                        NV_PRINTK(dbg, cli, "dma channel create, %d\n", ret);
-                       goto done;
+                       return ret;
                }
        }
 
@@ -533,15 +520,13 @@ nouveau_channel_new(struct nouveau_drm *drm, struct nvif_device *device,
        if (ret) {
                NV_PRINTK(err, cli, "channel failed to initialise, %d\n", ret);
                nouveau_channel_del(pchan);
-               goto done;
+               return ret;
        }
 
        ret = nouveau_svmm_join((*pchan)->vmm->svmm, (*pchan)->inst);
        if (ret)
                nouveau_channel_del(pchan);
 
-done:
-       cli->base.super = super;
        return ret;
 }
 
index a616cf4..ba4cd5f 100644 (file)
@@ -244,6 +244,7 @@ nouveau_cli_init(struct nouveau_drm *drm, const char *sname,
        ret = nvif_device_ctor(&cli->base.object, "drmDevice", 0, NV_DEVICE,
                               &(struct nv_device_v0) {
                                        .device = ~0,
+                                       .priv = true,
                               }, sizeof(struct nv_device_v0),
                               &cli->device);
        if (ret) {
@@ -1086,8 +1087,6 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
        if (ret)
                goto done;
 
-       cli->base.super = false;
-
        fpriv->driver_priv = cli;
 
        mutex_lock(&drm->client.mutex);
index 0de6549..2ca3207 100644 (file)
@@ -41,8 +41,6 @@ nouveau_mem_map(struct nouveau_mem *mem,
                struct gf100_vmm_map_v0 gf100;
        } args;
        u32 argc = 0;
-       bool super;
-       int ret;
 
        switch (vmm->object.oclass) {
        case NVIF_CLASS_VMM_NV04:
@@ -73,12 +71,7 @@ nouveau_mem_map(struct nouveau_mem *mem,
                return -ENOSYS;
        }
 
-       super = vmm->object.client->super;
-       vmm->object.client->super = true;
-       ret = nvif_vmm_map(vmm, vma->addr, mem->mem.size, &args, argc,
-                          &mem->mem, 0);
-       vmm->object.client->super = super;
-       return ret;
+       return nvif_vmm_map(vmm, vma->addr, mem->mem.size, &args, argc, &mem->mem, 0);
 }
 
 void
@@ -99,7 +92,6 @@ nouveau_mem_host(struct ttm_resource *reg, struct ttm_tt *tt)
        struct nouveau_drm *drm = cli->drm;
        struct nvif_mmu *mmu = &cli->mmu;
        struct nvif_mem_ram_v0 args = {};
-       bool super = cli->base.super;
        u8 type;
        int ret;
 
@@ -122,11 +114,9 @@ nouveau_mem_host(struct ttm_resource *reg, struct ttm_tt *tt)
                args.dma = tt->dma_address;
 
        mutex_lock(&drm->master.lock);
-       cli->base.super = true;
        ret = nvif_mem_ctor_type(mmu, "ttmHostMem", cli->mem->oclass, type, PAGE_SHIFT,
                                 reg->num_pages << PAGE_SHIFT,
                                 &args, sizeof(args), &mem->mem);
-       cli->base.super = super;
        mutex_unlock(&drm->master.lock);
        return ret;
 }
@@ -138,12 +128,10 @@ nouveau_mem_vram(struct ttm_resource *reg, bool contig, u8 page)
        struct nouveau_cli *cli = mem->cli;
        struct nouveau_drm *drm = cli->drm;
        struct nvif_mmu *mmu = &cli->mmu;
-       bool super = cli->base.super;
        u64 size = ALIGN(reg->num_pages << PAGE_SHIFT, 1 << page);
        int ret;
 
        mutex_lock(&drm->master.lock);
-       cli->base.super = true;
        switch (cli->mem->oclass) {
        case NVIF_CLASS_MEM_GF100:
                ret = nvif_mem_ctor_type(mmu, "ttmVram", cli->mem->oclass,
@@ -167,7 +155,6 @@ nouveau_mem_vram(struct ttm_resource *reg, bool contig, u8 page)
                WARN_ON(1);
                break;
        }
-       cli->base.super = super;
        mutex_unlock(&drm->master.lock);
 
        reg->start = mem->mem.addr >> PAGE_SHIFT;
index b3f29b1..52f5793 100644 (file)
@@ -52,9 +52,9 @@ nvkm_client_map(void *priv, u64 handle, u32 size)
 }
 
 static int
-nvkm_client_ioctl(void *priv, bool super, void *data, u32 size, void **hack)
+nvkm_client_ioctl(void *priv, void *data, u32 size, void **hack)
 {
-       return nvkm_ioctl(priv, super, data, size, hack);
+       return nvkm_ioctl(priv, data, size, hack);
 }
 
 static int
index 82b583f..b0c3422 100644 (file)
@@ -237,14 +237,11 @@ void
 nouveau_svmm_invalidate(struct nouveau_svmm *svmm, u64 start, u64 limit)
 {
        if (limit > start) {
-               bool super = svmm->vmm->vmm.object.client->super;
-               svmm->vmm->vmm.object.client->super = true;
                nvif_object_mthd(&svmm->vmm->vmm.object, NVIF_VMM_V0_PFNCLR,
                                 &(struct nvif_vmm_pfnclr_v0) {
                                        .addr = start,
                                        .size = limit - start,
                                 }, sizeof(struct nvif_vmm_pfnclr_v0));
-               svmm->vmm->vmm.object.client->super = super;
        }
 }
 
@@ -634,9 +631,7 @@ static int nouveau_atomic_range_fault(struct nouveau_svmm *svmm,
                NVIF_VMM_PFNMAP_V0_A |
                NVIF_VMM_PFNMAP_V0_HOST;
 
-       svmm->vmm->vmm.object.client->super = true;
        ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args, size, NULL);
-       svmm->vmm->vmm.object.client->super = false;
        mutex_unlock(&svmm->mutex);
 
        unlock_page(page);
@@ -702,9 +697,7 @@ static int nouveau_range_fault(struct nouveau_svmm *svmm,
 
        nouveau_hmm_convert_pfn(drm, &range, args);
 
-       svmm->vmm->vmm.object.client->super = true;
        ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args, size, NULL);
-       svmm->vmm->vmm.object.client->super = false;
        mutex_unlock(&svmm->mutex);
 
 out:
@@ -928,10 +921,8 @@ nouveau_pfns_map(struct nouveau_svmm *svmm, struct mm_struct *mm,
 
        mutex_lock(&svmm->mutex);
 
-       svmm->vmm->vmm.object.client->super = true;
        ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args, sizeof(*args) +
                                npages * sizeof(args->p.phys[0]), NULL);
-       svmm->vmm->vmm.object.client->super = false;
 
        mutex_unlock(&svmm->mutex);
 }
index 9dc10b1..5da1f4d 100644 (file)
@@ -32,6 +32,9 @@
 #include <nvif/event.h>
 #include <nvif/ioctl.h>
 
+#include <nvif/class.h>
+#include <nvif/cl0080.h>
+
 struct usif_notify_p {
        struct drm_pending_event base;
        struct {
@@ -261,7 +264,7 @@ usif_object_dtor(struct usif_object *object)
 }
 
 static int
-usif_object_new(struct drm_file *f, void *data, u32 size, void *argv, u32 argc)
+usif_object_new(struct drm_file *f, void *data, u32 size, void *argv, u32 argc, bool parent_abi16)
 {
        struct nouveau_cli *cli = nouveau_cli(f);
        struct nvif_client *client = &cli->base;
@@ -271,23 +274,48 @@ usif_object_new(struct drm_file *f, void *data, u32 size, void *argv, u32 argc)
        struct usif_object *object;
        int ret = -ENOSYS;
 
+       if ((ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true)))
+               return ret;
+
+       switch (args->v0.oclass) {
+       case NV_DMA_FROM_MEMORY:
+       case NV_DMA_TO_MEMORY:
+       case NV_DMA_IN_MEMORY:
+               return -EINVAL;
+       case NV_DEVICE: {
+               union {
+                       struct nv_device_v0 v0;
+               } *args = data;
+
+               if ((ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false)))
+                       return ret;
+
+               args->v0.priv = false;
+               break;
+       }
+       default:
+               if (!parent_abi16)
+                       return -EINVAL;
+               break;
+       }
+
        if (!(object = kmalloc(sizeof(*object), GFP_KERNEL)))
                return -ENOMEM;
        list_add(&object->head, &cli->objects);
 
-       if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) {
-               object->route = args->v0.route;
-               object->token = args->v0.token;
-               args->v0.route = NVDRM_OBJECT_USIF;
-               args->v0.token = (unsigned long)(void *)object;
-               ret = nvif_client_ioctl(client, argv, argc);
-               args->v0.token = object->token;
-               args->v0.route = object->route;
+       object->route = args->v0.route;
+       object->token = args->v0.token;
+       args->v0.route = NVDRM_OBJECT_USIF;
+       args->v0.token = (unsigned long)(void *)object;
+       ret = nvif_client_ioctl(client, argv, argc);
+       if (ret) {
+               usif_object_dtor(object);
+               return ret;
        }
 
-       if (ret)
-               usif_object_dtor(object);
-       return ret;
+       args->v0.token = object->token;
+       args->v0.route = object->route;
+       return 0;
 }
 
 int
@@ -301,6 +329,7 @@ usif_ioctl(struct drm_file *filp, void __user *user, u32 argc)
                struct nvif_ioctl_v0 v0;
        } *argv = data;
        struct usif_object *object;
+       bool abi16 = false;
        u8 owner;
        int ret;
 
@@ -331,11 +360,13 @@ usif_ioctl(struct drm_file *filp, void __user *user, u32 argc)
                        mutex_unlock(&cli->mutex);
                        goto done;
                }
+
+               abi16 = true;
        }
 
        switch (argv->v0.type) {
        case NVIF_IOCTL_V0_NEW:
-               ret = usif_object_new(filp, data, size, argv, argc);
+               ret = usif_object_new(filp, data, size, argv, argc, abi16);
                break;
        case NVIF_IOCTL_V0_NTFY_NEW:
                ret = usif_notify_new(filp, data, size, argv, argc);
index 12644f8..a3264a0 100644 (file)
@@ -32,7 +32,7 @@
 int
 nvif_client_ioctl(struct nvif_client *client, void *data, u32 size)
 {
-       return client->driver->ioctl(client->object.priv, client->super, data, size, NULL);
+       return client->driver->ioctl(client->object.priv, data, size, NULL);
 }
 
 int
@@ -80,7 +80,6 @@ nvif_client_ctor(struct nvif_client *parent, const char *name, u64 device,
        client->object.client = client;
        client->object.handle = ~0;
        client->route = NVIF_IOCTL_V0_ROUTE_NVIF;
-       client->super = true;
        client->driver = parent->driver;
 
        if (ret == 0) {
index 671a5c0..dce1ece 100644 (file)
@@ -44,8 +44,7 @@ nvif_object_ioctl(struct nvif_object *object, void *data, u32 size, void **hack)
        } else
                return -ENOSYS;
 
-       return client->driver->ioctl(client->object.priv, client->super,
-                                    data, size, hack);
+       return client->driver->ioctl(client->object.priv, data, size, hack);
 }
 
 void
index d777df5..735cb68 100644 (file)
@@ -426,8 +426,7 @@ nvkm_ioctl_path(struct nvkm_client *client, u64 handle, u32 type,
 }
 
 int
-nvkm_ioctl(struct nvkm_client *client, bool supervisor,
-          void *data, u32 size, void **hack)
+nvkm_ioctl(struct nvkm_client *client, void *data, u32 size, void **hack)
 {
        struct nvkm_object *object = &client->object;
        union {
@@ -435,7 +434,6 @@ nvkm_ioctl(struct nvkm_client *client, bool supervisor,
        } *args = data;
        int ret = -ENOSYS;
 
-       client->super = supervisor;
        nvif_ioctl(object, "size %d\n", size);
 
        if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) {
index b930f53..93ddf63 100644 (file)
@@ -2624,6 +2624,26 @@ nv174_chipset = {
        .dma      = { 0x00000001, gv100_dma_new },
 };
 
+static const struct nvkm_device_chip
+nv177_chipset = {
+       .name = "GA107",
+       .bar      = { 0x00000001, tu102_bar_new },
+       .bios     = { 0x00000001, nvkm_bios_new },
+       .devinit  = { 0x00000001, ga100_devinit_new },
+       .fb       = { 0x00000001, ga102_fb_new },
+       .gpio     = { 0x00000001, ga102_gpio_new },
+       .i2c      = { 0x00000001, gm200_i2c_new },
+       .imem     = { 0x00000001, nv50_instmem_new },
+       .mc       = { 0x00000001, ga100_mc_new },
+       .mmu      = { 0x00000001, tu102_mmu_new },
+       .pci      = { 0x00000001, gp100_pci_new },
+       .privring = { 0x00000001, gm200_privring_new },
+       .timer    = { 0x00000001, gk20a_timer_new },
+       .top      = { 0x00000001, ga100_top_new },
+       .disp     = { 0x00000001, ga102_disp_new },
+       .dma      = { 0x00000001, gv100_dma_new },
+};
+
 static int
 nvkm_device_event_ctor(struct nvkm_object *object, void *data, u32 size,
                       struct nvkm_notify *notify)
@@ -3049,6 +3069,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
                case 0x168: device->chip = &nv168_chipset; break;
                case 0x172: device->chip = &nv172_chipset; break;
                case 0x174: device->chip = &nv174_chipset; break;
+               case 0x177: device->chip = &nv177_chipset; break;
                default:
                        if (nvkm_boolopt(device->cfgopt, "NvEnableUnsupportedChipsets", false)) {
                                switch (device->chipset) {
index fea9d8f..f28894f 100644 (file)
@@ -397,7 +397,7 @@ nvkm_udevice_new(const struct nvkm_oclass *oclass, void *data, u32 size,
                return ret;
 
        /* give priviledged clients register access */
-       if (client->super)
+       if (args->v0.priv)
                func = &nvkm_udevice_super;
        else
                func = &nvkm_udevice;
index 55fbfe2..9669472 100644 (file)
@@ -440,7 +440,7 @@ nvkm_dp_train(struct nvkm_dp *dp, u32 dataKBps)
        return ret;
 }
 
-static void
+void
 nvkm_dp_disable(struct nvkm_outp *outp, struct nvkm_ior *ior)
 {
        struct nvkm_dp *dp = nvkm_dp(outp);
index 428b3f4..e484d0c 100644 (file)
@@ -32,6 +32,7 @@ struct nvkm_dp {
 
 int nvkm_dp_new(struct nvkm_disp *, int index, struct dcb_output *,
                struct nvkm_outp **);
+void nvkm_dp_disable(struct nvkm_outp *, struct nvkm_ior *);
 
 /* DPCD Receiver Capabilities */
 #define DPCD_RC00_DPCD_REV                                              0x00000
index dffcac2..129982f 100644 (file)
@@ -22,6 +22,7 @@
  * Authors: Ben Skeggs
  */
 #include "outp.h"
+#include "dp.h"
 #include "ior.h"
 
 #include <subdev/bios.h>
@@ -257,6 +258,14 @@ nvkm_outp_init_route(struct nvkm_outp *outp)
        if (!ior->arm.head || ior->arm.proto != proto) {
                OUTP_DBG(outp, "no heads (%x %d %d)", ior->arm.head,
                         ior->arm.proto, proto);
+
+               /* The EFI GOP driver on Ampere can leave unused DP links routed,
+                * which we don't expect.  The DisableLT IED script *should* get
+                * us back to where we need to be.
+                */
+               if (ior->func->route.get && !ior->arm.head && outp->info.type == DCB_OUTPUT_DP)
+                       nvkm_dp_disable(outp, ior);
+
                return;
        }
 
index d20cc06..797131e 100644 (file)
@@ -26,7 +26,6 @@
 #include <core/client.h>
 #include <core/gpuobj.h>
 #include <subdev/fb.h>
-#include <subdev/instmem.h>
 
 #include <nvif/cl0002.h>
 #include <nvif/unpack.h>
@@ -72,11 +71,7 @@ nvkm_dmaobj_ctor(const struct nvkm_dmaobj_func *func, struct nvkm_dma *dma,
        union {
                struct nv_dma_v0 v0;
        } *args = *pdata;
-       struct nvkm_device *device = dma->engine.subdev.device;
-       struct nvkm_client *client = oclass->client;
        struct nvkm_object *parent = oclass->parent;
-       struct nvkm_instmem *instmem = device->imem;
-       struct nvkm_fb *fb = device->fb;
        void *data = *pdata;
        u32 size = *psize;
        int ret = -ENOSYS;
@@ -109,23 +104,13 @@ nvkm_dmaobj_ctor(const struct nvkm_dmaobj_func *func, struct nvkm_dma *dma,
                dmaobj->target = NV_MEM_TARGET_VM;
                break;
        case NV_DMA_V0_TARGET_VRAM:
-               if (!client->super) {
-                       if (dmaobj->limit >= fb->ram->size - instmem->reserved)
-                               return -EACCES;
-                       if (device->card_type >= NV_50)
-                               return -EACCES;
-               }
                dmaobj->target = NV_MEM_TARGET_VRAM;
                break;
        case NV_DMA_V0_TARGET_PCI:
-               if (!client->super)
-                       return -EACCES;
                dmaobj->target = NV_MEM_TARGET_PCI;
                break;
        case NV_DMA_V0_TARGET_PCI_US:
        case NV_DMA_V0_TARGET_AGP:
-               if (!client->super)
-                       return -EACCES;
                dmaobj->target = NV_MEM_TARGET_PCI_NOSNOOP;
                break;
        default:
index 90e9a09..3209eb7 100644 (file)
@@ -27,8 +27,6 @@ nvkm-y += nvkm/engine/fifo/dmanv04.o
 nvkm-y += nvkm/engine/fifo/dmanv10.o
 nvkm-y += nvkm/engine/fifo/dmanv17.o
 nvkm-y += nvkm/engine/fifo/dmanv40.o
-nvkm-y += nvkm/engine/fifo/dmanv50.o
-nvkm-y += nvkm/engine/fifo/dmag84.o
 
 nvkm-y += nvkm/engine/fifo/gpfifonv50.o
 nvkm-y += nvkm/engine/fifo/gpfifog84.o
index af8bdf2..3a95730 100644 (file)
@@ -48,8 +48,6 @@ void nv50_fifo_chan_object_dtor(struct nvkm_fifo_chan *, int);
 int g84_fifo_chan_ctor(struct nv50_fifo *, u64 vmm, u64 push,
                       const struct nvkm_oclass *, struct nv50_fifo_chan *);
 
-extern const struct nvkm_fifo_chan_oclass nv50_fifo_dma_oclass;
 extern const struct nvkm_fifo_chan_oclass nv50_fifo_gpfifo_oclass;
-extern const struct nvkm_fifo_chan_oclass g84_fifo_dma_oclass;
 extern const struct nvkm_fifo_chan_oclass g84_fifo_gpfifo_oclass;
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmag84.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmag84.c
deleted file mode 100644 (file)
index fc34cdd..0000000
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "channv50.h"
-
-#include <core/client.h>
-#include <core/ramht.h>
-
-#include <nvif/class.h>
-#include <nvif/cl826e.h>
-#include <nvif/unpack.h>
-
-static int
-g84_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
-                void *data, u32 size, struct nvkm_object **pobject)
-{
-       struct nvkm_object *parent = oclass->parent;
-       union {
-               struct g82_channel_dma_v0 v0;
-       } *args = data;
-       struct nv50_fifo *fifo = nv50_fifo(base);
-       struct nv50_fifo_chan *chan;
-       int ret = -ENOSYS;
-
-       nvif_ioctl(parent, "create channel dma size %d\n", size);
-       if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
-               nvif_ioctl(parent, "create channel dma vers %d vmm %llx "
-                                  "pushbuf %llx offset %016llx\n",
-                          args->v0.version, args->v0.vmm, args->v0.pushbuf,
-                          args->v0.offset);
-               if (!args->v0.pushbuf)
-                       return -EINVAL;
-       } else
-               return ret;
-
-       if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
-               return -ENOMEM;
-       *pobject = &chan->base.object;
-
-       ret = g84_fifo_chan_ctor(fifo, args->v0.vmm, args->v0.pushbuf,
-                                oclass, chan);
-       if (ret)
-               return ret;
-
-       args->v0.chid = chan->base.chid;
-
-       nvkm_kmap(chan->ramfc);
-       nvkm_wo32(chan->ramfc, 0x08, lower_32_bits(args->v0.offset));
-       nvkm_wo32(chan->ramfc, 0x0c, upper_32_bits(args->v0.offset));
-       nvkm_wo32(chan->ramfc, 0x10, lower_32_bits(args->v0.offset));
-       nvkm_wo32(chan->ramfc, 0x14, upper_32_bits(args->v0.offset));
-       nvkm_wo32(chan->ramfc, 0x3c, 0x003f6078);
-       nvkm_wo32(chan->ramfc, 0x44, 0x01003fff);
-       nvkm_wo32(chan->ramfc, 0x48, chan->base.push->node->offset >> 4);
-       nvkm_wo32(chan->ramfc, 0x4c, 0xffffffff);
-       nvkm_wo32(chan->ramfc, 0x60, 0x7fffffff);
-       nvkm_wo32(chan->ramfc, 0x78, 0x00000000);
-       nvkm_wo32(chan->ramfc, 0x7c, 0x30000001);
-       nvkm_wo32(chan->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
-                                    (4 << 24) /* SEARCH_FULL */ |
-                                    (chan->ramht->gpuobj->node->offset >> 4));
-       nvkm_wo32(chan->ramfc, 0x88, chan->cache->addr >> 10);
-       nvkm_wo32(chan->ramfc, 0x98, chan->base.inst->addr >> 12);
-       nvkm_done(chan->ramfc);
-       return 0;
-}
-
-const struct nvkm_fifo_chan_oclass
-g84_fifo_dma_oclass = {
-       .base.oclass = G82_CHANNEL_DMA,
-       .base.minver = 0,
-       .base.maxver = 0,
-       .ctor = g84_fifo_dma_new,
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv50.c
deleted file mode 100644 (file)
index 8043718..0000000
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "channv50.h"
-
-#include <core/client.h>
-#include <core/ramht.h>
-
-#include <nvif/class.h>
-#include <nvif/cl506e.h>
-#include <nvif/unpack.h>
-
-static int
-nv50_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
-                 void *data, u32 size, struct nvkm_object **pobject)
-{
-       struct nvkm_object *parent = oclass->parent;
-       union {
-               struct nv50_channel_dma_v0 v0;
-       } *args = data;
-       struct nv50_fifo *fifo = nv50_fifo(base);
-       struct nv50_fifo_chan *chan;
-       int ret = -ENOSYS;
-
-       nvif_ioctl(parent, "create channel dma size %d\n", size);
-       if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
-               nvif_ioctl(parent, "create channel dma vers %d vmm %llx "
-                                  "pushbuf %llx offset %016llx\n",
-                          args->v0.version, args->v0.vmm, args->v0.pushbuf,
-                          args->v0.offset);
-               if (!args->v0.pushbuf)
-                       return -EINVAL;
-       } else
-               return ret;
-
-       if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
-               return -ENOMEM;
-       *pobject = &chan->base.object;
-
-       ret = nv50_fifo_chan_ctor(fifo, args->v0.vmm, args->v0.pushbuf,
-                                 oclass, chan);
-       if (ret)
-               return ret;
-
-       args->v0.chid = chan->base.chid;
-
-       nvkm_kmap(chan->ramfc);
-       nvkm_wo32(chan->ramfc, 0x08, lower_32_bits(args->v0.offset));
-       nvkm_wo32(chan->ramfc, 0x0c, upper_32_bits(args->v0.offset));
-       nvkm_wo32(chan->ramfc, 0x10, lower_32_bits(args->v0.offset));
-       nvkm_wo32(chan->ramfc, 0x14, upper_32_bits(args->v0.offset));
-       nvkm_wo32(chan->ramfc, 0x3c, 0x003f6078);
-       nvkm_wo32(chan->ramfc, 0x44, 0x01003fff);
-       nvkm_wo32(chan->ramfc, 0x48, chan->base.push->node->offset >> 4);
-       nvkm_wo32(chan->ramfc, 0x4c, 0xffffffff);
-       nvkm_wo32(chan->ramfc, 0x60, 0x7fffffff);
-       nvkm_wo32(chan->ramfc, 0x78, 0x00000000);
-       nvkm_wo32(chan->ramfc, 0x7c, 0x30000001);
-       nvkm_wo32(chan->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
-                                    (4 << 24) /* SEARCH_FULL */ |
-                                    (chan->ramht->gpuobj->node->offset >> 4));
-       nvkm_done(chan->ramfc);
-       return 0;
-}
-
-const struct nvkm_fifo_chan_oclass
-nv50_fifo_dma_oclass = {
-       .base.oclass = NV50_CHANNEL_DMA,
-       .base.minver = 0,
-       .base.maxver = 0,
-       .ctor = nv50_fifo_dma_new,
-};
index c0a7d0f..3885c38 100644 (file)
@@ -119,7 +119,6 @@ g84_fifo = {
        .uevent_init = g84_fifo_uevent_init,
        .uevent_fini = g84_fifo_uevent_fini,
        .chan = {
-               &g84_fifo_dma_oclass,
                &g84_fifo_gpfifo_oclass,
                NULL
        },
index b6900a5..ae6c4d8 100644 (file)
@@ -341,8 +341,6 @@ gk104_fifo_gpfifo_new(struct gk104_fifo *fifo, const struct nvkm_oclass *oclass,
                                   "runlist %016llx priv %d\n",
                           args->v0.version, args->v0.vmm, args->v0.ioffset,
                           args->v0.ilength, args->v0.runlist, args->v0.priv);
-               if (args->v0.priv && !oclass->client->super)
-                       return -EINVAL;
                return gk104_fifo_gpfifo_new_(fifo,
                                              &args->v0.runlist,
                                              &args->v0.chid,
index ee4967b..743791c 100644 (file)
@@ -226,8 +226,6 @@ gv100_fifo_gpfifo_new(struct gk104_fifo *fifo, const struct nvkm_oclass *oclass,
                                   "runlist %016llx priv %d\n",
                           args->v0.version, args->v0.vmm, args->v0.ioffset,
                           args->v0.ilength, args->v0.runlist, args->v0.priv);
-               if (args->v0.priv && !oclass->client->super)
-                       return -EINVAL;
                return gv100_fifo_gpfifo_new_(&gv100_fifo_gpfifo, fifo,
                                              &args->v0.runlist,
                                              &args->v0.chid,
index abef7fb..99aafa1 100644 (file)
@@ -65,8 +65,6 @@ tu102_fifo_gpfifo_new(struct gk104_fifo *fifo, const struct nvkm_oclass *oclass,
                                   "runlist %016llx priv %d\n",
                           args->v0.version, args->v0.vmm, args->v0.ioffset,
                           args->v0.ilength, args->v0.runlist, args->v0.priv);
-               if (args->v0.priv && !oclass->client->super)
-                       return -EINVAL;
                return gv100_fifo_gpfifo_new_(&tu102_fifo_gpfifo, fifo,
                                              &args->v0.runlist,
                                              &args->v0.chid,
index be94156..a08742c 100644 (file)
@@ -136,7 +136,6 @@ nv50_fifo = {
        .pause = nv04_fifo_pause,
        .start = nv04_fifo_start,
        .chan = {
-               &nv50_fifo_dma_oclass,
                &nv50_fifo_gpfifo_oclass,
                NULL
        },
index fac2f9a..e530bb8 100644 (file)
@@ -41,7 +41,7 @@ nvkm_umem_search(struct nvkm_client *client, u64 handle)
 
        object = nvkm_object_search(client, handle, &nvkm_umem);
        if (IS_ERR(object)) {
-               if (client->super && client != master) {
+               if (client != master) {
                        spin_lock(&master->lock);
                        list_for_each_entry(umem, &master->umem, head) {
                                if (umem->object.object == handle) {
@@ -53,8 +53,7 @@ nvkm_umem_search(struct nvkm_client *client, u64 handle)
                }
        } else {
                umem = nvkm_umem(object);
-               if (!umem->priv || client->super)
-                       memory = nvkm_memory_ref(umem->memory);
+               memory = nvkm_memory_ref(umem->memory);
        }
 
        return memory ? memory : ERR_PTR(-ENOENT);
@@ -167,7 +166,6 @@ nvkm_umem_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
        nvkm_object_ctor(&nvkm_umem, oclass, &umem->object);
        umem->mmu = mmu;
        umem->type = mmu->type[type].type;
-       umem->priv = oclass->client->super;
        INIT_LIST_HEAD(&umem->head);
        *pobject = &umem->object;
 
index 85cf692..d56a594 100644 (file)
@@ -8,7 +8,6 @@ struct nvkm_umem {
        struct nvkm_object object;
        struct nvkm_mmu *mmu;
        u8 type:8;
-       bool priv:1;
        bool mappable:1;
        bool io:1;
 
index 0e4b894..6870fda 100644 (file)
@@ -34,7 +34,7 @@ nvkm_ummu_sclass(struct nvkm_object *object, int index,
 {
        struct nvkm_mmu *mmu = nvkm_ummu(object)->mmu;
 
-       if (mmu->func->mem.user.oclass && oclass->client->super) {
+       if (mmu->func->mem.user.oclass) {
                if (index-- == 0) {
                        oclass->base = mmu->func->mem.user;
                        oclass->ctor = nvkm_umem_new;
index c43b824..d6a1f8d 100644 (file)
@@ -45,7 +45,6 @@ nvkm_uvmm_search(struct nvkm_client *client, u64 handle)
 static int
 nvkm_uvmm_mthd_pfnclr(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
 {
-       struct nvkm_client *client = uvmm->object.client;
        union {
                struct nvif_vmm_pfnclr_v0 v0;
        } *args = argv;
@@ -59,9 +58,6 @@ nvkm_uvmm_mthd_pfnclr(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
        } else
                return ret;
 
-       if (!client->super)
-               return -ENOENT;
-
        if (size) {
                mutex_lock(&vmm->mutex);
                ret = nvkm_vmm_pfn_unmap(vmm, addr, size);
@@ -74,7 +70,6 @@ nvkm_uvmm_mthd_pfnclr(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
 static int
 nvkm_uvmm_mthd_pfnmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
 {
-       struct nvkm_client *client = uvmm->object.client;
        union {
                struct nvif_vmm_pfnmap_v0 v0;
        } *args = argv;
@@ -93,9 +88,6 @@ nvkm_uvmm_mthd_pfnmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
        } else
                return ret;
 
-       if (!client->super)
-               return -ENOENT;
-
        if (size) {
                mutex_lock(&vmm->mutex);
                ret = nvkm_vmm_pfn_map(vmm, page, addr, size, phys);
@@ -108,7 +100,6 @@ nvkm_uvmm_mthd_pfnmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
 static int
 nvkm_uvmm_mthd_unmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
 {
-       struct nvkm_client *client = uvmm->object.client;
        union {
                struct nvif_vmm_unmap_v0 v0;
        } *args = argv;
@@ -130,9 +121,8 @@ nvkm_uvmm_mthd_unmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
                goto done;
        }
 
-       if (ret = -ENOENT, (!vma->user && !client->super) || vma->busy) {
-               VMM_DEBUG(vmm, "denied %016llx: %d %d %d", addr,
-                         vma->user, !client->super, vma->busy);
+       if (ret = -ENOENT, vma->busy) {
+               VMM_DEBUG(vmm, "denied %016llx: %d", addr, vma->busy);
                goto done;
        }
 
@@ -181,9 +171,8 @@ nvkm_uvmm_mthd_map(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
                goto fail;
        }
 
-       if (ret = -ENOENT, (!vma->user && !client->super) || vma->busy) {
-               VMM_DEBUG(vmm, "denied %016llx: %d %d %d", addr,
-                         vma->user, !client->super, vma->busy);
+       if (ret = -ENOENT, vma->busy) {
+               VMM_DEBUG(vmm, "denied %016llx: %d", addr, vma->busy);
                goto fail;
        }
 
@@ -230,7 +219,6 @@ fail:
 static int
 nvkm_uvmm_mthd_put(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
 {
-       struct nvkm_client *client = uvmm->object.client;
        union {
                struct nvif_vmm_put_v0 v0;
        } *args = argv;
@@ -252,9 +240,8 @@ nvkm_uvmm_mthd_put(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
                goto done;
        }
 
-       if (ret = -ENOENT, (!vma->user && !client->super) || vma->busy) {
-               VMM_DEBUG(vmm, "denied %016llx: %d %d %d", addr,
-                         vma->user, !client->super, vma->busy);
+       if (ret = -ENOENT, vma->busy) {
+               VMM_DEBUG(vmm, "denied %016llx: %d", addr, vma->busy);
                goto done;
        }
 
@@ -268,7 +255,6 @@ done:
 static int
 nvkm_uvmm_mthd_get(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
 {
-       struct nvkm_client *client = uvmm->object.client;
        union {
                struct nvif_vmm_get_v0 v0;
        } *args = argv;
@@ -297,7 +283,6 @@ nvkm_uvmm_mthd_get(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
                return ret;
 
        args->v0.addr = vma->addr;
-       vma->user = !client->super;
        return ret;
 }
 
index 710f3f8..8bf00b3 100644 (file)
@@ -774,7 +774,6 @@ nvkm_vma_tail(struct nvkm_vma *vma, u64 tail)
        new->refd = vma->refd;
        new->used = vma->used;
        new->part = vma->part;
-       new->user = vma->user;
        new->busy = vma->busy;
        new->mapped = vma->mapped;
        list_add(&new->head, &vma->head);
@@ -951,7 +950,7 @@ nvkm_vmm_node_split(struct nvkm_vmm *vmm,
 static void
 nvkm_vma_dump(struct nvkm_vma *vma)
 {
-       printk(KERN_ERR "%016llx %016llx %c%c%c%c%c%c%c%c%c %p\n",
+       printk(KERN_ERR "%016llx %016llx %c%c%c%c%c%c%c%c %p\n",
               vma->addr, (u64)vma->size,
               vma->used ? '-' : 'F',
               vma->mapref ? 'R' : '-',
@@ -959,7 +958,6 @@ nvkm_vma_dump(struct nvkm_vma *vma)
               vma->page != NVKM_VMA_PAGE_NONE ? '0' + vma->page : '-',
               vma->refd != NVKM_VMA_PAGE_NONE ? '0' + vma->refd : '-',
               vma->part ? 'P' : '-',
-              vma->user ? 'U' : '-',
               vma->busy ? 'B' : '-',
               vma->mapped ? 'M' : '-',
               vma->memory);
@@ -1024,7 +1022,6 @@ nvkm_vmm_ctor_managed(struct nvkm_vmm *vmm, u64 addr, u64 size)
        vma->mapref = true;
        vma->sparse = false;
        vma->used = true;
-       vma->user = true;
        nvkm_vmm_node_insert(vmm, vma);
        list_add_tail(&vma->head, &vmm->list);
        return 0;
@@ -1615,7 +1612,6 @@ nvkm_vmm_put_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
        vma->page = NVKM_VMA_PAGE_NONE;
        vma->refd = NVKM_VMA_PAGE_NONE;
        vma->used = false;
-       vma->user = false;
        nvkm_vmm_put_region(vmm, vma);
 }
 
index f02abd9..b5e7337 100644 (file)
@@ -534,15 +534,13 @@ int
 gp100_vmm_mthd(struct nvkm_vmm *vmm,
               struct nvkm_client *client, u32 mthd, void *argv, u32 argc)
 {
-       if (client->super) {
-               switch (mthd) {
-               case GP100_VMM_VN_FAULT_REPLAY:
-                       return gp100_vmm_fault_replay(vmm, argv, argc);
-               case GP100_VMM_VN_FAULT_CANCEL:
-                       return gp100_vmm_fault_cancel(vmm, argv, argc);
-               default:
-                       break;
-               }
+       switch (mthd) {
+       case GP100_VMM_VN_FAULT_REPLAY:
+               return gp100_vmm_fault_replay(vmm, argv, argc);
+       case GP100_VMM_VN_FAULT_CANCEL:
+               return gp100_vmm_fault_cancel(vmm, argv, argc);
+       default:
+               break;
        }
        return -EINVAL;
 }
index 21939d4..1b80290 100644 (file)
@@ -4166,7 +4166,7 @@ static const struct drm_display_mode yes_optoelectronics_ytc700tlag_05_201c_mode
 static const struct panel_desc yes_optoelectronics_ytc700tlag_05_201c = {
        .modes = &yes_optoelectronics_ytc700tlag_05_201c_mode,
        .num_modes = 1,
-       .bpc = 6,
+       .bpc = 8,
        .size = {
                .width = 154,
                .height = 90,
index 519deea..2df59b3 100644 (file)
@@ -44,6 +44,8 @@ static unsigned ttm_glob_use_count;
 struct ttm_global ttm_glob;
 EXPORT_SYMBOL(ttm_glob);
 
+struct dentry *ttm_debugfs_root;
+
 static void ttm_global_release(void)
 {
        struct ttm_global *glob = &ttm_glob;
@@ -53,6 +55,7 @@ static void ttm_global_release(void)
                goto out;
 
        ttm_pool_mgr_fini();
+       debugfs_remove(ttm_debugfs_root);
 
        __free_page(glob->dummy_read_page);
        memset(glob, 0, sizeof(*glob));
@@ -73,6 +76,11 @@ static int ttm_global_init(void)
 
        si_meminfo(&si);
 
+       ttm_debugfs_root = debugfs_create_dir("ttm", NULL);
+       if (IS_ERR(ttm_debugfs_root)) {
+               ttm_debugfs_root = NULL;
+       }
+
        /* Limit the number of pages in the pool to about 50% of the total
         * system memory.
         */
@@ -100,6 +108,8 @@ static int ttm_global_init(void)
        debugfs_create_atomic_t("buffer_objects", 0444, ttm_debugfs_root,
                                &glob->bo_count);
 out:
+       if (ret && ttm_debugfs_root)
+               debugfs_remove(ttm_debugfs_root);
        if (ret)
                --ttm_glob_use_count;
        mutex_unlock(&ttm_global_mutex);
index 997c458..7fcdef2 100644 (file)
@@ -72,22 +72,6 @@ pgprot_t ttm_prot_from_caching(enum ttm_caching caching, pgprot_t tmp)
        return tmp;
 }
 
-struct dentry *ttm_debugfs_root;
-
-static int __init ttm_init(void)
-{
-       ttm_debugfs_root = debugfs_create_dir("ttm", NULL);
-       return 0;
-}
-
-static void __exit ttm_exit(void)
-{
-       debugfs_remove(ttm_debugfs_root);
-}
-
-module_init(ttm_init);
-module_exit(ttm_exit);
-
 MODULE_AUTHOR("Thomas Hellstrom, Jerome Glisse");
 MODULE_DESCRIPTION("TTM memory manager subsystem (for DRM device)");
 MODULE_LICENSE("GPL and additional rights");
index d1cef3b..5652d98 100644 (file)
@@ -492,7 +492,7 @@ struct vmw_private {
        resource_size_t vram_start;
        resource_size_t vram_size;
        resource_size_t prim_bb_mem;
-       void __iomem *rmmio;
+       u32 __iomem *rmmio;
        u32 *fifo_mem;
        resource_size_t fifo_mem_size;
        uint32_t fb_max_width;
index d166ee2..1183185 100644 (file)
@@ -1003,19 +1003,16 @@ err_cpmem:
 static void ipu_irq_handle(struct ipu_soc *ipu, const int *regs, int num_regs)
 {
        unsigned long status;
-       int i, bit, irq;
+       int i, bit;
 
        for (i = 0; i < num_regs; i++) {
 
                status = ipu_cm_read(ipu, IPU_INT_STAT(regs[i]));
                status &= ipu_cm_read(ipu, IPU_INT_CTRL(regs[i]));
 
-               for_each_set_bit(bit, &status, 32) {
-                       irq = irq_linear_revmap(ipu->domain,
-                                               regs[i] * 32 + bit);
-                       if (irq)
-                               generic_handle_irq(irq);
-               }
+               for_each_set_bit(bit, &status, 32)
+                       generic_handle_domain_irq(ipu->domain,
+                                                 regs[i] * 32 + bit);
        }
 }
 
index a1c85d1..82b244c 100644 (file)
@@ -585,21 +585,21 @@ static const struct ipu_rgb def_bgra_16 = {
        .bits_per_pixel = 16,
 };
 
-#define Y_OFFSET(pix, x, y)    ((x) + pix->width * (y))
-#define U_OFFSET(pix, x, y)    ((pix->width * pix->height) +           \
-                                (pix->width * ((y) / 2) / 2) + (x) / 2)
-#define V_OFFSET(pix, x, y)    ((pix->width * pix->height) +           \
-                                (pix->width * pix->height / 4) +       \
-                                (pix->width * ((y) / 2) / 2) + (x) / 2)
-#define U2_OFFSET(pix, x, y)   ((pix->width * pix->height) +           \
-                                (pix->width * (y) / 2) + (x) / 2)
-#define V2_OFFSET(pix, x, y)   ((pix->width * pix->height) +           \
-                                (pix->width * pix->height / 2) +       \
-                                (pix->width * (y) / 2) + (x) / 2)
-#define UV_OFFSET(pix, x, y)   ((pix->width * pix->height) +   \
-                                (pix->width * ((y) / 2)) + (x))
-#define UV2_OFFSET(pix, x, y)  ((pix->width * pix->height) +   \
-                                (pix->width * y) + (x))
+#define Y_OFFSET(pix, x, y)    ((x) + pix->bytesperline * (y))
+#define U_OFFSET(pix, x, y)    ((pix->bytesperline * pix->height) +     \
+                                (pix->bytesperline * ((y) / 2) / 2) + (x) / 2)
+#define V_OFFSET(pix, x, y)    ((pix->bytesperline * pix->height) +     \
+                                (pix->bytesperline * pix->height / 4) + \
+                                (pix->bytesperline * ((y) / 2) / 2) + (x) / 2)
+#define U2_OFFSET(pix, x, y)   ((pix->bytesperline * pix->height) +     \
+                                (pix->bytesperline * (y) / 2) + (x) / 2)
+#define V2_OFFSET(pix, x, y)   ((pix->bytesperline * pix->height) +     \
+                                (pix->bytesperline * pix->height / 2) + \
+                                (pix->bytesperline * (y) / 2) + (x) / 2)
+#define UV_OFFSET(pix, x, y)   ((pix->bytesperline * pix->height) +     \
+                                (pix->bytesperline * ((y) / 2)) + (x))
+#define UV2_OFFSET(pix, x, y)  ((pix->bytesperline * pix->height) +     \
+                                (pix->bytesperline * y) + (x))
 
 #define NUM_ALPHA_CHANNELS     7
 
index 1605549..76937f7 100644 (file)
@@ -576,7 +576,7 @@ config HID_LOGITECH_HIDPP
        depends on HID_LOGITECH
        select POWER_SUPPLY
        help
-       Support for Logitech devices relyingon the HID++ Logitech specification
+       Support for Logitech devices relying on the HID++ Logitech specification
 
        Say Y if you want support for Logitech devices relying on the HID++
        specification. Such devices are the various Logitech Touchpads (T650,
index 96e2577..8d68796 100644 (file)
@@ -58,7 +58,7 @@ static void amd_stop_sensor_v2(struct amd_mp2_dev *privdata, u16 sensor_idx)
        cmd_base.cmd_v2.sensor_id = sensor_idx;
        cmd_base.cmd_v2.length  = 16;
 
-       writeq(0x0, privdata->mmio + AMD_C2P_MSG2);
+       writeq(0x0, privdata->mmio + AMD_C2P_MSG1);
        writel(cmd_base.ul, privdata->mmio + AMD_C2P_MSG0);
 }
 
index 6b8f0d0..dc6bd42 100644 (file)
@@ -501,6 +501,8 @@ static const struct hid_device_id apple_devices[] = {
                        APPLE_RDESC_JIS },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ANSI),
                .driver_data = APPLE_HAS_FN },
+       { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ANSI),
+               .driver_data = APPLE_HAS_FN },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ISO),
                .driver_data = APPLE_HAS_FN },
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ISO),
index fca8fc7..fb807c8 100644 (file)
@@ -485,9 +485,6 @@ static void asus_kbd_backlight_set(struct led_classdev *led_cdev,
 {
        struct asus_kbd_leds *led = container_of(led_cdev, struct asus_kbd_leds,
                                                 cdev);
-       if (led->brightness == brightness)
-               return;
-
        led->brightness = brightness;
        schedule_work(&led->work);
 }
index f43a840..4ef1c3b 100644 (file)
@@ -742,7 +742,7 @@ static int ft260_is_interface_enabled(struct hid_device *hdev)
        int ret;
 
        ret = ft260_get_system_config(hdev, &cfg);
-       if (ret)
+       if (ret < 0)
                return ret;
 
        ft260_dbg("interface:  0x%02x\n", interface);
@@ -754,23 +754,16 @@ static int ft260_is_interface_enabled(struct hid_device *hdev)
        switch (cfg.chip_mode) {
        case FT260_MODE_ALL:
        case FT260_MODE_BOTH:
-               if (interface == 1) {
+               if (interface == 1)
                        hid_info(hdev, "uart interface is not supported\n");
-                       return 0;
-               }
-               ret = 1;
+               else
+                       ret = 1;
                break;
        case FT260_MODE_UART:
-               if (interface == 0) {
-                       hid_info(hdev, "uart is unsupported on interface 0\n");
-                       ret = 0;
-               }
+               hid_info(hdev, "uart interface is not supported\n");
                break;
        case FT260_MODE_I2C:
-               if (interface == 1) {
-                       hid_info(hdev, "i2c is unsupported on interface 1\n");
-                       ret = 0;
-               }
+               ret = 1;
                break;
        }
        return ret;
@@ -785,7 +778,7 @@ static int ft260_byte_show(struct hid_device *hdev, int id, u8 *cfg, int len,
        if (ret < 0)
                return ret;
 
-       return scnprintf(buf, PAGE_SIZE, "%hi\n", *field);
+       return scnprintf(buf, PAGE_SIZE, "%d\n", *field);
 }
 
 static int ft260_word_show(struct hid_device *hdev, int id, u8 *cfg, int len,
@@ -797,7 +790,7 @@ static int ft260_word_show(struct hid_device *hdev, int id, u8 *cfg, int len,
        if (ret < 0)
                return ret;
 
-       return scnprintf(buf, PAGE_SIZE, "%hi\n", le16_to_cpu(*field));
+       return scnprintf(buf, PAGE_SIZE, "%d\n", le16_to_cpu(*field));
 }
 
 #define FT260_ATTR_SHOW(name, reptype, id, type, func)                        \
@@ -1004,11 +997,9 @@ err_hid_stop:
 
 static void ft260_remove(struct hid_device *hdev)
 {
-       int ret;
        struct ft260_device *dev = hid_get_drvdata(hdev);
 
-       ret = ft260_is_interface_enabled(hdev);
-       if (ret <= 0)
+       if (!dev)
                return;
 
        sysfs_remove_group(&hdev->dev.kobj, &ft260_attr_group);
index 6b1fa97..91bf4d0 100644 (file)
@@ -784,6 +784,17 @@ static void hid_ishtp_cl_reset_handler(struct work_struct *work)
        }
 }
 
+static void hid_ishtp_cl_resume_handler(struct work_struct *work)
+{
+       struct ishtp_cl_data *client_data = container_of(work, struct ishtp_cl_data, resume_work);
+       struct ishtp_cl *hid_ishtp_cl = client_data->hid_ishtp_cl;
+
+       if (ishtp_wait_resume(ishtp_get_ishtp_device(hid_ishtp_cl))) {
+               client_data->suspended = false;
+               wake_up_interruptible(&client_data->ishtp_resume_wait);
+       }
+}
+
 ishtp_print_log ishtp_hid_print_trace;
 
 /**
@@ -822,6 +833,8 @@ static int hid_ishtp_cl_probe(struct ishtp_cl_device *cl_device)
        init_waitqueue_head(&client_data->ishtp_resume_wait);
 
        INIT_WORK(&client_data->work, hid_ishtp_cl_reset_handler);
+       INIT_WORK(&client_data->resume_work, hid_ishtp_cl_resume_handler);
+
 
        ishtp_hid_print_trace = ishtp_trace_callback(cl_device);
 
@@ -921,7 +934,7 @@ static int hid_ishtp_cl_resume(struct device *device)
 
        hid_ishtp_trace(client_data, "%s hid_ishtp_cl %p\n", __func__,
                        hid_ishtp_cl);
-       client_data->suspended = false;
+       schedule_work(&client_data->resume_work);
        return 0;
 }
 
index f88443a..6a5cc11 100644 (file)
@@ -135,6 +135,7 @@ struct ishtp_cl_data {
        int multi_packet_cnt;
 
        struct work_struct work;
+       struct work_struct resume_work;
        struct ishtp_cl_device *cl_device;
 };
 
index f0802b0..aa2c516 100644 (file)
@@ -314,13 +314,6 @@ static int ishtp_cl_device_resume(struct device *dev)
        if (!device)
                return 0;
 
-       /*
-        * When ISH needs hard reset, it is done asynchrnously, hence bus
-        * resume will  be called before full ISH resume
-        */
-       if (device->ishtp_dev->resume_flag)
-               return 0;
-
        driver = to_ishtp_cl_driver(dev->driver);
        if (driver && driver->driver.pm) {
                if (driver->driver.pm->resume)
@@ -849,6 +842,28 @@ struct device *ishtp_device(struct ishtp_cl_device *device)
 }
 EXPORT_SYMBOL(ishtp_device);
 
+/**
+ * ishtp_wait_resume() - Wait for IPC resume
+ *
+ * Wait for IPC resume
+ *
+ * Return: resume complete or not
+ */
+bool ishtp_wait_resume(struct ishtp_device *dev)
+{
+       /* 50ms to get resume response */
+       #define WAIT_FOR_RESUME_ACK_MS          50
+
+       /* Waiting to get resume response */
+       if (dev->resume_flag)
+               wait_event_interruptible_timeout(dev->resume_wait,
+                                                !dev->resume_flag,
+                                                msecs_to_jiffies(WAIT_FOR_RESUME_ACK_MS));
+
+       return (!dev->resume_flag);
+}
+EXPORT_SYMBOL_GPL(ishtp_wait_resume);
+
 /**
  * ishtp_get_pci_device() - Return PCI device dev pointer
  * This interface is used to return PCI device pointer
index dcf3a23..7c2032f 100644 (file)
@@ -38,7 +38,7 @@ config USB_HIDDEV
        help
          Say Y here if you want to support HID devices (from the USB
          specification standpoint) that aren't strictly user interface
-         devices, like monitor controls and Uninterruptable Power Supplies.
+         devices, like monitor controls and Uninterruptible Power Supplies.
 
          This module supports these devices separately using a separate
          event interface on /dev/usb/hiddevX (char 180:96 to 180:111).
index 81d7d12..81ba642 100644 (file)
@@ -2548,6 +2548,9 @@ static void wacom_wac_finger_slot(struct wacom_wac *wacom_wac,
                int slot;
 
                slot = input_mt_get_slot_by_key(input, hid_data->id);
+               if (slot < 0)
+                       return;
+
                input_mt_slot(input, slot);
                input_mt_report_slot_state(input, MT_TOOL_FINGER, prox);
        }
@@ -3831,7 +3834,7 @@ int wacom_setup_touch_input_capabilities(struct input_dev *input_dev,
                    wacom_wac->shared->touch->product == 0xF6) {
                        input_dev->evbit[0] |= BIT_MASK(EV_SW);
                        __set_bit(SW_MUTE_DEVICE, input_dev->swbit);
-                       wacom_wac->shared->has_mute_touch_switch = true;
+                       wacom_wac->has_mute_touch_switch = true;
                }
                fallthrough;
 
index cceaf69..6304d1d 100644 (file)
@@ -1224,14 +1224,14 @@ static int bcm_iproc_i2c_unreg_slave(struct i2c_client *slave)
 
        disable_irq(iproc_i2c->irq);
 
+       tasklet_kill(&iproc_i2c->slave_rx_tasklet);
+
        /* disable all slave interrupts */
        tmp = iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET);
        tmp &= ~(IE_S_ALL_INTERRUPT_MASK <<
                        IE_S_ALL_INTERRUPT_SHIFT);
        iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, tmp);
 
-       tasklet_kill(&iproc_i2c->slave_rx_tasklet);
-
        /* Erase the slave address programmed */
        tmp = iproc_i2c_rd_reg(iproc_i2c, S_CFG_SMBUS_ADDR_OFFSET);
        tmp &= ~BIT(S_CFG_EN_NIC_SMB_ADDR3_SHIFT);
index cb64fe6..77f576e 100644 (file)
@@ -141,7 +141,7 @@ static ssize_t i2cdev_read(struct file *file, char __user *buf, size_t count,
        if (count > 8192)
                count = 8192;
 
-       tmp = kmalloc(count, GFP_KERNEL);
+       tmp = kzalloc(count, GFP_KERNEL);
        if (tmp == NULL)
                return -ENOMEM;
 
@@ -150,7 +150,8 @@ static ssize_t i2cdev_read(struct file *file, char __user *buf, size_t count,
 
        ret = i2c_master_recv(client, tmp, count);
        if (ret >= 0)
-               ret = copy_to_user(buf, tmp, count) ? -EFAULT : ret;
+               if (copy_to_user(buf, tmp, ret))
+                       ret = -EFAULT;
        kfree(tmp);
        return ret;
 }
index 0e56ace..8d8b1ba 100644 (file)
@@ -231,6 +231,7 @@ config DMARD10
 
 config FXLS8962AF
        tristate
+       depends on I2C || !I2C # cannot be built-in for modular I2C
 
 config FXLS8962AF_I2C
        tristate "NXP FXLS8962AF/FXLS8964AF Accelerometer I2C Driver"
@@ -247,6 +248,7 @@ config FXLS8962AF_I2C
 config FXLS8962AF_SPI
        tristate "NXP FXLS8962AF/FXLS8964AF Accelerometer SPI Driver"
        depends on SPI
+       depends on I2C || !I2C
        select FXLS8962AF
        select REGMAP_SPI
        help
index 078d878..0019f1e 100644 (file)
@@ -637,7 +637,7 @@ static int fxls8962af_i2c_raw_read_errata3(struct fxls8962af_data *data,
                        return ret;
        }
 
-       return ret;
+       return 0;
 }
 
 static int fxls8962af_fifo_transfer(struct fxls8962af_data *data,
index 6ef0960..f9c8385 100644 (file)
@@ -664,8 +664,8 @@ static int palmas_adc_wakeup_configure(struct palmas_gpadc *adc)
 
        adc_period = adc->auto_conversion_period;
        for (i = 0; i < 16; ++i) {
-               if (((1000 * (1 << i)) / 32) < adc_period)
-                       continue;
+               if (((1000 * (1 << i)) / 32) >= adc_period)
+                       break;
        }
        if (i > 0)
                i--;
index 7010c42..c56fccb 100644 (file)
@@ -16,6 +16,8 @@
 #include <linux/completion.h>
 #include <linux/regmap.h>
 #include <linux/iio/iio.h>
+#include <linux/iio/driver.h>
+#include <linux/iio/machine.h>
 #include <linux/slab.h>
 
 #define RN5T618_ADC_CONVERSION_TIMEOUT   (msecs_to_jiffies(500))
@@ -189,6 +191,19 @@ static const struct iio_chan_spec rn5t618_adc_iio_channels[] = {
        RN5T618_ADC_CHANNEL(AIN0, IIO_VOLTAGE, "AIN0")
 };
 
+static struct iio_map rn5t618_maps[] = {
+       IIO_MAP("VADP", "rn5t618-power", "vadp"),
+       IIO_MAP("VUSB", "rn5t618-power", "vusb"),
+       { /* sentinel */ }
+};
+
+static void unregister_map(void *data)
+{
+       struct iio_dev *iio_dev = (struct iio_dev *) data;
+
+       iio_map_array_unregister(iio_dev);
+}
+
 static int rn5t618_adc_probe(struct platform_device *pdev)
 {
        int ret;
@@ -239,6 +254,14 @@ static int rn5t618_adc_probe(struct platform_device *pdev)
                return ret;
        }
 
+       ret = iio_map_array_register(iio_dev, rn5t618_maps);
+       if (ret < 0)
+               return ret;
+
+       ret = devm_add_action_or_reset(adc->dev, unregister_map, iio_dev);
+       if (ret < 0)
+               return ret;
+
        return devm_iio_device_register(adc->dev, iio_dev);
 }
 
index 2383eac..a2b83f0 100644 (file)
@@ -568,7 +568,6 @@ static int ti_ads7950_probe(struct spi_device *spi)
        st->ring_xfer.tx_buf = &st->tx_buf[0];
        st->ring_xfer.rx_buf = &st->rx_buf[0];
        /* len will be set later */
-       st->ring_xfer.cs_change = true;
 
        spi_message_add_tail(&st->ring_xfer, &st->ring_msg);
 
index 2a957f1..9e0fce9 100644 (file)
@@ -25,6 +25,8 @@
 #include <linux/iio/trigger_consumer.h>
 #include <linux/iio/triggered_buffer.h>
 
+#include <linux/time.h>
+
 #define HDC100X_REG_TEMP                       0x00
 #define HDC100X_REG_HUMIDITY                   0x01
 
@@ -166,7 +168,7 @@ static int hdc100x_get_measurement(struct hdc100x_data *data,
                                   struct iio_chan_spec const *chan)
 {
        struct i2c_client *client = data->client;
-       int delay = data->adc_int_us[chan->address];
+       int delay = data->adc_int_us[chan->address] + 1*USEC_PER_MSEC;
        int ret;
        __be16 val;
 
@@ -316,7 +318,7 @@ static irqreturn_t hdc100x_trigger_handler(int irq, void *p)
        struct iio_dev *indio_dev = pf->indio_dev;
        struct hdc100x_data *data = iio_priv(indio_dev);
        struct i2c_client *client = data->client;
-       int delay = data->adc_int_us[0] + data->adc_int_us[1];
+       int delay = data->adc_int_us[0] + data->adc_int_us[1] + 2*USEC_PER_MSEC;
        int ret;
 
        /* dual read starts at temp register */
index a5b421f..b9a06ca 100644 (file)
@@ -411,12 +411,11 @@ int __adis_initial_startup(struct adis *adis)
        int ret;
 
        /* check if the device has rst pin low */
-       gpio = devm_gpiod_get_optional(&adis->spi->dev, "reset", GPIOD_ASIS);
+       gpio = devm_gpiod_get_optional(&adis->spi->dev, "reset", GPIOD_OUT_HIGH);
        if (IS_ERR(gpio))
                return PTR_ERR(gpio);
 
        if (gpio) {
-               gpiod_set_value_cansleep(gpio, 1);
                msleep(10);
                /* bring device out of reset */
                gpiod_set_value_cansleep(gpio, 0);
index 515a7e9..5d3b8b8 100644 (file)
@@ -926,12 +926,25 @@ static int cma_init_ud_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
        return ret;
 }
 
+static int cma_init_conn_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
+{
+       struct ib_qp_attr qp_attr;
+       int qp_attr_mask, ret;
+
+       qp_attr.qp_state = IB_QPS_INIT;
+       ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
+       if (ret)
+               return ret;
+
+       return ib_modify_qp(qp, &qp_attr, qp_attr_mask);
+}
+
 int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
                   struct ib_qp_init_attr *qp_init_attr)
 {
        struct rdma_id_private *id_priv;
        struct ib_qp *qp;
-       int ret = 0;
+       int ret;
 
        id_priv = container_of(id, struct rdma_id_private, id);
        if (id->device != pd->device) {
@@ -948,6 +961,8 @@ int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
 
        if (id->qp_type == IB_QPT_UD)
                ret = cma_init_ud_qp(id_priv, qp);
+       else
+               ret = cma_init_conn_qp(id_priv, qp);
        if (ret)
                goto out_destroy;
 
index f782d5e..03e1db5 100644 (file)
@@ -249,6 +249,9 @@ static int UVERBS_HANDLER(UVERBS_METHOD_REG_DMABUF_MR)(
        mr->uobject = uobj;
        atomic_inc(&pd->usecnt);
 
+       rdma_restrack_new(&mr->res, RDMA_RESTRACK_MR);
+       rdma_restrack_set_name(&mr->res, NULL);
+       rdma_restrack_add(&mr->res);
        uobj->object = mr;
 
        uverbs_finalize_uobj_create(attrs, UVERBS_ATTR_REG_DMABUF_MR_HANDLE);
index 283b6b8..ea0054c 100644 (file)
@@ -1681,6 +1681,7 @@ int bnxt_re_create_srq(struct ib_srq *ib_srq,
        if (nq)
                nq->budget++;
        atomic_inc(&rdev->srq_count);
+       spin_lock_init(&srq->lock);
 
        return 0;
 
index a8688a9..4678bd6 100644 (file)
@@ -1397,7 +1397,6 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 wqe_mode)
        memset(&rattr, 0, sizeof(rattr));
        rc = bnxt_re_register_netdev(rdev);
        if (rc) {
-               rtnl_unlock();
                ibdev_err(&rdev->ibdev,
                          "Failed to register with netedev: %#x\n", rc);
                return -EINVAL;
index 6c8c910..c7e8d7b 100644 (file)
@@ -967,6 +967,12 @@ int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
        return !err || err == -ENODATA ? npolled : err;
 }
 
+void c4iw_cq_rem_ref(struct c4iw_cq *chp)
+{
+       if (refcount_dec_and_test(&chp->refcnt))
+               complete(&chp->cq_rel_comp);
+}
+
 int c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
 {
        struct c4iw_cq *chp;
@@ -976,8 +982,8 @@ int c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
        chp = to_c4iw_cq(ib_cq);
 
        xa_erase_irq(&chp->rhp->cqs, chp->cq.cqid);
-       refcount_dec(&chp->refcnt);
-       wait_event(chp->wait, !refcount_read(&chp->refcnt));
+       c4iw_cq_rem_ref(chp);
+       wait_for_completion(&chp->cq_rel_comp);
 
        ucontext = rdma_udata_to_drv_context(udata, struct c4iw_ucontext,
                                             ibucontext);
@@ -1081,7 +1087,7 @@ int c4iw_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
        spin_lock_init(&chp->lock);
        spin_lock_init(&chp->comp_handler_lock);
        refcount_set(&chp->refcnt, 1);
-       init_waitqueue_head(&chp->wait);
+       init_completion(&chp->cq_rel_comp);
        ret = xa_insert_irq(&rhp->cqs, chp->cq.cqid, chp, GFP_KERNEL);
        if (ret)
                goto err_destroy_cq;
index 7798d09..34211a5 100644 (file)
@@ -213,8 +213,7 @@ void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
                break;
        }
 done:
-       if (refcount_dec_and_test(&chp->refcnt))
-               wake_up(&chp->wait);
+       c4iw_cq_rem_ref(chp);
        c4iw_qp_rem_ref(&qhp->ibqp);
 out:
        return;
@@ -234,8 +233,7 @@ int c4iw_ev_handler(struct c4iw_dev *dev, u32 qid)
                spin_lock_irqsave(&chp->comp_handler_lock, flag);
                (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
                spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
-               if (refcount_dec_and_test(&chp->refcnt))
-                       wake_up(&chp->wait);
+               c4iw_cq_rem_ref(chp);
        } else {
                pr_debug("unknown cqid 0x%x\n", qid);
                xa_unlock_irqrestore(&dev->cqs, flag);
index 3883af3..ac5f581 100644 (file)
@@ -428,7 +428,7 @@ struct c4iw_cq {
        spinlock_t lock;
        spinlock_t comp_handler_lock;
        refcount_t refcnt;
-       wait_queue_head_t wait;
+       struct completion cq_rel_comp;
        struct c4iw_wr_wait *wr_waitp;
 };
 
@@ -979,6 +979,7 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start,
 struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc);
 int c4iw_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata);
 int c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata);
+void c4iw_cq_rem_ref(struct c4iw_cq *chp);
 int c4iw_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
                   struct ib_udata *udata);
 int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
index 203e6dd..be4a07b 100644 (file)
@@ -357,6 +357,7 @@ static int efa_enable_msix(struct efa_dev *dev)
        }
 
        if (irq_num != msix_vecs) {
+               efa_disable_msix(dev);
                dev_err(&dev->pdev->dev,
                        "Allocated %d MSI-X (out of %d requested)\n",
                        irq_num, msix_vecs);
index eb15c31..e83dc56 100644 (file)
@@ -3055,6 +3055,7 @@ static void __sdma_process_event(struct sdma_engine *sde,
 static int _extend_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
 {
        int i;
+       struct sdma_desc *descp;
 
        /* Handle last descriptor */
        if (unlikely((tx->num_desc == (MAX_DESC - 1)))) {
@@ -3075,12 +3076,10 @@ static int _extend_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
        if (unlikely(tx->num_desc == MAX_DESC))
                goto enomem;
 
-       tx->descp = kmalloc_array(
-                       MAX_DESC,
-                       sizeof(struct sdma_desc),
-                       GFP_ATOMIC);
-       if (!tx->descp)
+       descp = kmalloc_array(MAX_DESC, sizeof(struct sdma_desc), GFP_ATOMIC);
+       if (!descp)
                goto enomem;
+       tx->descp = descp;
 
        /* reserve last descriptor for coalescing */
        tx->desc_limit = MAX_DESC - 1;
index 8f68cc3..84f3f2b 100644 (file)
@@ -213,8 +213,10 @@ int hns_roce_cmd_use_events(struct hns_roce_dev *hr_dev)
 
        hr_cmd->context =
                kcalloc(hr_cmd->max_cmds, sizeof(*hr_cmd->context), GFP_KERNEL);
-       if (!hr_cmd->context)
+       if (!hr_cmd->context) {
+               hr_dev->cmd_mod = 0;
                return -ENOMEM;
+       }
 
        for (i = 0; i < hr_cmd->max_cmds; ++i) {
                hr_cmd->context[i].token = i;
@@ -228,7 +230,6 @@ int hns_roce_cmd_use_events(struct hns_roce_dev *hr_dev)
        spin_lock_init(&hr_cmd->context_lock);
 
        hr_cmd->use_events = 1;
-       down(&hr_cmd->poll_sem);
 
        return 0;
 }
@@ -239,8 +240,6 @@ void hns_roce_cmd_use_polling(struct hns_roce_dev *hr_dev)
 
        kfree(hr_cmd->context);
        hr_cmd->use_events = 0;
-
-       up(&hr_cmd->poll_sem);
 }
 
 struct hns_roce_cmd_mailbox *
index 078a971..cc6eab1 100644 (file)
@@ -873,11 +873,9 @@ int hns_roce_init(struct hns_roce_dev *hr_dev)
 
        if (hr_dev->cmd_mod) {
                ret = hns_roce_cmd_use_events(hr_dev);
-               if (ret) {
+               if (ret)
                        dev_warn(dev,
                                 "Cmd event  mode failed, set back to poll!\n");
-                       hns_roce_cmd_use_polling(hr_dev);
-               }
        }
 
        ret = hns_roce_init_hem(hr_dev);
index dab8828..b6f9c41 100644 (file)
@@ -6,7 +6,7 @@ config INFINIBAND_IRDMA
        depends on PCI
        depends on ICE && I40E
        select GENERIC_ALLOCATOR
-       select CONFIG_AUXILIARY_BUS
+       select AUXILIARY_BUS
        help
          This is an Intel(R) Ethernet Protocol Driver for RDMA driver
          that support E810 (iWARP/RoCE) and X722 (iWARP) network devices.
index 7abeb57..b8e5e37 100644 (file)
@@ -945,7 +945,6 @@ int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
        u32 *cqb = NULL;
        void *cqc;
        int cqe_size;
-       unsigned int irqn;
        int eqn;
        int err;
 
@@ -984,7 +983,7 @@ int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
                INIT_WORK(&cq->notify_work, notify_soft_wc_handler);
        }
 
-       err = mlx5_vector2eqn(dev->mdev, vector, &eqn, &irqn);
+       err = mlx5_vector2eqn(dev->mdev, vector, &eqn);
        if (err)
                goto err_cqb;
 
@@ -1007,7 +1006,6 @@ int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
                goto err_cqb;
 
        mlx5_ib_dbg(dev, "cqn 0x%x\n", cq->mcq.cqn);
-       cq->mcq.irqn = irqn;
        if (udata)
                cq->mcq.tasklet_ctx.comp = mlx5_ib_cq_comp;
        else
index eb9b0a2..c869b2a 100644 (file)
@@ -975,7 +975,6 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_EQN)(
        struct mlx5_ib_dev *dev;
        int user_vector;
        int dev_eqn;
-       unsigned int irqn;
        int err;
 
        if (uverbs_copy_from(&user_vector, attrs,
@@ -987,7 +986,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_EQN)(
                return PTR_ERR(c);
        dev = to_mdev(c->ibucontext.device);
 
-       err = mlx5_vector2eqn(dev->mdev, user_vector, &dev_eqn, &irqn);
+       err = mlx5_vector2eqn(dev->mdev, user_vector, &dev_eqn);
        if (err < 0)
                return err;
 
index 094c976..2507051 100644 (file)
@@ -4454,7 +4454,8 @@ static void mlx5r_mp_remove(struct auxiliary_device *adev)
        mutex_lock(&mlx5_ib_multiport_mutex);
        if (mpi->ibdev)
                mlx5_ib_unbind_slave_port(mpi->ibdev, mpi);
-       list_del(&mpi->list);
+       else
+               list_del(&mpi->list);
        mutex_unlock(&mlx5_ib_multiport_mutex);
        kfree(mpi);
 }
index 3263851..3f1c5a4 100644 (file)
@@ -531,8 +531,8 @@ static void __cache_work_func(struct mlx5_cache_ent *ent)
                 */
                spin_unlock_irq(&ent->lock);
                need_delay = need_resched() || someone_adding(cache) ||
-                            time_after(jiffies,
-                                       READ_ONCE(cache->last_add) + 300 * HZ);
+                            !time_after(jiffies,
+                                        READ_ONCE(cache->last_add) + 300 * HZ);
                spin_lock_irq(&ent->lock);
                if (ent->disabled)
                        goto out;
index 0ea9a5a..1c1d1b5 100644 (file)
@@ -85,7 +85,7 @@ int rxe_mcast_add_grp_elem(struct rxe_dev *rxe, struct rxe_qp *qp,
                goto out;
        }
 
-       elem = rxe_alloc(&rxe->mc_elem_pool);
+       elem = rxe_alloc_locked(&rxe->mc_elem_pool);
        if (!elem) {
                err = -ENOMEM;
                goto out;
index dec9292..5ac27f2 100644 (file)
@@ -259,6 +259,7 @@ static void prepare_ipv4_hdr(struct dst_entry *dst, struct sk_buff *skb,
 
        iph->version    =       IPVERSION;
        iph->ihl        =       sizeof(struct iphdr) >> 2;
+       iph->tot_len    =       htons(skb->len);
        iph->frag_off   =       df;
        iph->protocol   =       proto;
        iph->tos        =       tos;
index 85b8125..72d9539 100644 (file)
@@ -63,7 +63,7 @@ struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe, int *num_elem,
        if (*num_elem < 0)
                goto err1;
 
-       q = kmalloc(sizeof(*q), GFP_KERNEL);
+       q = kzalloc(sizeof(*q), GFP_KERNEL);
        if (!q)
                goto err1;
 
index 3743dc3..360ec67 100644 (file)
@@ -318,7 +318,7 @@ static enum resp_states get_srq_wqe(struct rxe_qp *qp)
                pr_warn("%s: invalid num_sge in SRQ entry\n", __func__);
                return RESPST_ERR_MALFORMED_WQE;
        }
-       size = sizeof(wqe) + wqe->dma.num_sge*sizeof(struct rxe_sge);
+       size = sizeof(*wqe) + wqe->dma.num_sge*sizeof(struct rxe_sge);
        memcpy(&qp->resp.srq_wqe, wqe, size);
 
        qp->resp.wqe = &qp->resp.srq_wqe.wqe;
index 8a1e70e..7887941 100644 (file)
@@ -403,7 +403,7 @@ struct icc_path *devm_of_icc_get(struct device *dev, const char *name)
 {
        struct icc_path **ptr, *path;
 
-       ptr = devres_alloc(devm_icc_release, sizeof(**ptr), GFP_KERNEL);
+       ptr = devres_alloc(devm_icc_release, sizeof(*ptr), GFP_KERNEL);
        if (!ptr)
                return ERR_PTR(-ENOMEM);
 
@@ -973,9 +973,14 @@ void icc_node_add(struct icc_node *node, struct icc_provider *provider)
        }
        node->avg_bw = node->init_avg;
        node->peak_bw = node->init_peak;
+
+       if (provider->pre_aggregate)
+               provider->pre_aggregate(node);
+
        if (provider->aggregate)
                provider->aggregate(node, 0, node->init_avg, node->init_peak,
                                    &node->avg_bw, &node->peak_bw);
+
        provider->set(node, node);
        node->avg_bw = 0;
        node->peak_bw = 0;
@@ -1106,6 +1111,8 @@ void icc_sync_state(struct device *dev)
                dev_dbg(p->dev, "interconnect provider is in synced state\n");
                list_for_each_entry(n, &p->nodes, node_list) {
                        if (n->init_avg || n->init_peak) {
+                               n->init_avg = 0;
+                               n->init_peak = 0;
                                aggregate_requests(n);
                                p->set(n, n);
                        }
index bf01d09..f6fae64 100644 (file)
@@ -57,6 +57,11 @@ int qcom_icc_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
                        qn->sum_avg[i] += avg_bw;
                        qn->max_peak[i] = max_t(u32, qn->max_peak[i], peak_bw);
                }
+
+               if (node->init_avg || node->init_peak) {
+                       qn->sum_avg[i] = max_t(u64, qn->sum_avg[i], node->init_avg);
+                       qn->max_peak[i] = max_t(u64, qn->max_peak[i], node->init_peak);
+               }
        }
 
        *agg_avg += avg_bw;
@@ -79,7 +84,6 @@ EXPORT_SYMBOL_GPL(qcom_icc_aggregate);
 int qcom_icc_set(struct icc_node *src, struct icc_node *dst)
 {
        struct qcom_icc_provider *qp;
-       struct qcom_icc_node *qn;
        struct icc_node *node;
 
        if (!src)
@@ -88,12 +92,6 @@ int qcom_icc_set(struct icc_node *src, struct icc_node *dst)
                node = src;
 
        qp = to_qcom_provider(node->provider);
-       qn = node->data;
-
-       qn->sum_avg[QCOM_ICC_BUCKET_AMC] = max_t(u64, qn->sum_avg[QCOM_ICC_BUCKET_AMC],
-                                                node->avg_bw);
-       qn->max_peak[QCOM_ICC_BUCKET_AMC] = max_t(u64, qn->max_peak[QCOM_ICC_BUCKET_AMC],
-                                                 node->peak_bw);
 
        qcom_icc_bcm_voter_commit(qp->voter);
 
index 98ba927..6f0df62 100644 (file)
@@ -768,6 +768,7 @@ static void iommu_dma_free_noncontiguous(struct device *dev, size_t size,
        __iommu_dma_unmap(dev, sgt->sgl->dma_address, size);
        __iommu_dma_free_pages(sh->pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
        sg_free_table(&sh->sgt);
+       kfree(sh);
 }
 #endif /* CONFIG_DMA_REMAP */
 
index c6cf44a..9ec374e 100644 (file)
@@ -511,7 +511,7 @@ void intel_pasid_tear_down_entry(struct intel_iommu *iommu, struct device *dev,
                                 u32 pasid, bool fault_ignore)
 {
        struct pasid_entry *pte;
-       u16 did;
+       u16 did, pgtt;
 
        pte = intel_pasid_get_entry(dev, pasid);
        if (WARN_ON(!pte))
@@ -521,13 +521,19 @@ void intel_pasid_tear_down_entry(struct intel_iommu *iommu, struct device *dev,
                return;
 
        did = pasid_get_domain_id(pte);
+       pgtt = pasid_pte_get_pgtt(pte);
+
        intel_pasid_clear_entry(dev, pasid, fault_ignore);
 
        if (!ecap_coherent(iommu->ecap))
                clflush_cache_range(pte, sizeof(*pte));
 
        pasid_cache_invalidation_with_pasid(iommu, did, pasid);
-       qi_flush_piotlb(iommu, did, pasid, 0, -1, 0);
+
+       if (pgtt == PASID_ENTRY_PGTT_PT || pgtt == PASID_ENTRY_PGTT_FL_ONLY)
+               qi_flush_piotlb(iommu, did, pasid, 0, -1, 0);
+       else
+               iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
 
        /* Device IOTLB doesn't need to be flushed in caching mode. */
        if (!cap_caching_mode(iommu->cap))
index 5ff61c3..c11bc8b 100644 (file)
@@ -99,6 +99,12 @@ static inline bool pasid_pte_is_present(struct pasid_entry *pte)
        return READ_ONCE(pte->val[0]) & PASID_PTE_PRESENT;
 }
 
+/* Get PGTT field of a PASID table entry */
+static inline u16 pasid_pte_get_pgtt(struct pasid_entry *pte)
+{
+       return (u16)((READ_ONCE(pte->val[0]) >> 6) & 0x7);
+}
+
 extern unsigned int intel_pasid_max_id;
 int intel_pasid_alloc_table(struct device *dev);
 void intel_pasid_free_table(struct device *dev);
index 9b0f22b..4b9b3f3 100644 (file)
@@ -675,7 +675,6 @@ static int intel_svm_unbind_mm(struct device *dev, u32 pasid)
                        kfree_rcu(sdev, rcu);
 
                        if (list_empty(&svm->devs)) {
-                               intel_svm_free_pasid(mm);
                                if (svm->notifier.ops) {
                                        mmu_notifier_unregister(&svm->notifier, mm);
                                        /* Clear mm's pasid. */
@@ -690,6 +689,8 @@ static int intel_svm_unbind_mm(struct device *dev, u32 pasid)
                                kfree(svm);
                        }
                }
+               /* Drop a PASID reference and free it if no reference. */
+               intel_svm_free_pasid(mm);
        }
 out:
        return ret;
index 5419c4b..63f0af1 100644 (file)
@@ -924,6 +924,9 @@ void iommu_group_remove_device(struct device *dev)
        struct iommu_group *group = dev->iommu_group;
        struct group_device *tmp_device, *device = NULL;
 
+       if (!group)
+               return;
+
        dev_info(dev, "Removing from iommu group %d\n", group->id);
 
        /* Pre-notify listeners that a device is being removed. */
index 6019e58..83df387 100644 (file)
@@ -90,7 +90,7 @@ static int s390_iommu_attach_device(struct iommu_domain *domain,
        struct zpci_dev *zdev = to_zpci_dev(dev);
        struct s390_domain_device *domain_device;
        unsigned long flags;
-       int rc;
+       int cc, rc;
 
        if (!zdev)
                return -ENODEV;
@@ -99,14 +99,21 @@ static int s390_iommu_attach_device(struct iommu_domain *domain,
        if (!domain_device)
                return -ENOMEM;
 
-       if (zdev->dma_table)
-               zpci_dma_exit_device(zdev);
+       if (zdev->dma_table) {
+               cc = zpci_dma_exit_device(zdev);
+               if (cc) {
+                       rc = -EIO;
+                       goto out_free;
+               }
+       }
 
        zdev->dma_table = s390_domain->dma_table;
-       rc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
+       cc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
                                (u64) zdev->dma_table);
-       if (rc)
+       if (cc) {
+               rc = -EIO;
                goto out_restore;
+       }
 
        spin_lock_irqsave(&s390_domain->list_lock, flags);
        /* First device defines the DMA range limits */
@@ -130,6 +137,7 @@ static int s390_iommu_attach_device(struct iommu_domain *domain,
 
 out_restore:
        zpci_dma_init_device(zdev);
+out_free:
        kfree(domain_device);
 
        return rc;
index 3461b0a..cbfdade 100644 (file)
@@ -89,16 +89,13 @@ static void tpci200_unregister(struct tpci200_board *tpci200)
        free_irq(tpci200->info->pdev->irq, (void *) tpci200);
 
        pci_iounmap(tpci200->info->pdev, tpci200->info->interface_regs);
-       pci_iounmap(tpci200->info->pdev, tpci200->info->cfg_regs);
 
        pci_release_region(tpci200->info->pdev, TPCI200_IP_INTERFACE_BAR);
        pci_release_region(tpci200->info->pdev, TPCI200_IO_ID_INT_SPACES_BAR);
        pci_release_region(tpci200->info->pdev, TPCI200_MEM16_SPACE_BAR);
        pci_release_region(tpci200->info->pdev, TPCI200_MEM8_SPACE_BAR);
-       pci_release_region(tpci200->info->pdev, TPCI200_CFG_MEM_BAR);
 
        pci_disable_device(tpci200->info->pdev);
-       pci_dev_put(tpci200->info->pdev);
 }
 
 static void tpci200_enable_irq(struct tpci200_board *tpci200,
@@ -257,7 +254,7 @@ static int tpci200_register(struct tpci200_board *tpci200)
                        "(bn 0x%X, sn 0x%X) failed to allocate PCI resource for BAR 2 !",
                        tpci200->info->pdev->bus->number,
                        tpci200->info->pdev->devfn);
-               goto out_disable_pci;
+               goto err_disable_device;
        }
 
        /* Request IO ID INT space (Bar 3) */
@@ -269,7 +266,7 @@ static int tpci200_register(struct tpci200_board *tpci200)
                        "(bn 0x%X, sn 0x%X) failed to allocate PCI resource for BAR 3 !",
                        tpci200->info->pdev->bus->number,
                        tpci200->info->pdev->devfn);
-               goto out_release_ip_space;
+               goto err_ip_interface_bar;
        }
 
        /* Request MEM8 space (Bar 5) */
@@ -280,7 +277,7 @@ static int tpci200_register(struct tpci200_board *tpci200)
                        "(bn 0x%X, sn 0x%X) failed to allocate PCI resource for BAR 5!",
                        tpci200->info->pdev->bus->number,
                        tpci200->info->pdev->devfn);
-               goto out_release_ioid_int_space;
+               goto err_io_id_int_spaces_bar;
        }
 
        /* Request MEM16 space (Bar 4) */
@@ -291,7 +288,7 @@ static int tpci200_register(struct tpci200_board *tpci200)
                        "(bn 0x%X, sn 0x%X) failed to allocate PCI resource for BAR 4!",
                        tpci200->info->pdev->bus->number,
                        tpci200->info->pdev->devfn);
-               goto out_release_mem8_space;
+               goto err_mem8_space_bar;
        }
 
        /* Map internal tpci200 driver user space */
@@ -305,7 +302,7 @@ static int tpci200_register(struct tpci200_board *tpci200)
                        tpci200->info->pdev->bus->number,
                        tpci200->info->pdev->devfn);
                res = -ENOMEM;
-               goto out_release_mem8_space;
+               goto err_mem16_space_bar;
        }
 
        /* Initialize lock that protects interface_regs */
@@ -344,18 +341,22 @@ static int tpci200_register(struct tpci200_board *tpci200)
                        "(bn 0x%X, sn 0x%X) unable to register IRQ !",
                        tpci200->info->pdev->bus->number,
                        tpci200->info->pdev->devfn);
-               goto out_release_ioid_int_space;
+               goto err_interface_regs;
        }
 
        return 0;
 
-out_release_mem8_space:
+err_interface_regs:
+       pci_iounmap(tpci200->info->pdev, tpci200->info->interface_regs);
+err_mem16_space_bar:
+       pci_release_region(tpci200->info->pdev, TPCI200_MEM16_SPACE_BAR);
+err_mem8_space_bar:
        pci_release_region(tpci200->info->pdev, TPCI200_MEM8_SPACE_BAR);
-out_release_ioid_int_space:
+err_io_id_int_spaces_bar:
        pci_release_region(tpci200->info->pdev, TPCI200_IO_ID_INT_SPACES_BAR);
-out_release_ip_space:
+err_ip_interface_bar:
        pci_release_region(tpci200->info->pdev, TPCI200_IP_INTERFACE_BAR);
-out_disable_pci:
+err_disable_device:
        pci_disable_device(tpci200->info->pdev);
        return res;
 }
@@ -527,7 +528,7 @@ static int tpci200_pci_probe(struct pci_dev *pdev,
        tpci200->info = kzalloc(sizeof(struct tpci200_infos), GFP_KERNEL);
        if (!tpci200->info) {
                ret = -ENOMEM;
-               goto out_err_info;
+               goto err_tpci200;
        }
 
        pci_dev_get(pdev);
@@ -538,7 +539,7 @@ static int tpci200_pci_probe(struct pci_dev *pdev,
        if (ret) {
                dev_err(&pdev->dev, "Failed to allocate PCI Configuration Memory");
                ret = -EBUSY;
-               goto out_err_pci_request;
+               goto err_tpci200_info;
        }
        tpci200->info->cfg_regs = ioremap(
                        pci_resource_start(pdev, TPCI200_CFG_MEM_BAR),
@@ -546,7 +547,7 @@ static int tpci200_pci_probe(struct pci_dev *pdev,
        if (!tpci200->info->cfg_regs) {
                dev_err(&pdev->dev, "Failed to map PCI Configuration Memory");
                ret = -EFAULT;
-               goto out_err_ioremap;
+               goto err_request_region;
        }
 
        /* Disable byte swapping for 16 bit IP module access. This will ensure
@@ -569,7 +570,7 @@ static int tpci200_pci_probe(struct pci_dev *pdev,
        if (ret) {
                dev_err(&pdev->dev, "error during tpci200 install\n");
                ret = -ENODEV;
-               goto out_err_install;
+               goto err_cfg_regs;
        }
 
        /* Register the carrier in the industry pack bus driver */
@@ -581,7 +582,7 @@ static int tpci200_pci_probe(struct pci_dev *pdev,
                dev_err(&pdev->dev,
                        "error registering the carrier on ipack driver\n");
                ret = -EFAULT;
-               goto out_err_bus_register;
+               goto err_tpci200_install;
        }
 
        /* save the bus number given by ipack to logging purpose */
@@ -592,19 +593,16 @@ static int tpci200_pci_probe(struct pci_dev *pdev,
                tpci200_create_device(tpci200, i);
        return 0;
 
-out_err_bus_register:
+err_tpci200_install:
        tpci200_uninstall(tpci200);
-       /* tpci200->info->cfg_regs is unmapped in tpci200_uninstall */
-       tpci200->info->cfg_regs = NULL;
-out_err_install:
-       if (tpci200->info->cfg_regs)
-               iounmap(tpci200->info->cfg_regs);
-out_err_ioremap:
+err_cfg_regs:
+       pci_iounmap(tpci200->info->pdev, tpci200->info->cfg_regs);
+err_request_region:
        pci_release_region(pdev, TPCI200_CFG_MEM_BAR);
-out_err_pci_request:
-       pci_dev_put(pdev);
+err_tpci200_info:
        kfree(tpci200->info);
-out_err_info:
+       pci_dev_put(pdev);
+err_tpci200:
        kfree(tpci200);
        return ret;
 }
@@ -614,6 +612,12 @@ static void __tpci200_pci_remove(struct tpci200_board *tpci200)
        ipack_bus_unregister(tpci200->info->ipack_bus);
        tpci200_uninstall(tpci200);
 
+       pci_iounmap(tpci200->info->pdev, tpci200->info->cfg_regs);
+
+       pci_release_region(tpci200->info->pdev, TPCI200_CFG_MEM_BAR);
+
+       pci_dev_put(tpci200->info->pdev);
+
        kfree(tpci200->info);
        kfree(tpci200);
 }
index ede02dc..5ddb8e5 100644 (file)
@@ -267,9 +267,7 @@ static int alpine_msix_init(struct device_node *node,
                goto err_priv;
        }
 
-       priv->msi_map = kcalloc(BITS_TO_LONGS(priv->num_spis),
-                               sizeof(*priv->msi_map),
-                               GFP_KERNEL);
+       priv->msi_map = bitmap_zalloc(priv->num_spis, GFP_KERNEL);
        if (!priv->msi_map) {
                ret = -ENOMEM;
                goto err_priv;
@@ -285,7 +283,7 @@ static int alpine_msix_init(struct device_node *node,
        return 0;
 
 err_map:
-       kfree(priv->msi_map);
+       bitmap_free(priv->msi_map);
 err_priv:
        kfree(priv);
        return ret;
index b8c06bd..6fc145a 100644 (file)
@@ -226,7 +226,7 @@ static void aic_irq_eoi(struct irq_data *d)
         * Reading the interrupt reason automatically acknowledges and masks
         * the IRQ, so we just unmask it here if needed.
         */
-       if (!irqd_irq_disabled(d) && !irqd_irq_masked(d))
+       if (!irqd_irq_masked(d))
                aic_irq_unmask(d);
 }
 
index be9ea6f..9349fc6 100644 (file)
@@ -269,7 +269,7 @@ static void gicv2m_teardown(void)
 
        list_for_each_entry_safe(v2m, tmp, &v2m_nodes, entry) {
                list_del(&v2m->entry);
-               kfree(v2m->bm);
+               bitmap_free(v2m->bm);
                iounmap(v2m->base);
                of_node_put(to_of_node(v2m->fwnode));
                if (is_fwnode_irqchip(v2m->fwnode))
@@ -386,8 +386,7 @@ static int __init gicv2m_init_one(struct fwnode_handle *fwnode,
                        break;
                }
        }
-       v2m->bm = kcalloc(BITS_TO_LONGS(v2m->nr_spis), sizeof(long),
-                         GFP_KERNEL);
+       v2m->bm = bitmap_zalloc(v2m->nr_spis, GFP_KERNEL);
        if (!v2m->bm) {
                ret = -ENOMEM;
                goto err_iounmap;
index ba39668..7f40dca 100644 (file)
@@ -2140,7 +2140,7 @@ static unsigned long *its_lpi_alloc(int nr_irqs, u32 *base, int *nr_ids)
        if (err)
                goto out;
 
-       bitmap = kcalloc(BITS_TO_LONGS(nr_irqs), sizeof (long), GFP_ATOMIC);
+       bitmap = bitmap_zalloc(nr_irqs, GFP_ATOMIC);
        if (!bitmap)
                goto out;
 
@@ -2156,7 +2156,7 @@ out:
 static void its_lpi_free(unsigned long *bitmap, u32 base, u32 nr_ids)
 {
        WARN_ON(free_lpi_range(base, nr_ids));
-       kfree(bitmap);
+       bitmap_free(bitmap);
 }
 
 static void gic_reset_prop_table(void *va)
@@ -3387,7 +3387,7 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
        if (!dev || !itt ||  !col_map || (!lpi_map && alloc_lpis)) {
                kfree(dev);
                kfree(itt);
-               kfree(lpi_map);
+               bitmap_free(lpi_map);
                kfree(col_map);
                return NULL;
        }
index e81e89a..b84c9c2 100644 (file)
@@ -290,8 +290,7 @@ int __init mbi_init(struct fwnode_handle *fwnode, struct irq_domain *parent)
                if (ret)
                        goto err_free_mbi;
 
-               mbi_ranges[n].bm = kcalloc(BITS_TO_LONGS(mbi_ranges[n].nr_spis),
-                                          sizeof(long), GFP_KERNEL);
+               mbi_ranges[n].bm = bitmap_zalloc(mbi_ranges[n].nr_spis, GFP_KERNEL);
                if (!mbi_ranges[n].bm) {
                        ret = -ENOMEM;
                        goto err_free_mbi;
@@ -329,7 +328,7 @@ int __init mbi_init(struct fwnode_handle *fwnode, struct irq_domain *parent)
 err_free_mbi:
        if (mbi_ranges) {
                for (n = 0; n < mbi_range_nr; n++)
-                       kfree(mbi_ranges[n].bm);
+                       bitmap_free(mbi_ranges[n].bm);
                kfree(mbi_ranges);
        }
 
index e0f4deb..fd4e9a3 100644 (file)
@@ -100,6 +100,27 @@ EXPORT_SYMBOL(gic_pmr_sync);
 DEFINE_STATIC_KEY_FALSE(gic_nonsecure_priorities);
 EXPORT_SYMBOL(gic_nonsecure_priorities);
 
+/*
+ * When the Non-secure world has access to group 0 interrupts (as a
+ * consequence of SCR_EL3.FIQ == 0), reading the ICC_RPR_EL1 register will
+ * return the Distributor's view of the interrupt priority.
+ *
+ * When GIC security is enabled (GICD_CTLR.DS == 0), the interrupt priority
+ * written by software is moved to the Non-secure range by the Distributor.
+ *
+ * If both are true (which is when gic_nonsecure_priorities gets enabled),
+ * we need to shift down the priority programmed by software to match it
+ * against the value returned by ICC_RPR_EL1.
+ */
+#define GICD_INT_RPR_PRI(priority)                                     \
+       ({                                                              \
+               u32 __priority = (priority);                            \
+               if (static_branch_unlikely(&gic_nonsecure_priorities))  \
+                       __priority = 0x80 | (__priority >> 1);          \
+                                                                       \
+               __priority;                                             \
+       })
+
 /* ppi_nmi_refs[n] == number of cpus having ppi[n + 16] set as NMI */
 static refcount_t *ppi_nmi_refs;
 
@@ -446,18 +467,23 @@ static void gic_irq_set_prio(struct irq_data *d, u8 prio)
        writeb_relaxed(prio, base + offset + index);
 }
 
-static u32 gic_get_ppi_index(struct irq_data *d)
+static u32 __gic_get_ppi_index(irq_hw_number_t hwirq)
 {
-       switch (get_intid_range(d)) {
+       switch (__get_intid_range(hwirq)) {
        case PPI_RANGE:
-               return d->hwirq - 16;
+               return hwirq - 16;
        case EPPI_RANGE:
-               return d->hwirq - EPPI_BASE_INTID + 16;
+               return hwirq - EPPI_BASE_INTID + 16;
        default:
                unreachable();
        }
 }
 
+static u32 gic_get_ppi_index(struct irq_data *d)
+{
+       return __gic_get_ppi_index(d->hwirq);
+}
+
 static int gic_irq_nmi_setup(struct irq_data *d)
 {
        struct irq_desc *desc = irq_to_desc(d->irq);
@@ -687,7 +713,7 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs
                return;
 
        if (gic_supports_nmi() &&
-           unlikely(gic_read_rpr() == GICD_INT_NMI_PRI)) {
+           unlikely(gic_read_rpr() == GICD_INT_RPR_PRI(GICD_INT_NMI_PRI))) {
                gic_handle_nmi(irqnr, regs);
                return;
        }
@@ -1467,10 +1493,34 @@ static void gic_irq_domain_free(struct irq_domain *domain, unsigned int virq,
        }
 }
 
+static bool fwspec_is_partitioned_ppi(struct irq_fwspec *fwspec,
+                                     irq_hw_number_t hwirq)
+{
+       enum gic_intid_range range;
+
+       if (!gic_data.ppi_descs)
+               return false;
+
+       if (!is_of_node(fwspec->fwnode))
+               return false;
+
+       if (fwspec->param_count < 4 || !fwspec->param[3])
+               return false;
+
+       range = __get_intid_range(hwirq);
+       if (range != PPI_RANGE && range != EPPI_RANGE)
+               return false;
+
+       return true;
+}
+
 static int gic_irq_domain_select(struct irq_domain *d,
                                 struct irq_fwspec *fwspec,
                                 enum irq_domain_bus_token bus_token)
 {
+       unsigned int type, ret, ppi_idx;
+       irq_hw_number_t hwirq;
+
        /* Not for us */
         if (fwspec->fwnode != d->fwnode)
                return 0;
@@ -1479,16 +1529,19 @@ static int gic_irq_domain_select(struct irq_domain *d,
        if (!is_of_node(fwspec->fwnode))
                return 1;
 
+       ret = gic_irq_domain_translate(d, fwspec, &hwirq, &type);
+       if (WARN_ON_ONCE(ret))
+               return 0;
+
+       if (!fwspec_is_partitioned_ppi(fwspec, hwirq))
+               return d == gic_data.domain;
+
        /*
         * If this is a PPI and we have a 4th (non-null) parameter,
         * then we need to match the partition domain.
         */
-       if (fwspec->param_count >= 4 &&
-           fwspec->param[0] == 1 && fwspec->param[3] != 0 &&
-           gic_data.ppi_descs)
-               return d == partition_get_domain(gic_data.ppi_descs[fwspec->param[1]]);
-
-       return d == gic_data.domain;
+       ppi_idx = __gic_get_ppi_index(hwirq);
+       return d == partition_get_domain(gic_data.ppi_descs[ppi_idx]);
 }
 
 static const struct irq_domain_ops gic_irq_domain_ops = {
@@ -1503,7 +1556,9 @@ static int partition_domain_translate(struct irq_domain *d,
                                      unsigned long *hwirq,
                                      unsigned int *type)
 {
+       unsigned long ppi_intid;
        struct device_node *np;
+       unsigned int ppi_idx;
        int ret;
 
        if (!gic_data.ppi_descs)
@@ -1513,7 +1568,12 @@ static int partition_domain_translate(struct irq_domain *d,
        if (WARN_ON(!np))
                return -EINVAL;
 
-       ret = partition_translate_id(gic_data.ppi_descs[fwspec->param[1]],
+       ret = gic_irq_domain_translate(d, fwspec, &ppi_intid, type);
+       if (WARN_ON_ONCE(ret))
+               return 0;
+
+       ppi_idx = __gic_get_ppi_index(ppi_intid);
+       ret = partition_translate_id(gic_data.ppi_descs[ppi_idx],
                                     of_node_to_fwnode(np));
        if (ret < 0)
                return ret;
index f790ca6..a4eb8a2 100644 (file)
@@ -92,18 +92,22 @@ static int pch_pic_set_type(struct irq_data *d, unsigned int type)
        case IRQ_TYPE_EDGE_RISING:
                pch_pic_bitset(priv, PCH_PIC_EDGE, d->hwirq);
                pch_pic_bitclr(priv, PCH_PIC_POL, d->hwirq);
+               irq_set_handler_locked(d, handle_edge_irq);
                break;
        case IRQ_TYPE_EDGE_FALLING:
                pch_pic_bitset(priv, PCH_PIC_EDGE, d->hwirq);
                pch_pic_bitset(priv, PCH_PIC_POL, d->hwirq);
+               irq_set_handler_locked(d, handle_edge_irq);
                break;
        case IRQ_TYPE_LEVEL_HIGH:
                pch_pic_bitclr(priv, PCH_PIC_EDGE, d->hwirq);
                pch_pic_bitclr(priv, PCH_PIC_POL, d->hwirq);
+               irq_set_handler_locked(d, handle_level_irq);
                break;
        case IRQ_TYPE_LEVEL_LOW:
                pch_pic_bitclr(priv, PCH_PIC_EDGE, d->hwirq);
                pch_pic_bitset(priv, PCH_PIC_POL, d->hwirq);
+               irq_set_handler_locked(d, handle_level_irq);
                break;
        default:
                ret = -EINVAL;
@@ -113,11 +117,24 @@ static int pch_pic_set_type(struct irq_data *d, unsigned int type)
        return ret;
 }
 
+static void pch_pic_ack_irq(struct irq_data *d)
+{
+       unsigned int reg;
+       struct pch_pic *priv = irq_data_get_irq_chip_data(d);
+
+       reg = readl(priv->base + PCH_PIC_EDGE + PIC_REG_IDX(d->hwirq) * 4);
+       if (reg & BIT(PIC_REG_BIT(d->hwirq))) {
+               writel(BIT(PIC_REG_BIT(d->hwirq)),
+                       priv->base + PCH_PIC_CLR + PIC_REG_IDX(d->hwirq) * 4);
+       }
+       irq_chip_ack_parent(d);
+}
+
 static struct irq_chip pch_pic_irq_chip = {
        .name                   = "PCH PIC",
        .irq_mask               = pch_pic_mask_irq,
        .irq_unmask             = pch_pic_unmask_irq,
-       .irq_ack                = irq_chip_ack_parent,
+       .irq_ack                = pch_pic_ack_irq,
        .irq_set_affinity       = irq_chip_set_affinity_parent,
        .irq_set_type           = pch_pic_set_type,
 };
index 55322da..b4927e4 100644 (file)
@@ -362,10 +362,7 @@ static int ls_scfg_msi_probe(struct platform_device *pdev)
 
        msi_data->irqs_num = MSI_IRQS_PER_MSIR *
                             (1 << msi_data->cfg->ibs_shift);
-       msi_data->used = devm_kcalloc(&pdev->dev,
-                                   BITS_TO_LONGS(msi_data->irqs_num),
-                                   sizeof(*msi_data->used),
-                                   GFP_KERNEL);
+       msi_data->used = devm_bitmap_zalloc(&pdev->dev, msi_data->irqs_num, GFP_KERNEL);
        if (!msi_data->used)
                return -ENOMEM;
        /*
index 6ff98b8..586e52d 100644 (file)
@@ -65,6 +65,7 @@ static struct irq_chip mtk_sysirq_chip = {
        .irq_set_type           = mtk_sysirq_set_type,
        .irq_retrigger          = irq_chip_retrigger_hierarchy,
        .irq_set_affinity       = irq_chip_set_affinity_parent,
+       .flags                  = IRQCHIP_SKIP_SET_WAKE,
 };
 
 static int mtk_sysirq_domain_translate(struct irq_domain *d,
index 3be5c5d..fe88a78 100644 (file)
@@ -210,9 +210,7 @@ static int mvebu_gicp_probe(struct platform_device *pdev)
                gicp->spi_cnt += gicp->spi_ranges[i].count;
        }
 
-       gicp->spi_bitmap = devm_kcalloc(&pdev->dev,
-                               BITS_TO_LONGS(gicp->spi_cnt), sizeof(long),
-                               GFP_KERNEL);
+       gicp->spi_bitmap = devm_bitmap_zalloc(&pdev->dev, gicp->spi_cnt, GFP_KERNEL);
        if (!gicp->spi_bitmap)
                return -ENOMEM;
 
index b4d3678..dc4145a 100644 (file)
@@ -171,8 +171,7 @@ static int __init mvebu_odmi_init(struct device_node *node,
        if (!odmis)
                return -ENOMEM;
 
-       odmis_bm = kcalloc(BITS_TO_LONGS(odmis_count * NODMIS_PER_FRAME),
-                          sizeof(long), GFP_KERNEL);
+       odmis_bm = bitmap_zalloc(odmis_count * NODMIS_PER_FRAME, GFP_KERNEL);
        if (!odmis_bm) {
                ret = -ENOMEM;
                goto err_alloc;
@@ -227,7 +226,7 @@ err_unmap:
                if (odmi->base && !IS_ERR(odmi->base))
                        iounmap(odmis[i].base);
        }
-       kfree(odmis_bm);
+       bitmap_free(odmis_bm);
 err_alloc:
        kfree(odmis);
        return ret;
index 89c23a1..8e76d29 100644 (file)
@@ -215,8 +215,7 @@ struct partition_desc *partition_create_desc(struct fwnode_handle *fwnode,
                goto out;
        desc->domain = d;
 
-       desc->bitmap = kcalloc(BITS_TO_LONGS(nr_parts), sizeof(long),
-                              GFP_KERNEL);
+       desc->bitmap = bitmap_zalloc(nr_parts, GFP_KERNEL);
        if (WARN_ON(!desc->bitmap))
                goto out;
 
index 32d5920..173e652 100644 (file)
@@ -53,26 +53,6 @@ static u32 pdc_reg_read(int reg, u32 i)
        return readl_relaxed(pdc_base + reg + i * sizeof(u32));
 }
 
-static int qcom_pdc_gic_get_irqchip_state(struct irq_data *d,
-                                         enum irqchip_irq_state which,
-                                         bool *state)
-{
-       if (d->hwirq == GPIO_NO_WAKE_IRQ)
-               return 0;
-
-       return irq_chip_get_parent_state(d, which, state);
-}
-
-static int qcom_pdc_gic_set_irqchip_state(struct irq_data *d,
-                                         enum irqchip_irq_state which,
-                                         bool value)
-{
-       if (d->hwirq == GPIO_NO_WAKE_IRQ)
-               return 0;
-
-       return irq_chip_set_parent_state(d, which, value);
-}
-
 static void pdc_enable_intr(struct irq_data *d, bool on)
 {
        int pin_out = d->hwirq;
@@ -91,38 +71,16 @@ static void pdc_enable_intr(struct irq_data *d, bool on)
 
 static void qcom_pdc_gic_disable(struct irq_data *d)
 {
-       if (d->hwirq == GPIO_NO_WAKE_IRQ)
-               return;
-
        pdc_enable_intr(d, false);
        irq_chip_disable_parent(d);
 }
 
 static void qcom_pdc_gic_enable(struct irq_data *d)
 {
-       if (d->hwirq == GPIO_NO_WAKE_IRQ)
-               return;
-
        pdc_enable_intr(d, true);
        irq_chip_enable_parent(d);
 }
 
-static void qcom_pdc_gic_mask(struct irq_data *d)
-{
-       if (d->hwirq == GPIO_NO_WAKE_IRQ)
-               return;
-
-       irq_chip_mask_parent(d);
-}
-
-static void qcom_pdc_gic_unmask(struct irq_data *d)
-{
-       if (d->hwirq == GPIO_NO_WAKE_IRQ)
-               return;
-
-       irq_chip_unmask_parent(d);
-}
-
 /*
  * GIC does not handle falling edge or active low. To allow falling edge and
  * active low interrupts to be handled at GIC, PDC has an inverter that inverts
@@ -159,14 +117,10 @@ enum pdc_irq_config_bits {
  */
 static int qcom_pdc_gic_set_type(struct irq_data *d, unsigned int type)
 {
-       int pin_out = d->hwirq;
        enum pdc_irq_config_bits pdc_type;
        enum pdc_irq_config_bits old_pdc_type;
        int ret;
 
-       if (pin_out == GPIO_NO_WAKE_IRQ)
-               return 0;
-
        switch (type) {
        case IRQ_TYPE_EDGE_RISING:
                pdc_type = PDC_EDGE_RISING;
@@ -191,8 +145,8 @@ static int qcom_pdc_gic_set_type(struct irq_data *d, unsigned int type)
                return -EINVAL;
        }
 
-       old_pdc_type = pdc_reg_read(IRQ_i_CFG, pin_out);
-       pdc_reg_write(IRQ_i_CFG, pin_out, pdc_type);
+       old_pdc_type = pdc_reg_read(IRQ_i_CFG, d->hwirq);
+       pdc_reg_write(IRQ_i_CFG, d->hwirq, pdc_type);
 
        ret = irq_chip_set_type_parent(d, type);
        if (ret)
@@ -216,12 +170,12 @@ static int qcom_pdc_gic_set_type(struct irq_data *d, unsigned int type)
 static struct irq_chip qcom_pdc_gic_chip = {
        .name                   = "PDC",
        .irq_eoi                = irq_chip_eoi_parent,
-       .irq_mask               = qcom_pdc_gic_mask,
-       .irq_unmask             = qcom_pdc_gic_unmask,
+       .irq_mask               = irq_chip_mask_parent,
+       .irq_unmask             = irq_chip_unmask_parent,
        .irq_disable            = qcom_pdc_gic_disable,
        .irq_enable             = qcom_pdc_gic_enable,
-       .irq_get_irqchip_state  = qcom_pdc_gic_get_irqchip_state,
-       .irq_set_irqchip_state  = qcom_pdc_gic_set_irqchip_state,
+       .irq_get_irqchip_state  = irq_chip_get_parent_state,
+       .irq_set_irqchip_state  = irq_chip_set_parent_state,
        .irq_retrigger          = irq_chip_retrigger_hierarchy,
        .irq_set_type           = qcom_pdc_gic_set_type,
        .flags                  = IRQCHIP_MASK_ON_SUSPEND |
@@ -282,7 +236,7 @@ static int qcom_pdc_alloc(struct irq_domain *domain, unsigned int virq,
 
        parent_hwirq = get_parent_hwirq(hwirq);
        if (parent_hwirq == PDC_NO_PARENT_IRQ)
-               return 0;
+               return irq_domain_disconnect_hierarchy(domain->parent, virq);
 
        if (type & IRQ_TYPE_EDGE_BOTH)
                type = IRQ_TYPE_EDGE_RISING;
@@ -319,17 +273,17 @@ static int qcom_pdc_gpio_alloc(struct irq_domain *domain, unsigned int virq,
        if (ret)
                return ret;
 
+       if (hwirq == GPIO_NO_WAKE_IRQ)
+               return irq_domain_disconnect_hierarchy(domain, virq);
+
        ret = irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
                                            &qcom_pdc_gic_chip, NULL);
        if (ret)
                return ret;
 
-       if (hwirq == GPIO_NO_WAKE_IRQ)
-               return 0;
-
        parent_hwirq = get_parent_hwirq(hwirq);
        if (parent_hwirq == PDC_NO_PARENT_IRQ)
-               return 0;
+               return irq_domain_disconnect_hierarchy(domain->parent, virq);
 
        if (type & IRQ_TYPE_EDGE_BOTH)
                type = IRQ_TYPE_EDGE_RISING;
diff --git a/drivers/lightnvm/Kconfig b/drivers/lightnvm/Kconfig
deleted file mode 100644 (file)
index 04caa0f..0000000
+++ /dev/null
@@ -1,44 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# Open-Channel SSD NVM configuration
-#
-
-menuconfig NVM
-       bool "Open-Channel SSD target support (DEPRECATED)"
-       depends on BLOCK
-       help
-         Say Y here to get to enable Open-channel SSDs.
-
-         Open-Channel SSDs implement a set of extension to SSDs, that
-         exposes direct access to the underlying non-volatile memory.
-
-         If you say N, all options in this submenu will be skipped and disabled
-         only do this if you know what you are doing.
-
-         This code is deprecated and will be removed in Linux 5.15.
-
-if NVM
-
-config NVM_PBLK
-       tristate "Physical Block Device Open-Channel SSD target"
-       select CRC32
-       help
-         Allows an open-channel SSD to be exposed as a block device to the
-         host. The target assumes the device exposes raw flash and must be
-         explicitly managed by the host.
-
-         Please note the disk format is considered EXPERIMENTAL for now.
-
-if NVM_PBLK
-
-config NVM_PBLK_DEBUG
-       bool "PBlk Debug Support"
-       default n
-       help
-         Enables debug support for pblk. This includes extra checks, more
-         vocal error messages, and extra tracking fields in the pblk sysfs
-         entries.
-
-endif # NVM_PBLK_DEBUG
-
-endif # NVM
diff --git a/drivers/lightnvm/Makefile b/drivers/lightnvm/Makefile
deleted file mode 100644 (file)
index 97d9d7c..0000000
+++ /dev/null
@@ -1,11 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Makefile for Open-Channel SSDs.
-#
-
-obj-$(CONFIG_NVM)              := core.o
-obj-$(CONFIG_NVM_PBLK)         += pblk.o
-pblk-y                         := pblk-init.o pblk-core.o pblk-rb.o \
-                                  pblk-write.o pblk-cache.o pblk-read.o \
-                                  pblk-gc.o pblk-recovery.o pblk-map.o \
-                                  pblk-rl.o pblk-sysfs.o
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
deleted file mode 100644 (file)
index cf8a754..0000000
+++ /dev/null
@@ -1,1440 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2015 IT University of Copenhagen. All rights reserved.
- * Initial release: Matias Bjorling <m@bjorling.me>
- */
-
-#define pr_fmt(fmt) "nvm: " fmt
-
-#include <linux/list.h>
-#include <linux/types.h>
-#include <linux/sem.h>
-#include <linux/bitmap.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/miscdevice.h>
-#include <linux/lightnvm.h>
-#include <linux/sched/sysctl.h>
-
-static LIST_HEAD(nvm_tgt_types);
-static DECLARE_RWSEM(nvm_tgtt_lock);
-static LIST_HEAD(nvm_devices);
-static DECLARE_RWSEM(nvm_lock);
-
-/* Map between virtual and physical channel and lun */
-struct nvm_ch_map {
-       int ch_off;
-       int num_lun;
-       int *lun_offs;
-};
-
-struct nvm_dev_map {
-       struct nvm_ch_map *chnls;
-       int num_ch;
-};
-
-static void nvm_free(struct kref *ref);
-
-static struct nvm_target *nvm_find_target(struct nvm_dev *dev, const char *name)
-{
-       struct nvm_target *tgt;
-
-       list_for_each_entry(tgt, &dev->targets, list)
-               if (!strcmp(name, tgt->disk->disk_name))
-                       return tgt;
-
-       return NULL;
-}
-
-static bool nvm_target_exists(const char *name)
-{
-       struct nvm_dev *dev;
-       struct nvm_target *tgt;
-       bool ret = false;
-
-       down_write(&nvm_lock);
-       list_for_each_entry(dev, &nvm_devices, devices) {
-               mutex_lock(&dev->mlock);
-               list_for_each_entry(tgt, &dev->targets, list) {
-                       if (!strcmp(name, tgt->disk->disk_name)) {
-                               ret = true;
-                               mutex_unlock(&dev->mlock);
-                               goto out;
-                       }
-               }
-               mutex_unlock(&dev->mlock);
-       }
-
-out:
-       up_write(&nvm_lock);
-       return ret;
-}
-
-static int nvm_reserve_luns(struct nvm_dev *dev, int lun_begin, int lun_end)
-{
-       int i;
-
-       for (i = lun_begin; i <= lun_end; i++) {
-               if (test_and_set_bit(i, dev->lun_map)) {
-                       pr_err("lun %d already allocated\n", i);
-                       goto err;
-               }
-       }
-
-       return 0;
-err:
-       while (--i >= lun_begin)
-               clear_bit(i, dev->lun_map);
-
-       return -EBUSY;
-}
-
-static void nvm_release_luns_err(struct nvm_dev *dev, int lun_begin,
-                                int lun_end)
-{
-       int i;
-
-       for (i = lun_begin; i <= lun_end; i++)
-               WARN_ON(!test_and_clear_bit(i, dev->lun_map));
-}
-
-static void nvm_remove_tgt_dev(struct nvm_tgt_dev *tgt_dev, int clear)
-{
-       struct nvm_dev *dev = tgt_dev->parent;
-       struct nvm_dev_map *dev_map = tgt_dev->map;
-       int i, j;
-
-       for (i = 0; i < dev_map->num_ch; i++) {
-               struct nvm_ch_map *ch_map = &dev_map->chnls[i];
-               int *lun_offs = ch_map->lun_offs;
-               int ch = i + ch_map->ch_off;
-
-               if (clear) {
-                       for (j = 0; j < ch_map->num_lun; j++) {
-                               int lun = j + lun_offs[j];
-                               int lunid = (ch * dev->geo.num_lun) + lun;
-
-                               WARN_ON(!test_and_clear_bit(lunid,
-                                                       dev->lun_map));
-                       }
-               }
-
-               kfree(ch_map->lun_offs);
-       }
-
-       kfree(dev_map->chnls);
-       kfree(dev_map);
-
-       kfree(tgt_dev->luns);
-       kfree(tgt_dev);
-}
-
-static struct nvm_tgt_dev *nvm_create_tgt_dev(struct nvm_dev *dev,
-                                             u16 lun_begin, u16 lun_end,
-                                             u16 op)
-{
-       struct nvm_tgt_dev *tgt_dev = NULL;
-       struct nvm_dev_map *dev_rmap = dev->rmap;
-       struct nvm_dev_map *dev_map;
-       struct ppa_addr *luns;
-       int num_lun = lun_end - lun_begin + 1;
-       int luns_left = num_lun;
-       int num_ch = num_lun / dev->geo.num_lun;
-       int num_ch_mod = num_lun % dev->geo.num_lun;
-       int bch = lun_begin / dev->geo.num_lun;
-       int blun = lun_begin % dev->geo.num_lun;
-       int lunid = 0;
-       int lun_balanced = 1;
-       int sec_per_lun, prev_num_lun;
-       int i, j;
-
-       num_ch = (num_ch_mod == 0) ? num_ch : num_ch + 1;
-
-       dev_map = kmalloc(sizeof(struct nvm_dev_map), GFP_KERNEL);
-       if (!dev_map)
-               goto err_dev;
-
-       dev_map->chnls = kcalloc(num_ch, sizeof(struct nvm_ch_map), GFP_KERNEL);
-       if (!dev_map->chnls)
-               goto err_chnls;
-
-       luns = kcalloc(num_lun, sizeof(struct ppa_addr), GFP_KERNEL);
-       if (!luns)
-               goto err_luns;
-
-       prev_num_lun = (luns_left > dev->geo.num_lun) ?
-                                       dev->geo.num_lun : luns_left;
-       for (i = 0; i < num_ch; i++) {
-               struct nvm_ch_map *ch_rmap = &dev_rmap->chnls[i + bch];
-               int *lun_roffs = ch_rmap->lun_offs;
-               struct nvm_ch_map *ch_map = &dev_map->chnls[i];
-               int *lun_offs;
-               int luns_in_chnl = (luns_left > dev->geo.num_lun) ?
-                                       dev->geo.num_lun : luns_left;
-
-               if (lun_balanced && prev_num_lun != luns_in_chnl)
-                       lun_balanced = 0;
-
-               ch_map->ch_off = ch_rmap->ch_off = bch;
-               ch_map->num_lun = luns_in_chnl;
-
-               lun_offs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL);
-               if (!lun_offs)
-                       goto err_ch;
-
-               for (j = 0; j < luns_in_chnl; j++) {
-                       luns[lunid].ppa = 0;
-                       luns[lunid].a.ch = i;
-                       luns[lunid++].a.lun = j;
-
-                       lun_offs[j] = blun;
-                       lun_roffs[j + blun] = blun;
-               }
-
-               ch_map->lun_offs = lun_offs;
-
-               /* when starting a new channel, lun offset is reset */
-               blun = 0;
-               luns_left -= luns_in_chnl;
-       }
-
-       dev_map->num_ch = num_ch;
-
-       tgt_dev = kmalloc(sizeof(struct nvm_tgt_dev), GFP_KERNEL);
-       if (!tgt_dev)
-               goto err_ch;
-
-       /* Inherit device geometry from parent */
-       memcpy(&tgt_dev->geo, &dev->geo, sizeof(struct nvm_geo));
-
-       /* Target device only owns a portion of the physical device */
-       tgt_dev->geo.num_ch = num_ch;
-       tgt_dev->geo.num_lun = (lun_balanced) ? prev_num_lun : -1;
-       tgt_dev->geo.all_luns = num_lun;
-       tgt_dev->geo.all_chunks = num_lun * dev->geo.num_chk;
-
-       tgt_dev->geo.op = op;
-
-       sec_per_lun = dev->geo.clba * dev->geo.num_chk;
-       tgt_dev->geo.total_secs = num_lun * sec_per_lun;
-
-       tgt_dev->q = dev->q;
-       tgt_dev->map = dev_map;
-       tgt_dev->luns = luns;
-       tgt_dev->parent = dev;
-
-       return tgt_dev;
-err_ch:
-       while (--i >= 0)
-               kfree(dev_map->chnls[i].lun_offs);
-       kfree(luns);
-err_luns:
-       kfree(dev_map->chnls);
-err_chnls:
-       kfree(dev_map);
-err_dev:
-       return tgt_dev;
-}
-
-static struct nvm_tgt_type *__nvm_find_target_type(const char *name)
-{
-       struct nvm_tgt_type *tt;
-
-       list_for_each_entry(tt, &nvm_tgt_types, list)
-               if (!strcmp(name, tt->name))
-                       return tt;
-
-       return NULL;
-}
-
-static struct nvm_tgt_type *nvm_find_target_type(const char *name)
-{
-       struct nvm_tgt_type *tt;
-
-       down_write(&nvm_tgtt_lock);
-       tt = __nvm_find_target_type(name);
-       up_write(&nvm_tgtt_lock);
-
-       return tt;
-}
-
-static int nvm_config_check_luns(struct nvm_geo *geo, int lun_begin,
-                                int lun_end)
-{
-       if (lun_begin > lun_end || lun_end >= geo->all_luns) {
-               pr_err("lun out of bound (%u:%u > %u)\n",
-                       lun_begin, lun_end, geo->all_luns - 1);
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-static int __nvm_config_simple(struct nvm_dev *dev,
-                              struct nvm_ioctl_create_simple *s)
-{
-       struct nvm_geo *geo = &dev->geo;
-
-       if (s->lun_begin == -1 && s->lun_end == -1) {
-               s->lun_begin = 0;
-               s->lun_end = geo->all_luns - 1;
-       }
-
-       return nvm_config_check_luns(geo, s->lun_begin, s->lun_end);
-}
-
-static int __nvm_config_extended(struct nvm_dev *dev,
-                                struct nvm_ioctl_create_extended *e)
-{
-       if (e->lun_begin == 0xFFFF && e->lun_end == 0xFFFF) {
-               e->lun_begin = 0;
-               e->lun_end = dev->geo.all_luns - 1;
-       }
-
-       /* op not set falls into target's default */
-       if (e->op == 0xFFFF) {
-               e->op = NVM_TARGET_DEFAULT_OP;
-       } else if (e->op < NVM_TARGET_MIN_OP || e->op > NVM_TARGET_MAX_OP) {
-               pr_err("invalid over provisioning value\n");
-               return -EINVAL;
-       }
-
-       return nvm_config_check_luns(&dev->geo, e->lun_begin, e->lun_end);
-}
-
-static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
-{
-       struct nvm_ioctl_create_extended e;
-       struct gendisk *tdisk;
-       struct nvm_tgt_type *tt;
-       struct nvm_target *t;
-       struct nvm_tgt_dev *tgt_dev;
-       void *targetdata;
-       unsigned int mdts;
-       int ret;
-
-       switch (create->conf.type) {
-       case NVM_CONFIG_TYPE_SIMPLE:
-               ret = __nvm_config_simple(dev, &create->conf.s);
-               if (ret)
-                       return ret;
-
-               e.lun_begin = create->conf.s.lun_begin;
-               e.lun_end = create->conf.s.lun_end;
-               e.op = NVM_TARGET_DEFAULT_OP;
-               break;
-       case NVM_CONFIG_TYPE_EXTENDED:
-               ret = __nvm_config_extended(dev, &create->conf.e);
-               if (ret)
-                       return ret;
-
-               e = create->conf.e;
-               break;
-       default:
-               pr_err("config type not valid\n");
-               return -EINVAL;
-       }
-
-       tt = nvm_find_target_type(create->tgttype);
-       if (!tt) {
-               pr_err("target type %s not found\n", create->tgttype);
-               return -EINVAL;
-       }
-
-       if ((tt->flags & NVM_TGT_F_HOST_L2P) != (dev->geo.dom & NVM_RSP_L2P)) {
-               pr_err("device is incompatible with target L2P type.\n");
-               return -EINVAL;
-       }
-
-       if (nvm_target_exists(create->tgtname)) {
-               pr_err("target name already exists (%s)\n",
-                                                       create->tgtname);
-               return -EINVAL;
-       }
-
-       ret = nvm_reserve_luns(dev, e.lun_begin, e.lun_end);
-       if (ret)
-               return ret;
-
-       t = kmalloc(sizeof(struct nvm_target), GFP_KERNEL);
-       if (!t) {
-               ret = -ENOMEM;
-               goto err_reserve;
-       }
-
-       tgt_dev = nvm_create_tgt_dev(dev, e.lun_begin, e.lun_end, e.op);
-       if (!tgt_dev) {
-               pr_err("could not create target device\n");
-               ret = -ENOMEM;
-               goto err_t;
-       }
-
-       tdisk = blk_alloc_disk(dev->q->node);
-       if (!tdisk) {
-               ret = -ENOMEM;
-               goto err_dev;
-       }
-
-       strlcpy(tdisk->disk_name, create->tgtname, sizeof(tdisk->disk_name));
-       tdisk->major = 0;
-       tdisk->first_minor = 0;
-       tdisk->fops = tt->bops;
-
-       targetdata = tt->init(tgt_dev, tdisk, create->flags);
-       if (IS_ERR(targetdata)) {
-               ret = PTR_ERR(targetdata);
-               goto err_init;
-       }
-
-       tdisk->private_data = targetdata;
-       tdisk->queue->queuedata = targetdata;
-
-       mdts = (dev->geo.csecs >> 9) * NVM_MAX_VLBA;
-       if (dev->geo.mdts) {
-               mdts = min_t(u32, dev->geo.mdts,
-                               (dev->geo.csecs >> 9) * NVM_MAX_VLBA);
-       }
-       blk_queue_max_hw_sectors(tdisk->queue, mdts);
-
-       set_capacity(tdisk, tt->capacity(targetdata));
-       add_disk(tdisk);
-
-       if (tt->sysfs_init && tt->sysfs_init(tdisk)) {
-               ret = -ENOMEM;
-               goto err_sysfs;
-       }
-
-       t->type = tt;
-       t->disk = tdisk;
-       t->dev = tgt_dev;
-
-       mutex_lock(&dev->mlock);
-       list_add_tail(&t->list, &dev->targets);
-       mutex_unlock(&dev->mlock);
-
-       __module_get(tt->owner);
-
-       return 0;
-err_sysfs:
-       if (tt->exit)
-               tt->exit(targetdata, true);
-err_init:
-       blk_cleanup_disk(tdisk);
-err_dev:
-       nvm_remove_tgt_dev(tgt_dev, 0);
-err_t:
-       kfree(t);
-err_reserve:
-       nvm_release_luns_err(dev, e.lun_begin, e.lun_end);
-       return ret;
-}
-
-static void __nvm_remove_target(struct nvm_target *t, bool graceful)
-{
-       struct nvm_tgt_type *tt = t->type;
-       struct gendisk *tdisk = t->disk;
-
-       del_gendisk(tdisk);
-
-       if (tt->sysfs_exit)
-               tt->sysfs_exit(tdisk);
-
-       if (tt->exit)
-               tt->exit(tdisk->private_data, graceful);
-
-       nvm_remove_tgt_dev(t->dev, 1);
-       blk_cleanup_disk(tdisk);
-       module_put(t->type->owner);
-
-       list_del(&t->list);
-       kfree(t);
-}
-
-/**
- * nvm_remove_tgt - Removes a target from the media manager
- * @remove:    ioctl structure with target name to remove.
- *
- * Returns:
- * 0: on success
- * 1: on not found
- * <0: on error
- */
-static int nvm_remove_tgt(struct nvm_ioctl_remove *remove)
-{
-       struct nvm_target *t = NULL;
-       struct nvm_dev *dev;
-
-       down_read(&nvm_lock);
-       list_for_each_entry(dev, &nvm_devices, devices) {
-               mutex_lock(&dev->mlock);
-               t = nvm_find_target(dev, remove->tgtname);
-               if (t) {
-                       mutex_unlock(&dev->mlock);
-                       break;
-               }
-               mutex_unlock(&dev->mlock);
-       }
-       up_read(&nvm_lock);
-
-       if (!t) {
-               pr_err("failed to remove target %s\n",
-                               remove->tgtname);
-               return 1;
-       }
-
-       __nvm_remove_target(t, true);
-       kref_put(&dev->ref, nvm_free);
-
-       return 0;
-}
-
-static int nvm_register_map(struct nvm_dev *dev)
-{
-       struct nvm_dev_map *rmap;
-       int i, j;
-
-       rmap = kmalloc(sizeof(struct nvm_dev_map), GFP_KERNEL);
-       if (!rmap)
-               goto err_rmap;
-
-       rmap->chnls = kcalloc(dev->geo.num_ch, sizeof(struct nvm_ch_map),
-                                                               GFP_KERNEL);
-       if (!rmap->chnls)
-               goto err_chnls;
-
-       for (i = 0; i < dev->geo.num_ch; i++) {
-               struct nvm_ch_map *ch_rmap;
-               int *lun_roffs;
-               int luns_in_chnl = dev->geo.num_lun;
-
-               ch_rmap = &rmap->chnls[i];
-
-               ch_rmap->ch_off = -1;
-               ch_rmap->num_lun = luns_in_chnl;
-
-               lun_roffs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL);
-               if (!lun_roffs)
-                       goto err_ch;
-
-               for (j = 0; j < luns_in_chnl; j++)
-                       lun_roffs[j] = -1;
-
-               ch_rmap->lun_offs = lun_roffs;
-       }
-
-       dev->rmap = rmap;
-
-       return 0;
-err_ch:
-       while (--i >= 0)
-               kfree(rmap->chnls[i].lun_offs);
-err_chnls:
-       kfree(rmap);
-err_rmap:
-       return -ENOMEM;
-}
-
-static void nvm_unregister_map(struct nvm_dev *dev)
-{
-       struct nvm_dev_map *rmap = dev->rmap;
-       int i;
-
-       for (i = 0; i < dev->geo.num_ch; i++)
-               kfree(rmap->chnls[i].lun_offs);
-
-       kfree(rmap->chnls);
-       kfree(rmap);
-}
-
-static void nvm_map_to_dev(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
-{
-       struct nvm_dev_map *dev_map = tgt_dev->map;
-       struct nvm_ch_map *ch_map = &dev_map->chnls[p->a.ch];
-       int lun_off = ch_map->lun_offs[p->a.lun];
-
-       p->a.ch += ch_map->ch_off;
-       p->a.lun += lun_off;
-}
-
-static void nvm_map_to_tgt(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
-{
-       struct nvm_dev *dev = tgt_dev->parent;
-       struct nvm_dev_map *dev_rmap = dev->rmap;
-       struct nvm_ch_map *ch_rmap = &dev_rmap->chnls[p->a.ch];
-       int lun_roff = ch_rmap->lun_offs[p->a.lun];
-
-       p->a.ch -= ch_rmap->ch_off;
-       p->a.lun -= lun_roff;
-}
-
-static void nvm_ppa_tgt_to_dev(struct nvm_tgt_dev *tgt_dev,
-                               struct ppa_addr *ppa_list, int nr_ppas)
-{
-       int i;
-
-       for (i = 0; i < nr_ppas; i++) {
-               nvm_map_to_dev(tgt_dev, &ppa_list[i]);
-               ppa_list[i] = generic_to_dev_addr(tgt_dev->parent, ppa_list[i]);
-       }
-}
-
-static void nvm_ppa_dev_to_tgt(struct nvm_tgt_dev *tgt_dev,
-                               struct ppa_addr *ppa_list, int nr_ppas)
-{
-       int i;
-
-       for (i = 0; i < nr_ppas; i++) {
-               ppa_list[i] = dev_to_generic_addr(tgt_dev->parent, ppa_list[i]);
-               nvm_map_to_tgt(tgt_dev, &ppa_list[i]);
-       }
-}
-
-static void nvm_rq_tgt_to_dev(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
-{
-       struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
-
-       nvm_ppa_tgt_to_dev(tgt_dev, ppa_list, rqd->nr_ppas);
-}
-
-static void nvm_rq_dev_to_tgt(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
-{
-       struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
-
-       nvm_ppa_dev_to_tgt(tgt_dev, ppa_list, rqd->nr_ppas);
-}
-
-int nvm_register_tgt_type(struct nvm_tgt_type *tt)
-{
-       int ret = 0;
-
-       down_write(&nvm_tgtt_lock);
-       if (__nvm_find_target_type(tt->name))
-               ret = -EEXIST;
-       else
-               list_add(&tt->list, &nvm_tgt_types);
-       up_write(&nvm_tgtt_lock);
-
-       return ret;
-}
-EXPORT_SYMBOL(nvm_register_tgt_type);
-
-void nvm_unregister_tgt_type(struct nvm_tgt_type *tt)
-{
-       if (!tt)
-               return;
-
-       down_write(&nvm_tgtt_lock);
-       list_del(&tt->list);
-       up_write(&nvm_tgtt_lock);
-}
-EXPORT_SYMBOL(nvm_unregister_tgt_type);
-
-void *nvm_dev_dma_alloc(struct nvm_dev *dev, gfp_t mem_flags,
-                                                       dma_addr_t *dma_handler)
-{
-       return dev->ops->dev_dma_alloc(dev, dev->dma_pool, mem_flags,
-                                                               dma_handler);
-}
-EXPORT_SYMBOL(nvm_dev_dma_alloc);
-
-void nvm_dev_dma_free(struct nvm_dev *dev, void *addr, dma_addr_t dma_handler)
-{
-       dev->ops->dev_dma_free(dev->dma_pool, addr, dma_handler);
-}
-EXPORT_SYMBOL(nvm_dev_dma_free);
-
-static struct nvm_dev *nvm_find_nvm_dev(const char *name)
-{
-       struct nvm_dev *dev;
-
-       list_for_each_entry(dev, &nvm_devices, devices)
-               if (!strcmp(name, dev->name))
-                       return dev;
-
-       return NULL;
-}
-
-static int nvm_set_rqd_ppalist(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd,
-                       const struct ppa_addr *ppas, int nr_ppas)
-{
-       struct nvm_dev *dev = tgt_dev->parent;
-       struct nvm_geo *geo = &tgt_dev->geo;
-       int i, plane_cnt, pl_idx;
-       struct ppa_addr ppa;
-
-       if (geo->pln_mode == NVM_PLANE_SINGLE && nr_ppas == 1) {
-               rqd->nr_ppas = nr_ppas;
-               rqd->ppa_addr = ppas[0];
-
-               return 0;
-       }
-
-       rqd->nr_ppas = nr_ppas;
-       rqd->ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL, &rqd->dma_ppa_list);
-       if (!rqd->ppa_list) {
-               pr_err("failed to allocate dma memory\n");
-               return -ENOMEM;
-       }
-
-       plane_cnt = geo->pln_mode;
-       rqd->nr_ppas *= plane_cnt;
-
-       for (i = 0; i < nr_ppas; i++) {
-               for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
-                       ppa = ppas[i];
-                       ppa.g.pl = pl_idx;
-                       rqd->ppa_list[(pl_idx * nr_ppas) + i] = ppa;
-               }
-       }
-
-       return 0;
-}
-
-static void nvm_free_rqd_ppalist(struct nvm_tgt_dev *tgt_dev,
-                       struct nvm_rq *rqd)
-{
-       if (!rqd->ppa_list)
-               return;
-
-       nvm_dev_dma_free(tgt_dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
-}
-
-static int nvm_set_flags(struct nvm_geo *geo, struct nvm_rq *rqd)
-{
-       int flags = 0;
-
-       if (geo->version == NVM_OCSSD_SPEC_20)
-               return 0;
-
-       if (rqd->is_seq)
-               flags |= geo->pln_mode >> 1;
-
-       if (rqd->opcode == NVM_OP_PREAD)
-               flags |= (NVM_IO_SCRAMBLE_ENABLE | NVM_IO_SUSPEND);
-       else if (rqd->opcode == NVM_OP_PWRITE)
-               flags |= NVM_IO_SCRAMBLE_ENABLE;
-
-       return flags;
-}
-
-int nvm_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd, void *buf)
-{
-       struct nvm_dev *dev = tgt_dev->parent;
-       int ret;
-
-       if (!dev->ops->submit_io)
-               return -ENODEV;
-
-       nvm_rq_tgt_to_dev(tgt_dev, rqd);
-
-       rqd->dev = tgt_dev;
-       rqd->flags = nvm_set_flags(&tgt_dev->geo, rqd);
-
-       /* In case of error, fail with right address format */
-       ret = dev->ops->submit_io(dev, rqd, buf);
-       if (ret)
-               nvm_rq_dev_to_tgt(tgt_dev, rqd);
-       return ret;
-}
-EXPORT_SYMBOL(nvm_submit_io);
-
-static void nvm_sync_end_io(struct nvm_rq *rqd)
-{
-       struct completion *waiting = rqd->private;
-
-       complete(waiting);
-}
-
-static int nvm_submit_io_wait(struct nvm_dev *dev, struct nvm_rq *rqd,
-                             void *buf)
-{
-       DECLARE_COMPLETION_ONSTACK(wait);
-       int ret = 0;
-
-       rqd->end_io = nvm_sync_end_io;
-       rqd->private = &wait;
-
-       ret = dev->ops->submit_io(dev, rqd, buf);
-       if (ret)
-               return ret;
-
-       wait_for_completion_io(&wait);
-
-       return 0;
-}
-
-int nvm_submit_io_sync(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd,
-                      void *buf)
-{
-       struct nvm_dev *dev = tgt_dev->parent;
-       int ret;
-
-       if (!dev->ops->submit_io)
-               return -ENODEV;
-
-       nvm_rq_tgt_to_dev(tgt_dev, rqd);
-
-       rqd->dev = tgt_dev;
-       rqd->flags = nvm_set_flags(&tgt_dev->geo, rqd);
-
-       ret = nvm_submit_io_wait(dev, rqd, buf);
-
-       return ret;
-}
-EXPORT_SYMBOL(nvm_submit_io_sync);
-
-void nvm_end_io(struct nvm_rq *rqd)
-{
-       struct nvm_tgt_dev *tgt_dev = rqd->dev;
-
-       /* Convert address space */
-       if (tgt_dev)
-               nvm_rq_dev_to_tgt(tgt_dev, rqd);
-
-       if (rqd->end_io)
-               rqd->end_io(rqd);
-}
-EXPORT_SYMBOL(nvm_end_io);
-
-static int nvm_submit_io_sync_raw(struct nvm_dev *dev, struct nvm_rq *rqd)
-{
-       if (!dev->ops->submit_io)
-               return -ENODEV;
-
-       rqd->dev = NULL;
-       rqd->flags = nvm_set_flags(&dev->geo, rqd);
-
-       return nvm_submit_io_wait(dev, rqd, NULL);
-}
-
-static int nvm_bb_chunk_sense(struct nvm_dev *dev, struct ppa_addr ppa)
-{
-       struct nvm_rq rqd = { NULL };
-       struct bio bio;
-       struct bio_vec bio_vec;
-       struct page *page;
-       int ret;
-
-       page = alloc_page(GFP_KERNEL);
-       if (!page)
-               return -ENOMEM;
-
-       bio_init(&bio, &bio_vec, 1);
-       bio_add_page(&bio, page, PAGE_SIZE, 0);
-       bio_set_op_attrs(&bio, REQ_OP_READ, 0);
-
-       rqd.bio = &bio;
-       rqd.opcode = NVM_OP_PREAD;
-       rqd.is_seq = 1;
-       rqd.nr_ppas = 1;
-       rqd.ppa_addr = generic_to_dev_addr(dev, ppa);
-
-       ret = nvm_submit_io_sync_raw(dev, &rqd);
-       __free_page(page);
-       if (ret)
-               return ret;
-
-       return rqd.error;
-}
-
-/*
- * Scans a 1.2 chunk first and last page to determine if its state.
- * If the chunk is found to be open, also scan it to update the write
- * pointer.
- */
-static int nvm_bb_chunk_scan(struct nvm_dev *dev, struct ppa_addr ppa,
-                            struct nvm_chk_meta *meta)
-{
-       struct nvm_geo *geo = &dev->geo;
-       int ret, pg, pl;
-
-       /* sense first page */
-       ret = nvm_bb_chunk_sense(dev, ppa);
-       if (ret < 0) /* io error */
-               return ret;
-       else if (ret == 0) /* valid data */
-               meta->state = NVM_CHK_ST_OPEN;
-       else if (ret > 0) {
-               /*
-                * If empty page, the chunk is free, else it is an
-                * actual io error. In that case, mark it offline.
-                */
-               switch (ret) {
-               case NVM_RSP_ERR_EMPTYPAGE:
-                       meta->state = NVM_CHK_ST_FREE;
-                       return 0;
-               case NVM_RSP_ERR_FAILCRC:
-               case NVM_RSP_ERR_FAILECC:
-               case NVM_RSP_WARN_HIGHECC:
-                       meta->state = NVM_CHK_ST_OPEN;
-                       goto scan;
-               default:
-                       return -ret; /* other io error */
-               }
-       }
-
-       /* sense last page */
-       ppa.g.pg = geo->num_pg - 1;
-       ppa.g.pl = geo->num_pln - 1;
-
-       ret = nvm_bb_chunk_sense(dev, ppa);
-       if (ret < 0) /* io error */
-               return ret;
-       else if (ret == 0) { /* Chunk fully written */
-               meta->state = NVM_CHK_ST_CLOSED;
-               meta->wp = geo->clba;
-               return 0;
-       } else if (ret > 0) {
-               switch (ret) {
-               case NVM_RSP_ERR_EMPTYPAGE:
-               case NVM_RSP_ERR_FAILCRC:
-               case NVM_RSP_ERR_FAILECC:
-               case NVM_RSP_WARN_HIGHECC:
-                       meta->state = NVM_CHK_ST_OPEN;
-                       break;
-               default:
-                       return -ret; /* other io error */
-               }
-       }
-
-scan:
-       /*
-        * chunk is open, we scan sequentially to update the write pointer.
-        * We make the assumption that targets write data across all planes
-        * before moving to the next page.
-        */
-       for (pg = 0; pg < geo->num_pg; pg++) {
-               for (pl = 0; pl < geo->num_pln; pl++) {
-                       ppa.g.pg = pg;
-                       ppa.g.pl = pl;
-
-                       ret = nvm_bb_chunk_sense(dev, ppa);
-                       if (ret < 0) /* io error */
-                               return ret;
-                       else if (ret == 0) {
-                               meta->wp += geo->ws_min;
-                       } else if (ret > 0) {
-                               switch (ret) {
-                               case NVM_RSP_ERR_EMPTYPAGE:
-                                       return 0;
-                               case NVM_RSP_ERR_FAILCRC:
-                               case NVM_RSP_ERR_FAILECC:
-                               case NVM_RSP_WARN_HIGHECC:
-                                       meta->wp += geo->ws_min;
-                                       break;
-                               default:
-                                       return -ret; /* other io error */
-                               }
-                       }
-               }
-       }
-
-       return 0;
-}
-
-/*
- * folds a bad block list from its plane representation to its
- * chunk representation.
- *
- * If any of the planes status are bad or grown bad, the chunk is marked
- * offline. If not bad, the first plane state acts as the chunk state.
- */
-static int nvm_bb_to_chunk(struct nvm_dev *dev, struct ppa_addr ppa,
-                          u8 *blks, int nr_blks, struct nvm_chk_meta *meta)
-{
-       struct nvm_geo *geo = &dev->geo;
-       int ret, blk, pl, offset, blktype;
-
-       for (blk = 0; blk < geo->num_chk; blk++) {
-               offset = blk * geo->pln_mode;
-               blktype = blks[offset];
-
-               for (pl = 0; pl < geo->pln_mode; pl++) {
-                       if (blks[offset + pl] &
-                                       (NVM_BLK_T_BAD|NVM_BLK_T_GRWN_BAD)) {
-                               blktype = blks[offset + pl];
-                               break;
-                       }
-               }
-
-               ppa.g.blk = blk;
-
-               meta->wp = 0;
-               meta->type = NVM_CHK_TP_W_SEQ;
-               meta->wi = 0;
-               meta->slba = generic_to_dev_addr(dev, ppa).ppa;
-               meta->cnlb = dev->geo.clba;
-
-               if (blktype == NVM_BLK_T_FREE) {
-                       ret = nvm_bb_chunk_scan(dev, ppa, meta);
-                       if (ret)
-                               return ret;
-               } else {
-                       meta->state = NVM_CHK_ST_OFFLINE;
-               }
-
-               meta++;
-       }
-
-       return 0;
-}
-
-static int nvm_get_bb_meta(struct nvm_dev *dev, sector_t slba,
-                          int nchks, struct nvm_chk_meta *meta)
-{
-       struct nvm_geo *geo = &dev->geo;
-       struct ppa_addr ppa;
-       u8 *blks;
-       int ch, lun, nr_blks;
-       int ret = 0;
-
-       ppa.ppa = slba;
-       ppa = dev_to_generic_addr(dev, ppa);
-
-       if (ppa.g.blk != 0)
-               return -EINVAL;
-
-       if ((nchks % geo->num_chk) != 0)
-               return -EINVAL;
-
-       nr_blks = geo->num_chk * geo->pln_mode;
-
-       blks = kmalloc(nr_blks, GFP_KERNEL);
-       if (!blks)
-               return -ENOMEM;
-
-       for (ch = ppa.g.ch; ch < geo->num_ch; ch++) {
-               for (lun = ppa.g.lun; lun < geo->num_lun; lun++) {
-                       struct ppa_addr ppa_gen, ppa_dev;
-
-                       if (!nchks)
-                               goto done;
-
-                       ppa_gen.ppa = 0;
-                       ppa_gen.g.ch = ch;
-                       ppa_gen.g.lun = lun;
-                       ppa_dev = generic_to_dev_addr(dev, ppa_gen);
-
-                       ret = dev->ops->get_bb_tbl(dev, ppa_dev, blks);
-                       if (ret)
-                               goto done;
-
-                       ret = nvm_bb_to_chunk(dev, ppa_gen, blks, nr_blks,
-                                                                       meta);
-                       if (ret)
-                               goto done;
-
-                       meta += geo->num_chk;
-                       nchks -= geo->num_chk;
-               }
-       }
-done:
-       kfree(blks);
-       return ret;
-}
-
-int nvm_get_chunk_meta(struct nvm_tgt_dev *tgt_dev, struct ppa_addr ppa,
-                      int nchks, struct nvm_chk_meta *meta)
-{
-       struct nvm_dev *dev = tgt_dev->parent;
-
-       nvm_ppa_tgt_to_dev(tgt_dev, &ppa, 1);
-
-       if (dev->geo.version == NVM_OCSSD_SPEC_12)
-               return nvm_get_bb_meta(dev, (sector_t)ppa.ppa, nchks, meta);
-
-       return dev->ops->get_chk_meta(dev, (sector_t)ppa.ppa, nchks, meta);
-}
-EXPORT_SYMBOL_GPL(nvm_get_chunk_meta);
-
-int nvm_set_chunk_meta(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
-                      int nr_ppas, int type)
-{
-       struct nvm_dev *dev = tgt_dev->parent;
-       struct nvm_rq rqd;
-       int ret;
-
-       if (dev->geo.version == NVM_OCSSD_SPEC_20)
-               return 0;
-
-       if (nr_ppas > NVM_MAX_VLBA) {
-               pr_err("unable to update all blocks atomically\n");
-               return -EINVAL;
-       }
-
-       memset(&rqd, 0, sizeof(struct nvm_rq));
-
-       nvm_set_rqd_ppalist(tgt_dev, &rqd, ppas, nr_ppas);
-       nvm_rq_tgt_to_dev(tgt_dev, &rqd);
-
-       ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
-       nvm_free_rqd_ppalist(tgt_dev, &rqd);
-       if (ret)
-               return -EINVAL;
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(nvm_set_chunk_meta);
-
-static int nvm_core_init(struct nvm_dev *dev)
-{
-       struct nvm_geo *geo = &dev->geo;
-       int ret;
-
-       dev->lun_map = kcalloc(BITS_TO_LONGS(geo->all_luns),
-                                       sizeof(unsigned long), GFP_KERNEL);
-       if (!dev->lun_map)
-               return -ENOMEM;
-
-       INIT_LIST_HEAD(&dev->area_list);
-       INIT_LIST_HEAD(&dev->targets);
-       mutex_init(&dev->mlock);
-       spin_lock_init(&dev->lock);
-
-       ret = nvm_register_map(dev);
-       if (ret)
-               goto err_fmtype;
-
-       return 0;
-err_fmtype:
-       kfree(dev->lun_map);
-       return ret;
-}
-
-static void nvm_free(struct kref *ref)
-{
-       struct nvm_dev *dev = container_of(ref, struct nvm_dev, ref);
-
-       if (dev->dma_pool)
-               dev->ops->destroy_dma_pool(dev->dma_pool);
-
-       if (dev->rmap)
-               nvm_unregister_map(dev);
-
-       kfree(dev->lun_map);
-       kfree(dev);
-}
-
-static int nvm_init(struct nvm_dev *dev)
-{
-       struct nvm_geo *geo = &dev->geo;
-       int ret = -EINVAL;
-
-       if (dev->ops->identity(dev)) {
-               pr_err("device could not be identified\n");
-               goto err;
-       }
-
-       pr_debug("ver:%u.%u nvm_vendor:%x\n", geo->major_ver_id,
-                       geo->minor_ver_id, geo->vmnt);
-
-       ret = nvm_core_init(dev);
-       if (ret) {
-               pr_err("could not initialize core structures.\n");
-               goto err;
-       }
-
-       pr_info("registered %s [%u/%u/%u/%u/%u]\n",
-                       dev->name, dev->geo.ws_min, dev->geo.ws_opt,
-                       dev->geo.num_chk, dev->geo.all_luns,
-                       dev->geo.num_ch);
-       return 0;
-err:
-       pr_err("failed to initialize nvm\n");
-       return ret;
-}
-
-struct nvm_dev *nvm_alloc_dev(int node)
-{
-       struct nvm_dev *dev;
-
-       dev = kzalloc_node(sizeof(struct nvm_dev), GFP_KERNEL, node);
-       if (dev)
-               kref_init(&dev->ref);
-
-       return dev;
-}
-EXPORT_SYMBOL(nvm_alloc_dev);
-
-int nvm_register(struct nvm_dev *dev)
-{
-       int ret, exp_pool_size;
-
-       pr_warn_once("lightnvm support is deprecated and will be removed in Linux 5.15.\n");
-
-       if (!dev->q || !dev->ops) {
-               kref_put(&dev->ref, nvm_free);
-               return -EINVAL;
-       }
-
-       ret = nvm_init(dev);
-       if (ret) {
-               kref_put(&dev->ref, nvm_free);
-               return ret;
-       }
-
-       exp_pool_size = max_t(int, PAGE_SIZE,
-                             (NVM_MAX_VLBA * (sizeof(u64) + dev->geo.sos)));
-       exp_pool_size = round_up(exp_pool_size, PAGE_SIZE);
-
-       dev->dma_pool = dev->ops->create_dma_pool(dev, "ppalist",
-                                                 exp_pool_size);
-       if (!dev->dma_pool) {
-               pr_err("could not create dma pool\n");
-               kref_put(&dev->ref, nvm_free);
-               return -ENOMEM;
-       }
-
-       /* register device with a supported media manager */
-       down_write(&nvm_lock);
-       list_add(&dev->devices, &nvm_devices);
-       up_write(&nvm_lock);
-
-       return 0;
-}
-EXPORT_SYMBOL(nvm_register);
-
-void nvm_unregister(struct nvm_dev *dev)
-{
-       struct nvm_target *t, *tmp;
-
-       mutex_lock(&dev->mlock);
-       list_for_each_entry_safe(t, tmp, &dev->targets, list) {
-               if (t->dev->parent != dev)
-                       continue;
-               __nvm_remove_target(t, false);
-               kref_put(&dev->ref, nvm_free);
-       }
-       mutex_unlock(&dev->mlock);
-
-       down_write(&nvm_lock);
-       list_del(&dev->devices);
-       up_write(&nvm_lock);
-
-       kref_put(&dev->ref, nvm_free);
-}
-EXPORT_SYMBOL(nvm_unregister);
-
-static int __nvm_configure_create(struct nvm_ioctl_create *create)
-{
-       struct nvm_dev *dev;
-       int ret;
-
-       down_write(&nvm_lock);
-       dev = nvm_find_nvm_dev(create->dev);
-       up_write(&nvm_lock);
-
-       if (!dev) {
-               pr_err("device not found\n");
-               return -EINVAL;
-       }
-
-       kref_get(&dev->ref);
-       ret = nvm_create_tgt(dev, create);
-       if (ret)
-               kref_put(&dev->ref, nvm_free);
-
-       return ret;
-}
-
-static long nvm_ioctl_info(struct file *file, void __user *arg)
-{
-       struct nvm_ioctl_info *info;
-       struct nvm_tgt_type *tt;
-       int tgt_iter = 0;
-
-       info = memdup_user(arg, sizeof(struct nvm_ioctl_info));
-       if (IS_ERR(info))
-               return PTR_ERR(info);
-
-       info->version[0] = NVM_VERSION_MAJOR;
-       info->version[1] = NVM_VERSION_MINOR;
-       info->version[2] = NVM_VERSION_PATCH;
-
-       down_write(&nvm_tgtt_lock);
-       list_for_each_entry(tt, &nvm_tgt_types, list) {
-               struct nvm_ioctl_info_tgt *tgt = &info->tgts[tgt_iter];
-
-               tgt->version[0] = tt->version[0];
-               tgt->version[1] = tt->version[1];
-               tgt->version[2] = tt->version[2];
-               strncpy(tgt->tgtname, tt->name, NVM_TTYPE_NAME_MAX);
-
-               tgt_iter++;
-       }
-
-       info->tgtsize = tgt_iter;
-       up_write(&nvm_tgtt_lock);
-
-       if (copy_to_user(arg, info, sizeof(struct nvm_ioctl_info))) {
-               kfree(info);
-               return -EFAULT;
-       }
-
-       kfree(info);
-       return 0;
-}
-
-static long nvm_ioctl_get_devices(struct file *file, void __user *arg)
-{
-       struct nvm_ioctl_get_devices *devices;
-       struct nvm_dev *dev;
-       int i = 0;
-
-       devices = kzalloc(sizeof(struct nvm_ioctl_get_devices), GFP_KERNEL);
-       if (!devices)
-               return -ENOMEM;
-
-       down_write(&nvm_lock);
-       list_for_each_entry(dev, &nvm_devices, devices) {
-               struct nvm_ioctl_device_info *info = &devices->info[i];
-
-               strlcpy(info->devname, dev->name, sizeof(info->devname));
-
-               /* kept for compatibility */
-               info->bmversion[0] = 1;
-               info->bmversion[1] = 0;
-               info->bmversion[2] = 0;
-               strlcpy(info->bmname, "gennvm", sizeof(info->bmname));
-               i++;
-
-               if (i >= ARRAY_SIZE(devices->info)) {
-                       pr_err("max %zd devices can be reported.\n",
-                              ARRAY_SIZE(devices->info));
-                       break;
-               }
-       }
-       up_write(&nvm_lock);
-
-       devices->nr_devices = i;
-
-       if (copy_to_user(arg, devices,
-                        sizeof(struct nvm_ioctl_get_devices))) {
-               kfree(devices);
-               return -EFAULT;
-       }
-
-       kfree(devices);
-       return 0;
-}
-
-static long nvm_ioctl_dev_create(struct file *file, void __user *arg)
-{
-       struct nvm_ioctl_create create;
-
-       if (copy_from_user(&create, arg, sizeof(struct nvm_ioctl_create)))
-               return -EFAULT;
-
-       if (create.conf.type == NVM_CONFIG_TYPE_EXTENDED &&
-           create.conf.e.rsv != 0) {
-               pr_err("reserved config field in use\n");
-               return -EINVAL;
-       }
-
-       create.dev[DISK_NAME_LEN - 1] = '\0';
-       create.tgttype[NVM_TTYPE_NAME_MAX - 1] = '\0';
-       create.tgtname[DISK_NAME_LEN - 1] = '\0';
-
-       if (create.flags != 0) {
-               __u32 flags = create.flags;
-
-               /* Check for valid flags */
-               if (flags & NVM_TARGET_FACTORY)
-                       flags &= ~NVM_TARGET_FACTORY;
-
-               if (flags) {
-                       pr_err("flag not supported\n");
-                       return -EINVAL;
-               }
-       }
-
-       return __nvm_configure_create(&create);
-}
-
-static long nvm_ioctl_dev_remove(struct file *file, void __user *arg)
-{
-       struct nvm_ioctl_remove remove;
-
-       if (copy_from_user(&remove, arg, sizeof(struct nvm_ioctl_remove)))
-               return -EFAULT;
-
-       remove.tgtname[DISK_NAME_LEN - 1] = '\0';
-
-       if (remove.flags != 0) {
-               pr_err("no flags supported\n");
-               return -EINVAL;
-       }
-
-       return nvm_remove_tgt(&remove);
-}
-
-/* kept for compatibility reasons */
-static long nvm_ioctl_dev_init(struct file *file, void __user *arg)
-{
-       struct nvm_ioctl_dev_init init;
-
-       if (copy_from_user(&init, arg, sizeof(struct nvm_ioctl_dev_init)))
-               return -EFAULT;
-
-       if (init.flags != 0) {
-               pr_err("no flags supported\n");
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-/* Kept for compatibility reasons */
-static long nvm_ioctl_dev_factory(struct file *file, void __user *arg)
-{
-       struct nvm_ioctl_dev_factory fact;
-
-       if (copy_from_user(&fact, arg, sizeof(struct nvm_ioctl_dev_factory)))
-               return -EFAULT;
-
-       fact.dev[DISK_NAME_LEN - 1] = '\0';
-
-       if (fact.flags & ~(NVM_FACTORY_NR_BITS - 1))
-               return -EINVAL;
-
-       return 0;
-}
-
-static long nvm_ctl_ioctl(struct file *file, uint cmd, unsigned long arg)
-{
-       void __user *argp = (void __user *)arg;
-
-       if (!capable(CAP_SYS_ADMIN))
-               return -EPERM;
-
-       switch (cmd) {
-       case NVM_INFO:
-               return nvm_ioctl_info(file, argp);
-       case NVM_GET_DEVICES:
-               return nvm_ioctl_get_devices(file, argp);
-       case NVM_DEV_CREATE:
-               return nvm_ioctl_dev_create(file, argp);
-       case NVM_DEV_REMOVE:
-               return nvm_ioctl_dev_remove(file, argp);
-       case NVM_DEV_INIT:
-               return nvm_ioctl_dev_init(file, argp);
-       case NVM_DEV_FACTORY:
-               return nvm_ioctl_dev_factory(file, argp);
-       }
-       return 0;
-}
-
-static const struct file_operations _ctl_fops = {
-       .open = nonseekable_open,
-       .unlocked_ioctl = nvm_ctl_ioctl,
-       .owner = THIS_MODULE,
-       .llseek  = noop_llseek,
-};
-
-static struct miscdevice _nvm_misc = {
-       .minor          = MISC_DYNAMIC_MINOR,
-       .name           = "lightnvm",
-       .nodename       = "lightnvm/control",
-       .fops           = &_ctl_fops,
-};
-builtin_misc_device(_nvm_misc);
diff --git a/drivers/lightnvm/pblk-cache.c b/drivers/lightnvm/pblk-cache.c
deleted file mode 100644 (file)
index f185f1a..0000000
+++ /dev/null
@@ -1,137 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2016 CNEX Labs
- * Initial release: Javier Gonzalez <javier@cnexlabs.com>
- *                  Matias Bjorling <matias@cnexlabs.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * pblk-cache.c - pblk's write cache
- */
-
-#include "pblk.h"
-
-void pblk_write_to_cache(struct pblk *pblk, struct bio *bio,
-                               unsigned long flags)
-{
-       struct pblk_w_ctx w_ctx;
-       sector_t lba = pblk_get_lba(bio);
-       unsigned long start_time;
-       unsigned int bpos, pos;
-       int nr_entries = pblk_get_secs(bio);
-       int i, ret;
-
-       start_time = bio_start_io_acct(bio);
-
-       /* Update the write buffer head (mem) with the entries that we can
-        * write. The write in itself cannot fail, so there is no need to
-        * rollback from here on.
-        */
-retry:
-       ret = pblk_rb_may_write_user(&pblk->rwb, bio, nr_entries, &bpos);
-       switch (ret) {
-       case NVM_IO_REQUEUE:
-               io_schedule();
-               goto retry;
-       case NVM_IO_ERR:
-               pblk_pipeline_stop(pblk);
-               bio_io_error(bio);
-               goto out;
-       }
-
-       pblk_ppa_set_empty(&w_ctx.ppa);
-       w_ctx.flags = flags;
-       if (bio->bi_opf & REQ_PREFLUSH) {
-               w_ctx.flags |= PBLK_FLUSH_ENTRY;
-               pblk_write_kick(pblk);
-       }
-
-       if (unlikely(!bio_has_data(bio)))
-               goto out;
-
-       for (i = 0; i < nr_entries; i++) {
-               void *data = bio_data(bio);
-
-               w_ctx.lba = lba + i;
-
-               pos = pblk_rb_wrap_pos(&pblk->rwb, bpos + i);
-               pblk_rb_write_entry_user(&pblk->rwb, data, w_ctx, pos);
-
-               bio_advance(bio, PBLK_EXPOSED_PAGE_SIZE);
-       }
-
-       atomic64_add(nr_entries, &pblk->user_wa);
-
-#ifdef CONFIG_NVM_PBLK_DEBUG
-       atomic_long_add(nr_entries, &pblk->inflight_writes);
-       atomic_long_add(nr_entries, &pblk->req_writes);
-#endif
-
-       pblk_rl_inserted(&pblk->rl, nr_entries);
-
-out:
-       bio_end_io_acct(bio, start_time);
-       pblk_write_should_kick(pblk);
-
-       if (ret == NVM_IO_DONE)
-               bio_endio(bio);
-}
-
-/*
- * On GC the incoming lbas are not necessarily sequential. Also, some of the
- * lbas might not be valid entries, which are marked as empty by the GC thread
- */
-int pblk_write_gc_to_cache(struct pblk *pblk, struct pblk_gc_rq *gc_rq)
-{
-       struct pblk_w_ctx w_ctx;
-       unsigned int bpos, pos;
-       void *data = gc_rq->data;
-       int i, valid_entries;
-
-       /* Update the write buffer head (mem) with the entries that we can
-        * write. The write in itself cannot fail, so there is no need to
-        * rollback from here on.
-        */
-retry:
-       if (!pblk_rb_may_write_gc(&pblk->rwb, gc_rq->secs_to_gc, &bpos)) {
-               io_schedule();
-               goto retry;
-       }
-
-       w_ctx.flags = PBLK_IOTYPE_GC;
-       pblk_ppa_set_empty(&w_ctx.ppa);
-
-       for (i = 0, valid_entries = 0; i < gc_rq->nr_secs; i++) {
-               if (gc_rq->lba_list[i] == ADDR_EMPTY)
-                       continue;
-
-               w_ctx.lba = gc_rq->lba_list[i];
-
-               pos = pblk_rb_wrap_pos(&pblk->rwb, bpos + valid_entries);
-               pblk_rb_write_entry_gc(&pblk->rwb, data, w_ctx, gc_rq->line,
-                                               gc_rq->paddr_list[i], pos);
-
-               data += PBLK_EXPOSED_PAGE_SIZE;
-               valid_entries++;
-       }
-
-       WARN_ONCE(gc_rq->secs_to_gc != valid_entries,
-                                       "pblk: inconsistent GC write\n");
-
-       atomic64_add(valid_entries, &pblk->gc_wa);
-
-#ifdef CONFIG_NVM_PBLK_DEBUG
-       atomic_long_add(valid_entries, &pblk->inflight_writes);
-       atomic_long_add(valid_entries, &pblk->recov_gc_writes);
-#endif
-
-       pblk_write_should_kick(pblk);
-       return NVM_IO_OK;
-}
diff --git a/drivers/lightnvm/pblk-core.c b/drivers/lightnvm/pblk-core.c
deleted file mode 100644 (file)
index 33d39d3..0000000
+++ /dev/null
@@ -1,2151 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2016 CNEX Labs
- * Initial release: Javier Gonzalez <javier@cnexlabs.com>
- *                  Matias Bjorling <matias@cnexlabs.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * pblk-core.c - pblk's core functionality
- *
- */
-
-#define CREATE_TRACE_POINTS
-
-#include "pblk.h"
-#include "pblk-trace.h"
-
-static void pblk_line_mark_bb(struct work_struct *work)
-{
-       struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
-                                                                       ws);
-       struct pblk *pblk = line_ws->pblk;
-       struct nvm_tgt_dev *dev = pblk->dev;
-       struct ppa_addr *ppa = line_ws->priv;
-       int ret;
-
-       ret = nvm_set_chunk_meta(dev, ppa, 1, NVM_BLK_T_GRWN_BAD);
-       if (ret) {
-               struct pblk_line *line;
-               int pos;
-
-               line = pblk_ppa_to_line(pblk, *ppa);
-               pos = pblk_ppa_to_pos(&dev->geo, *ppa);
-
-               pblk_err(pblk, "failed to mark bb, line:%d, pos:%d\n",
-                               line->id, pos);
-       }
-
-       kfree(ppa);
-       mempool_free(line_ws, &pblk->gen_ws_pool);
-}
-
-static void pblk_mark_bb(struct pblk *pblk, struct pblk_line *line,
-                        struct ppa_addr ppa_addr)
-{
-       struct nvm_tgt_dev *dev = pblk->dev;
-       struct nvm_geo *geo = &dev->geo;
-       struct ppa_addr *ppa;
-       int pos = pblk_ppa_to_pos(geo, ppa_addr);
-
-       pblk_debug(pblk, "erase failed: line:%d, pos:%d\n", line->id, pos);
-       atomic_long_inc(&pblk->erase_failed);
-
-       atomic_dec(&line->blk_in_line);
-       if (test_and_set_bit(pos, line->blk_bitmap))
-               pblk_err(pblk, "attempted to erase bb: line:%d, pos:%d\n",
-                                                       line->id, pos);
-
-       /* Not necessary to mark bad blocks on 2.0 spec. */
-       if (geo->version == NVM_OCSSD_SPEC_20)
-               return;
-
-       ppa = kmalloc(sizeof(struct ppa_addr), GFP_ATOMIC);
-       if (!ppa)
-               return;
-
-       *ppa = ppa_addr;
-       pblk_gen_run_ws(pblk, NULL, ppa, pblk_line_mark_bb,
-                                               GFP_ATOMIC, pblk->bb_wq);
-}
-
-static void __pblk_end_io_erase(struct pblk *pblk, struct nvm_rq *rqd)
-{
-       struct nvm_tgt_dev *dev = pblk->dev;
-       struct nvm_geo *geo = &dev->geo;
-       struct nvm_chk_meta *chunk;
-       struct pblk_line *line;
-       int pos;
-
-       line = pblk_ppa_to_line(pblk, rqd->ppa_addr);
-       pos = pblk_ppa_to_pos(geo, rqd->ppa_addr);
-       chunk = &line->chks[pos];
-
-       atomic_dec(&line->left_seblks);
-
-       if (rqd->error) {
-               trace_pblk_chunk_reset(pblk_disk_name(pblk),
-                               &rqd->ppa_addr, PBLK_CHUNK_RESET_FAILED);
-
-               chunk->state = NVM_CHK_ST_OFFLINE;
-               pblk_mark_bb(pblk, line, rqd->ppa_addr);
-       } else {
-               trace_pblk_chunk_reset(pblk_disk_name(pblk),
-                               &rqd->ppa_addr, PBLK_CHUNK_RESET_DONE);
-
-               chunk->state = NVM_CHK_ST_FREE;
-       }
-
-       trace_pblk_chunk_state(pblk_disk_name(pblk), &rqd->ppa_addr,
-                               chunk->state);
-
-       atomic_dec(&pblk->inflight_io);
-}
-
-/* Erase completion assumes that only one block is erased at the time */
-static void pblk_end_io_erase(struct nvm_rq *rqd)
-{
-       struct pblk *pblk = rqd->private;
-
-       __pblk_end_io_erase(pblk, rqd);
-       mempool_free(rqd, &pblk->e_rq_pool);
-}
-
-/*
- * Get information for all chunks from the device.
- *
- * The caller is responsible for freeing (vmalloc) the returned structure
- */
-struct nvm_chk_meta *pblk_get_chunk_meta(struct pblk *pblk)
-{
-       struct nvm_tgt_dev *dev = pblk->dev;
-       struct nvm_geo *geo = &dev->geo;
-       struct nvm_chk_meta *meta;
-       struct ppa_addr ppa;
-       unsigned long len;
-       int ret;
-
-       ppa.ppa = 0;
-
-       len = geo->all_chunks * sizeof(*meta);
-       meta = vzalloc(len);
-       if (!meta)
-               return ERR_PTR(-ENOMEM);
-
-       ret = nvm_get_chunk_meta(dev, ppa, geo->all_chunks, meta);
-       if (ret) {
-               vfree(meta);
-               return ERR_PTR(-EIO);
-       }
-
-       return meta;
-}
-
-struct nvm_chk_meta *pblk_chunk_get_off(struct pblk *pblk,
-                                             struct nvm_chk_meta *meta,
-                                             struct ppa_addr ppa)
-{
-       struct nvm_tgt_dev *dev = pblk->dev;
-       struct nvm_geo *geo = &dev->geo;
-       int ch_off = ppa.m.grp * geo->num_chk * geo->num_lun;
-       int lun_off = ppa.m.pu * geo->num_chk;
-       int chk_off = ppa.m.chk;
-
-       return meta + ch_off + lun_off + chk_off;
-}
-
-void __pblk_map_invalidate(struct pblk *pblk, struct pblk_line *line,
-                          u64 paddr)
-{
-       struct pblk_line_mgmt *l_mg = &pblk->l_mg;
-       struct list_head *move_list = NULL;
-
-       /* Lines being reclaimed (GC'ed) cannot be invalidated. Before the L2P
-        * table is modified with reclaimed sectors, a check is done to endure
-        * that newer updates are not overwritten.
-        */
-       spin_lock(&line->lock);
-       WARN_ON(line->state == PBLK_LINESTATE_FREE);
-
-       if (test_and_set_bit(paddr, line->invalid_bitmap)) {
-               WARN_ONCE(1, "pblk: double invalidate\n");
-               spin_unlock(&line->lock);
-               return;
-       }
-       le32_add_cpu(line->vsc, -1);
-
-       if (line->state == PBLK_LINESTATE_CLOSED)
-               move_list = pblk_line_gc_list(pblk, line);
-       spin_unlock(&line->lock);
-
-       if (move_list) {
-               spin_lock(&l_mg->gc_lock);
-               spin_lock(&line->lock);
-               /* Prevent moving a line that has just been chosen for GC */
-               if (line->state == PBLK_LINESTATE_GC) {
-                       spin_unlock(&line->lock);
-                       spin_unlock(&l_mg->gc_lock);
-                       return;
-               }
-               spin_unlock(&line->lock);
-
-               list_move_tail(&line->list, move_list);
-               spin_unlock(&l_mg->gc_lock);
-       }
-}
-
-void pblk_map_invalidate(struct pblk *pblk, struct ppa_addr ppa)
-{
-       struct pblk_line *line;
-       u64 paddr;
-
-#ifdef CONFIG_NVM_PBLK_DEBUG
-       /* Callers must ensure that the ppa points to a device address */
-       BUG_ON(pblk_addr_in_cache(ppa));
-       BUG_ON(pblk_ppa_empty(ppa));
-#endif
-
-       line = pblk_ppa_to_line(pblk, ppa);
-       paddr = pblk_dev_ppa_to_line_addr(pblk, ppa);
-
-       __pblk_map_invalidate(pblk, line, paddr);
-}
-
-static void pblk_invalidate_range(struct pblk *pblk, sector_t slba,
-                                 unsigned int nr_secs)
-{
-       sector_t lba;
-
-       spin_lock(&pblk->trans_lock);
-       for (lba = slba; lba < slba + nr_secs; lba++) {
-               struct ppa_addr ppa;
-
-               ppa = pblk_trans_map_get(pblk, lba);
-
-               if (!pblk_addr_in_cache(ppa) && !pblk_ppa_empty(ppa))
-                       pblk_map_invalidate(pblk, ppa);
-
-               pblk_ppa_set_empty(&ppa);
-               pblk_trans_map_set(pblk, lba, ppa);
-       }
-       spin_unlock(&pblk->trans_lock);
-}
-
-int pblk_alloc_rqd_meta(struct pblk *pblk, struct nvm_rq *rqd)
-{
-       struct nvm_tgt_dev *dev = pblk->dev;
-
-       rqd->meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
-                                                       &rqd->dma_meta_list);
-       if (!rqd->meta_list)
-               return -ENOMEM;
-
-       if (rqd->nr_ppas == 1)
-               return 0;
-
-       rqd->ppa_list = rqd->meta_list + pblk_dma_meta_size(pblk);
-       rqd->dma_ppa_list = rqd->dma_meta_list + pblk_dma_meta_size(pblk);
-
-       return 0;
-}
-
-void pblk_free_rqd_meta(struct pblk *pblk, struct nvm_rq *rqd)
-{
-       struct nvm_tgt_dev *dev = pblk->dev;
-
-       if (rqd->meta_list)
-               nvm_dev_dma_free(dev->parent, rqd->meta_list,
-                               rqd->dma_meta_list);
-}
-
-/* Caller must guarantee that the request is a valid type */
-struct nvm_rq *pblk_alloc_rqd(struct pblk *pblk, int type)
-{
-       mempool_t *pool;
-       struct nvm_rq *rqd;
-       int rq_size;
-
-       switch (type) {
-       case PBLK_WRITE:
-       case PBLK_WRITE_INT:
-               pool = &pblk->w_rq_pool;
-               rq_size = pblk_w_rq_size;
-               break;
-       case PBLK_READ:
-               pool = &pblk->r_rq_pool;
-               rq_size = pblk_g_rq_size;
-               break;
-       default:
-               pool = &pblk->e_rq_pool;
-               rq_size = pblk_g_rq_size;
-       }
-
-       rqd = mempool_alloc(pool, GFP_KERNEL);
-       memset(rqd, 0, rq_size);
-
-       return rqd;
-}
-
-/* Typically used on completion path. Cannot guarantee request consistency */
-void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int type)
-{
-       mempool_t *pool;
-
-       switch (type) {
-       case PBLK_WRITE:
-               kfree(((struct pblk_c_ctx *)nvm_rq_to_pdu(rqd))->lun_bitmap);
-               fallthrough;
-       case PBLK_WRITE_INT:
-               pool = &pblk->w_rq_pool;
-               break;
-       case PBLK_READ:
-               pool = &pblk->r_rq_pool;
-               break;
-       case PBLK_ERASE:
-               pool = &pblk->e_rq_pool;
-               break;
-       default:
-               pblk_err(pblk, "trying to free unknown rqd type\n");
-               return;
-       }
-
-       pblk_free_rqd_meta(pblk, rqd);
-       mempool_free(rqd, pool);
-}
-
-void pblk_bio_free_pages(struct pblk *pblk, struct bio *bio, int off,
-                        int nr_pages)
-{
-       struct bio_vec *bv;
-       struct page *page;
-       int i, e, nbv = 0;
-
-       for (i = 0; i < bio->bi_vcnt; i++) {
-               bv = &bio->bi_io_vec[i];
-               page = bv->bv_page;
-               for (e = 0; e < bv->bv_len; e += PBLK_EXPOSED_PAGE_SIZE, nbv++)
-                       if (nbv >= off)
-                               mempool_free(page++, &pblk->page_bio_pool);
-       }
-}
-
-int pblk_bio_add_pages(struct pblk *pblk, struct bio *bio, gfp_t flags,
-                      int nr_pages)
-{
-       struct request_queue *q = pblk->dev->q;
-       struct page *page;
-       int i, ret;
-
-       for (i = 0; i < nr_pages; i++) {
-               page = mempool_alloc(&pblk->page_bio_pool, flags);
-
-               ret = bio_add_pc_page(q, bio, page, PBLK_EXPOSED_PAGE_SIZE, 0);
-               if (ret != PBLK_EXPOSED_PAGE_SIZE) {
-                       pblk_err(pblk, "could not add page to bio\n");
-                       mempool_free(page, &pblk->page_bio_pool);
-                       goto err;
-               }
-       }
-
-       return 0;
-err:
-       pblk_bio_free_pages(pblk, bio, (bio->bi_vcnt - i), i);
-       return -1;
-}
-
-void pblk_write_kick(struct pblk *pblk)
-{
-       wake_up_process(pblk->writer_ts);
-       mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(1000));
-}
-
-void pblk_write_timer_fn(struct timer_list *t)
-{
-       struct pblk *pblk = from_timer(pblk, t, wtimer);
-
-       /* kick the write thread every tick to flush outstanding data */
-       pblk_write_kick(pblk);
-}
-
-void pblk_write_should_kick(struct pblk *pblk)
-{
-       unsigned int secs_avail = pblk_rb_read_count(&pblk->rwb);
-
-       if (secs_avail >= pblk->min_write_pgs_data)
-               pblk_write_kick(pblk);
-}
-
-static void pblk_wait_for_meta(struct pblk *pblk)
-{
-       do {
-               if (!atomic_read(&pblk->inflight_io))
-                       break;
-
-               schedule();
-       } while (1);
-}
-
-static void pblk_flush_writer(struct pblk *pblk)
-{
-       pblk_rb_flush(&pblk->rwb);
-       do {
-               if (!pblk_rb_sync_count(&pblk->rwb))
-                       break;
-
-               pblk_write_kick(pblk);
-               schedule();
-       } while (1);
-}
-
-struct list_head *pblk_line_gc_list(struct pblk *pblk, struct pblk_line *line)
-{
-       struct pblk_line_meta *lm = &pblk->lm;
-       struct pblk_line_mgmt *l_mg = &pblk->l_mg;
-       struct list_head *move_list = NULL;
-       int packed_meta = (le32_to_cpu(*line->vsc) / pblk->min_write_pgs_data)
-                       * (pblk->min_write_pgs - pblk->min_write_pgs_data);
-       int vsc = le32_to_cpu(*line->vsc) + packed_meta;
-
-       lockdep_assert_held(&line->lock);
-
-       if (line->w_err_gc->has_write_err) {
-               if (line->gc_group != PBLK_LINEGC_WERR) {
-                       line->gc_group = PBLK_LINEGC_WERR;
-                       move_list = &l_mg->gc_werr_list;
-                       pblk_rl_werr_line_in(&pblk->rl);
-               }
-       } else if (!vsc) {
-               if (line->gc_group != PBLK_LINEGC_FULL) {
-                       line->gc_group = PBLK_LINEGC_FULL;
-                       move_list = &l_mg->gc_full_list;
-               }
-       } else if (vsc < lm->high_thrs) {
-               if (line->gc_group != PBLK_LINEGC_HIGH) {
-                       line->gc_group = PBLK_LINEGC_HIGH;
-                       move_list = &l_mg->gc_high_list;
-               }
-       } else if (vsc < lm->mid_thrs) {
-               if (line->gc_group != PBLK_LINEGC_MID) {
-                       line->gc_group = PBLK_LINEGC_MID;
-                       move_list = &l_mg->gc_mid_list;
-               }
-       } else if (vsc < line->sec_in_line) {
-               if (line->gc_group != PBLK_LINEGC_LOW) {
-                       line->gc_group = PBLK_LINEGC_LOW;
-                       move_list = &l_mg->gc_low_list;
-               }
-       } else if (vsc == line->sec_in_line) {
-               if (line->gc_group != PBLK_LINEGC_EMPTY) {
-                       line->gc_group = PBLK_LINEGC_EMPTY;
-                       move_list = &l_mg->gc_empty_list;
-               }
-       } else {
-               line->state = PBLK_LINESTATE_CORRUPT;
-               trace_pblk_line_state(pblk_disk_name(pblk), line->id,
-                                       line->state);
-
-               line->gc_group = PBLK_LINEGC_NONE;
-               move_list =  &l_mg->corrupt_list;
-               pblk_err(pblk, "corrupted vsc for line %d, vsc:%d (%d/%d/%d)\n",
-                                               line->id, vsc,
-                                               line->sec_in_line,
-                                               lm->high_thrs, lm->mid_thrs);
-       }
-
-       return move_list;
-}
-
-void pblk_discard(struct pblk *pblk, struct bio *bio)
-{
-       sector_t slba = pblk_get_lba(bio);
-       sector_t nr_secs = pblk_get_secs(bio);
-
-       pblk_invalidate_range(pblk, slba, nr_secs);
-}
-
-void pblk_log_write_err(struct pblk *pblk, struct nvm_rq *rqd)
-{
-       atomic_long_inc(&pblk->write_failed);
-#ifdef CONFIG_NVM_PBLK_DEBUG
-       pblk_print_failed_rqd(pblk, rqd, rqd->error);
-#endif
-}
-
-void pblk_log_read_err(struct pblk *pblk, struct nvm_rq *rqd)
-{
-       /* Empty page read is not necessarily an error (e.g., L2P recovery) */
-       if (rqd->error == NVM_RSP_ERR_EMPTYPAGE) {
-               atomic_long_inc(&pblk->read_empty);
-               return;
-       }
-
-       switch (rqd->error) {
-       case NVM_RSP_WARN_HIGHECC:
-               atomic_long_inc(&pblk->read_high_ecc);
-               break;
-       case NVM_RSP_ERR_FAILECC:
-       case NVM_RSP_ERR_FAILCRC:
-               atomic_long_inc(&pblk->read_failed);
-               break;
-       default:
-               pblk_err(pblk, "unknown read error:%d\n", rqd->error);
-       }
-#ifdef CONFIG_NVM_PBLK_DEBUG
-       pblk_print_failed_rqd(pblk, rqd, rqd->error);
-#endif
-}
-
-void pblk_set_sec_per_write(struct pblk *pblk, int sec_per_write)
-{
-       pblk->sec_per_write = sec_per_write;
-}
-
-int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd, void *buf)
-{
-       struct nvm_tgt_dev *dev = pblk->dev;
-
-       atomic_inc(&pblk->inflight_io);
-
-#ifdef CONFIG_NVM_PBLK_DEBUG
-       if (pblk_check_io(pblk, rqd))
-               return NVM_IO_ERR;
-#endif
-
-       return nvm_submit_io(dev, rqd, buf);
-}
-
-void pblk_check_chunk_state_update(struct pblk *pblk, struct nvm_rq *rqd)
-{
-       struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
-
-       int i;
-
-       for (i = 0; i < rqd->nr_ppas; i++) {
-               struct ppa_addr *ppa = &ppa_list[i];
-               struct nvm_chk_meta *chunk = pblk_dev_ppa_to_chunk(pblk, *ppa);
-               u64 caddr = pblk_dev_ppa_to_chunk_addr(pblk, *ppa);
-
-               if (caddr == 0)
-                       trace_pblk_chunk_state(pblk_disk_name(pblk),
-                                                       ppa, NVM_CHK_ST_OPEN);
-               else if (caddr == (chunk->cnlb - 1))
-                       trace_pblk_chunk_state(pblk_disk_name(pblk),
-                                                       ppa, NVM_CHK_ST_CLOSED);
-       }
-}
-
-int pblk_submit_io_sync(struct pblk *pblk, struct nvm_rq *rqd, void *buf)
-{
-       struct nvm_tgt_dev *dev = pblk->dev;
-       int ret;
-
-       atomic_inc(&pblk->inflight_io);
-
-#ifdef CONFIG_NVM_PBLK_DEBUG
-       if (pblk_check_io(pblk, rqd))
-               return NVM_IO_ERR;
-#endif
-
-       ret = nvm_submit_io_sync(dev, rqd, buf);
-
-       if (trace_pblk_chunk_state_enabled() && !ret &&
-           rqd->opcode == NVM_OP_PWRITE)
-               pblk_check_chunk_state_update(pblk, rqd);
-
-       return ret;
-}
-
-static int pblk_submit_io_sync_sem(struct pblk *pblk, struct nvm_rq *rqd,
-                                  void *buf)
-{
-       struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
-       int ret;
-
-       pblk_down_chunk(pblk, ppa_list[0]);
-       ret = pblk_submit_io_sync(pblk, rqd, buf);
-       pblk_up_chunk(pblk, ppa_list[0]);
-
-       return ret;
-}
-
-int pblk_calc_secs(struct pblk *pblk, unsigned long secs_avail,
-                  unsigned long secs_to_flush, bool skip_meta)
-{
-       int max = pblk->sec_per_write;
-       int min = pblk->min_write_pgs;
-       int secs_to_sync = 0;
-
-       if (skip_meta && pblk->min_write_pgs_data != pblk->min_write_pgs)
-               min = max = pblk->min_write_pgs_data;
-
-       if (secs_avail >= max)
-               secs_to_sync = max;
-       else if (secs_avail >= min)
-               secs_to_sync = min * (secs_avail / min);
-       else if (secs_to_flush)
-               secs_to_sync = min;
-
-       return secs_to_sync;
-}
-
-void pblk_dealloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
-{
-       u64 addr;
-       int i;
-
-       spin_lock(&line->lock);
-       addr = find_next_zero_bit(line->map_bitmap,
-                                       pblk->lm.sec_per_line, line->cur_sec);
-       line->cur_sec = addr - nr_secs;
-
-       for (i = 0; i < nr_secs; i++, line->cur_sec--)
-               WARN_ON(!test_and_clear_bit(line->cur_sec, line->map_bitmap));
-       spin_unlock(&line->lock);
-}
-
-u64 __pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
-{
-       u64 addr;
-       int i;
-
-       lockdep_assert_held(&line->lock);
-
-       /* logic error: ppa out-of-bounds. Prevent generating bad address */
-       if (line->cur_sec + nr_secs > pblk->lm.sec_per_line) {
-               WARN(1, "pblk: page allocation out of bounds\n");
-               nr_secs = pblk->lm.sec_per_line - line->cur_sec;
-       }
-
-       line->cur_sec = addr = find_next_zero_bit(line->map_bitmap,
-                                       pblk->lm.sec_per_line, line->cur_sec);
-       for (i = 0; i < nr_secs; i++, line->cur_sec++)
-               WARN_ON(test_and_set_bit(line->cur_sec, line->map_bitmap));
-
-       return addr;
-}
-
-u64 pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
-{
-       u64 addr;
-
-       /* Lock needed in case a write fails and a recovery needs to remap
-        * failed write buffer entries
-        */
-       spin_lock(&line->lock);
-       addr = __pblk_alloc_page(pblk, line, nr_secs);
-       line->left_msecs -= nr_secs;
-       WARN(line->left_msecs < 0, "pblk: page allocation out of bounds\n");
-       spin_unlock(&line->lock);
-
-       return addr;
-}
-
-u64 pblk_lookup_page(struct pblk *pblk, struct pblk_line *line)
-{
-       u64 paddr;
-
-       spin_lock(&line->lock);
-       paddr = find_next_zero_bit(line->map_bitmap,
-                                       pblk->lm.sec_per_line, line->cur_sec);
-       spin_unlock(&line->lock);
-
-       return paddr;
-}
-
-u64 pblk_line_smeta_start(struct pblk *pblk, struct pblk_line *line)
-{
-       struct nvm_tgt_dev *dev = pblk->dev;
-       struct nvm_geo *geo = &dev->geo;
-       struct pblk_line_meta *lm = &pblk->lm;
-       int bit;
-
-       /* This usually only happens on bad lines */
-       bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
-       if (bit >= lm->blk_per_line)
-               return -1;
-
-       return bit * geo->ws_opt;
-}
-
-int pblk_line_smeta_read(struct pblk *pblk, struct pblk_line *line)
-{
-       struct pblk_line_meta *lm = &pblk->lm;
-       struct ppa_addr *ppa_list;
-       struct nvm_rq rqd;
-       u64 paddr = pblk_line_smeta_start(pblk, line);
-       int i, ret;
-
-       memset(&rqd, 0, sizeof(struct nvm_rq));
-
-       ret = pblk_alloc_rqd_meta(pblk, &rqd);
-       if (ret)
-               return ret;
-
-       rqd.opcode = NVM_OP_PREAD;
-       rqd.nr_ppas = lm->smeta_sec;
-       rqd.is_seq = 1;
-       ppa_list = nvm_rq_to_ppa_list(&rqd);
-
-       for (i = 0; i < lm->smeta_sec; i++, paddr++)
-               ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);
-
-       ret = pblk_submit_io_sync(pblk, &rqd, line->smeta);
-       if (ret) {
-               pblk_err(pblk, "smeta I/O submission failed: %d\n", ret);
-               goto clear_rqd;
-       }
-
-       atomic_dec(&pblk->inflight_io);
-
-       if (rqd.error && rqd.error != NVM_RSP_WARN_HIGHECC) {
-               pblk_log_read_err(pblk, &rqd);
-               ret = -EIO;
-       }
-
-clear_rqd:
-       pblk_free_rqd_meta(pblk, &rqd);
-       return ret;
-}
-
-static int pblk_line_smeta_write(struct pblk *pblk, struct pblk_line *line,
-                                u64 paddr)
-{
-       struct pblk_line_meta *lm = &pblk->lm;
-       struct ppa_addr *ppa_list;
-       struct nvm_rq rqd;
-       __le64 *lba_list = emeta_to_lbas(pblk, line->emeta->buf);
-       __le64 addr_empty = cpu_to_le64(ADDR_EMPTY);
-       int i, ret;
-
-       memset(&rqd, 0, sizeof(struct nvm_rq));
-
-       ret = pblk_alloc_rqd_meta(pblk, &rqd);
-       if (ret)
-               return ret;
-
-       rqd.opcode = NVM_OP_PWRITE;
-       rqd.nr_ppas = lm->smeta_sec;
-       rqd.is_seq = 1;
-       ppa_list = nvm_rq_to_ppa_list(&rqd);
-
-       for (i = 0; i < lm->smeta_sec; i++, paddr++) {
-               struct pblk_sec_meta *meta = pblk_get_meta(pblk,
-                                                          rqd.meta_list, i);
-
-               ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);
-               meta->lba = lba_list[paddr] = addr_empty;
-       }
-
-       ret = pblk_submit_io_sync_sem(pblk, &rqd, line->smeta);
-       if (ret) {
-               pblk_err(pblk, "smeta I/O submission failed: %d\n", ret);
-               goto clear_rqd;
-       }
-
-       atomic_dec(&pblk->inflight_io);
-
-       if (rqd.error) {
-               pblk_log_write_err(pblk, &rqd);
-               ret = -EIO;
-       }
-
-clear_rqd:
-       pblk_free_rqd_meta(pblk, &rqd);
-       return ret;
-}
-
-int pblk_line_emeta_read(struct pblk *pblk, struct pblk_line *line,
-                        void *emeta_buf)
-{
-       struct nvm_tgt_dev *dev = pblk->dev;
-       struct nvm_geo *geo = &dev->geo;
-       struct pblk_line_meta *lm = &pblk->lm;
-       void *ppa_list_buf, *meta_list;
-       struct ppa_addr *ppa_list;
-       struct nvm_rq rqd;
-       u64 paddr = line->emeta_ssec;
-       dma_addr_t dma_ppa_list, dma_meta_list;
-       int min = pblk->min_write_pgs;
-       int left_ppas = lm->emeta_sec[0];
-       int line_id = line->id;
-       int rq_ppas, rq_len;
-       int i, j;
-       int ret;
-
-       meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
-                                                       &dma_meta_list);
-       if (!meta_list)
-               return -ENOMEM;
-
-       ppa_list_buf = meta_list + pblk_dma_meta_size(pblk);
-       dma_ppa_list = dma_meta_list + pblk_dma_meta_size(pblk);
-
-next_rq:
-       memset(&rqd, 0, sizeof(struct nvm_rq));
-
-       rq_ppas = pblk_calc_secs(pblk, left_ppas, 0, false);
-       rq_len = rq_ppas * geo->csecs;
-
-       rqd.meta_list = meta_list;
-       rqd.ppa_list = ppa_list_buf;
-       rqd.dma_meta_list = dma_meta_list;
-       rqd.dma_ppa_list = dma_ppa_list;
-       rqd.opcode = NVM_OP_PREAD;
-       rqd.nr_ppas = rq_ppas;
-       ppa_list = nvm_rq_to_ppa_list(&rqd);
-
-       for (i = 0; i < rqd.nr_ppas; ) {
-               struct ppa_addr ppa = addr_to_gen_ppa(pblk, paddr, line_id);
-               int pos = pblk_ppa_to_pos(geo, ppa);
-
-               if (pblk_io_aligned(pblk, rq_ppas))
-                       rqd.is_seq = 1;
-
-               while (test_bit(pos, line->blk_bitmap)) {
-                       paddr += min;
-                       if (pblk_boundary_paddr_checks(pblk, paddr)) {
-                               ret = -EINTR;
-                               goto free_rqd_dma;
-                       }
-
-                       ppa = addr_to_gen_ppa(pblk, paddr, line_id);
-                       pos = pblk_ppa_to_pos(geo, ppa);
-               }
-
-               if (pblk_boundary_paddr_checks(pblk, paddr + min)) {
-                       ret = -EINTR;
-                       goto free_rqd_dma;
-               }
-
-               for (j = 0; j < min; j++, i++, paddr++)
-                       ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line_id);
-       }
-
-       ret = pblk_submit_io_sync(pblk, &rqd, emeta_buf);
-       if (ret) {
-               pblk_err(pblk, "emeta I/O submission failed: %d\n", ret);
-               goto free_rqd_dma;
-       }
-
-       atomic_dec(&pblk->inflight_io);
-
-       if (rqd.error && rqd.error != NVM_RSP_WARN_HIGHECC) {
-               pblk_log_read_err(pblk, &rqd);
-               ret = -EIO;
-               goto free_rqd_dma;
-       }
-
-       emeta_buf += rq_len;
-       left_ppas -= rq_ppas;
-       if (left_ppas)
-               goto next_rq;
-
-free_rqd_dma:
-       nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
-       return ret;
-}
-
-static void pblk_setup_e_rq(struct pblk *pblk, struct nvm_rq *rqd,
-                           struct ppa_addr ppa)
-{
-       rqd->opcode = NVM_OP_ERASE;
-       rqd->ppa_addr = ppa;
-       rqd->nr_ppas = 1;
-       rqd->is_seq = 1;
-       rqd->bio = NULL;
-}
-
-static int pblk_blk_erase_sync(struct pblk *pblk, struct ppa_addr ppa)
-{
-       struct nvm_rq rqd = {NULL};
-       int ret;
-
-       trace_pblk_chunk_reset(pblk_disk_name(pblk), &ppa,
-                               PBLK_CHUNK_RESET_START);
-
-       pblk_setup_e_rq(pblk, &rqd, ppa);
-
-       /* The write thread schedules erases so that it minimizes disturbances
-        * with writes. Thus, there is no need to take the LUN semaphore.
-        */
-       ret = pblk_submit_io_sync(pblk, &rqd, NULL);
-       rqd.private = pblk;
-       __pblk_end_io_erase(pblk, &rqd);
-
-       return ret;
-}
-
-int pblk_line_erase(struct pblk *pblk, struct pblk_line *line)
-{
-       struct pblk_line_meta *lm = &pblk->lm;
-       struct ppa_addr ppa;
-       int ret, bit = -1;
-
-       /* Erase only good blocks, one at a time */
-       do {
-               spin_lock(&line->lock);
-               bit = find_next_zero_bit(line->erase_bitmap, lm->blk_per_line,
-                                                               bit + 1);
-               if (bit >= lm->blk_per_line) {
-                       spin_unlock(&line->lock);
-                       break;
-               }
-
-               ppa = pblk->luns[bit].bppa; /* set ch and lun */
-               ppa.a.blk = line->id;
-
-               atomic_dec(&line->left_eblks);
-               WARN_ON(test_and_set_bit(bit, line->erase_bitmap));
-               spin_unlock(&line->lock);
-
-               ret = pblk_blk_erase_sync(pblk, ppa);
-               if (ret) {
-                       pblk_err(pblk, "failed to erase line %d\n", line->id);
-                       return ret;
-               }
-       } while (1);
-
-       return 0;
-}
-
-static void pblk_line_setup_metadata(struct pblk_line *line,
-                                    struct pblk_line_mgmt *l_mg,
-                                    struct pblk_line_meta *lm)
-{
-       int meta_line;
-
-       lockdep_assert_held(&l_mg->free_lock);
-
-retry_meta:
-       meta_line = find_first_zero_bit(&l_mg->meta_bitmap, PBLK_DATA_LINES);
-       if (meta_line == PBLK_DATA_LINES) {
-               spin_unlock(&l_mg->free_lock);
-               io_schedule();
-               spin_lock(&l_mg->free_lock);
-               goto retry_meta;
-       }
-
-       set_bit(meta_line, &l_mg->meta_bitmap);
-       line->meta_line = meta_line;
-
-       line->smeta = l_mg->sline_meta[meta_line];
-       line->emeta = l_mg->eline_meta[meta_line];
-
-       memset(line->smeta, 0, lm->smeta_len);
-       memset(line->emeta->buf, 0, lm->emeta_len[0]);
-
-       line->emeta->mem = 0;
-       atomic_set(&line->emeta->sync, 0);
-}
-
-/* For now lines are always assumed full lines. Thus, smeta former and current
- * lun bitmaps are omitted.
- */
-static int pblk_line_init_metadata(struct pblk *pblk, struct pblk_line *line,
-                                 struct pblk_line *cur)
-{
-       struct nvm_tgt_dev *dev = pblk->dev;
-       struct nvm_geo *geo = &dev->geo;
-       struct pblk_line_meta *lm = &pblk->lm;
-       struct pblk_line_mgmt *l_mg = &pblk->l_mg;
-       struct pblk_emeta *emeta = line->emeta;
-       struct line_emeta *emeta_buf = emeta->buf;
-       struct line_smeta *smeta_buf = (struct line_smeta *)line->smeta;
-       int nr_blk_line;
-
-       /* After erasing the line, new bad blocks might appear and we risk
-        * having an invalid line
-        */
-       nr_blk_line = lm->blk_per_line -
-                       bitmap_weight(line->blk_bitmap, lm->blk_per_line);
-       if (nr_blk_line < lm->min_blk_line) {
-               spin_lock(&l_mg->free_lock);
-               spin_lock(&line->lock);
-               line->state = PBLK_LINESTATE_BAD;
-               trace_pblk_line_state(pblk_disk_name(pblk), line->id,
-                                       line->state);
-               spin_unlock(&line->lock);
-
-               list_add_tail(&line->list, &l_mg->bad_list);
-               spin_unlock(&l_mg->free_lock);
-
-               pblk_debug(pblk, "line %d is bad\n", line->id);
-
-               return 0;
-       }
-
-       /* Run-time metadata */
-       line->lun_bitmap = ((void *)(smeta_buf)) + sizeof(struct line_smeta);
-
-       /* Mark LUNs allocated in this line (all for now) */
-       bitmap_set(line->lun_bitmap, 0, lm->lun_bitmap_len);
-
-       smeta_buf->header.identifier = cpu_to_le32(PBLK_MAGIC);
-       export_guid(smeta_buf->header.uuid, &pblk->instance_uuid);
-       smeta_buf->header.id = cpu_to_le32(line->id);
-       smeta_buf->header.type = cpu_to_le16(line->type);
-       smeta_buf->header.version_major = SMETA_VERSION_MAJOR;
-       smeta_buf->header.version_minor = SMETA_VERSION_MINOR;
-
-       /* Start metadata */
-       smeta_buf->seq_nr = cpu_to_le64(line->seq_nr);
-       smeta_buf->window_wr_lun = cpu_to_le32(geo->all_luns);
-
-       /* Fill metadata among lines */
-       if (cur) {
-               memcpy(line->lun_bitmap, cur->lun_bitmap, lm->lun_bitmap_len);
-               smeta_buf->prev_id = cpu_to_le32(cur->id);
-               cur->emeta->buf->next_id = cpu_to_le32(line->id);
-       } else {
-               smeta_buf->prev_id = cpu_to_le32(PBLK_LINE_EMPTY);
-       }
-
-       /* All smeta must be set at this point */
-       smeta_buf->header.crc = cpu_to_le32(
-                       pblk_calc_meta_header_crc(pblk, &smeta_buf->header));
-       smeta_buf->crc = cpu_to_le32(pblk_calc_smeta_crc(pblk, smeta_buf));
-
-       /* End metadata */
-       memcpy(&emeta_buf->header, &smeta_buf->header,
-                                               sizeof(struct line_header));
-
-       emeta_buf->header.version_major = EMETA_VERSION_MAJOR;
-       emeta_buf->header.version_minor = EMETA_VERSION_MINOR;
-       emeta_buf->header.crc = cpu_to_le32(
-                       pblk_calc_meta_header_crc(pblk, &emeta_buf->header));
-
-       emeta_buf->seq_nr = cpu_to_le64(line->seq_nr);
-       emeta_buf->nr_lbas = cpu_to_le64(line->sec_in_line);
-       emeta_buf->nr_valid_lbas = cpu_to_le64(0);
-       emeta_buf->next_id = cpu_to_le32(PBLK_LINE_EMPTY);
-       emeta_buf->crc = cpu_to_le32(0);
-       emeta_buf->prev_id = smeta_buf->prev_id;
-
-       return 1;
-}
-
-static int pblk_line_alloc_bitmaps(struct pblk *pblk, struct pblk_line *line)
-{
-       struct pblk_line_meta *lm = &pblk->lm;
-       struct pblk_line_mgmt *l_mg = &pblk->l_mg;
-
-       line->map_bitmap = mempool_alloc(l_mg->bitmap_pool, GFP_KERNEL);
-       if (!line->map_bitmap)
-               return -ENOMEM;
-
-       memset(line->map_bitmap, 0, lm->sec_bitmap_len);
-
-       /* will be initialized using bb info from map_bitmap */
-       line->invalid_bitmap = mempool_alloc(l_mg->bitmap_pool, GFP_KERNEL);
-       if (!line->invalid_bitmap) {
-               mempool_free(line->map_bitmap, l_mg->bitmap_pool);
-               line->map_bitmap = NULL;
-               return -ENOMEM;
-       }
-
-       return 0;
-}
-
-/* For now lines are always assumed full lines. Thus, smeta former and current
- * lun bitmaps are omitted.
- */
-static int pblk_line_init_bb(struct pblk *pblk, struct pblk_line *line,
-                            int init)
-{
-       struct nvm_tgt_dev *dev = pblk->dev;
-       struct nvm_geo *geo = &dev->geo;
-       struct pblk_line_meta *lm = &pblk->lm;
-       struct pblk_line_mgmt *l_mg = &pblk->l_mg;
-       u64 off;
-       int bit = -1;
-       int emeta_secs;
-
-       line->sec_in_line = lm->sec_per_line;
-
-       /* Capture bad block information on line mapping bitmaps */
-       while ((bit = find_next_bit(line->blk_bitmap, lm->blk_per_line,
-                                       bit + 1)) < lm->blk_per_line) {
-               off = bit * geo->ws_opt;
-               bitmap_shift_left(l_mg->bb_aux, l_mg->bb_template, off,
-                                                       lm->sec_per_line);
-               bitmap_or(line->map_bitmap, line->map_bitmap, l_mg->bb_aux,
-                                                       lm->sec_per_line);
-               line->sec_in_line -= geo->clba;
-       }
-
-       /* Mark smeta metadata sectors as bad sectors */
-       bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
-       off = bit * geo->ws_opt;
-       bitmap_set(line->map_bitmap, off, lm->smeta_sec);
-       line->sec_in_line -= lm->smeta_sec;
-       line->cur_sec = off + lm->smeta_sec;
-
-       if (init && pblk_line_smeta_write(pblk, line, off)) {
-               pblk_debug(pblk, "line smeta I/O failed. Retry\n");
-               return 0;
-       }
-
-       bitmap_copy(line->invalid_bitmap, line->map_bitmap, lm->sec_per_line);
-
-       /* Mark emeta metadata sectors as bad sectors. We need to consider bad
-        * blocks to make sure that there are enough sectors to store emeta
-        */
-       emeta_secs = lm->emeta_sec[0];
-       off = lm->sec_per_line;
-       while (emeta_secs) {
-               off -= geo->ws_opt;
-               if (!test_bit(off, line->invalid_bitmap)) {
-                       bitmap_set(line->invalid_bitmap, off, geo->ws_opt);
-                       emeta_secs -= geo->ws_opt;
-               }
-       }
-
-       line->emeta_ssec = off;
-       line->sec_in_line -= lm->emeta_sec[0];
-       line->nr_valid_lbas = 0;
-       line->left_msecs = line->sec_in_line;
-       *line->vsc = cpu_to_le32(line->sec_in_line);
-
-       if (lm->sec_per_line - line->sec_in_line !=
-               bitmap_weight(line->invalid_bitmap, lm->sec_per_line)) {
-               spin_lock(&line->lock);
-               line->state = PBLK_LINESTATE_BAD;
-               trace_pblk_line_state(pblk_disk_name(pblk), line->id,
-                                       line->state);
-               spin_unlock(&line->lock);
-
-               list_add_tail(&line->list, &l_mg->bad_list);
-               pblk_err(pblk, "unexpected line %d is bad\n", line->id);
-
-               return 0;
-       }
-
-       return 1;
-}
-
-static int pblk_prepare_new_line(struct pblk *pblk, struct pblk_line *line)
-{
-       struct pblk_line_meta *lm = &pblk->lm;
-       struct nvm_tgt_dev *dev = pblk->dev;
-       struct nvm_geo *geo = &dev->geo;
-       int blk_to_erase = atomic_read(&line->blk_in_line);
-       int i;
-
-       for (i = 0; i < lm->blk_per_line; i++) {
-               struct pblk_lun *rlun = &pblk->luns[i];
-               int pos = pblk_ppa_to_pos(geo, rlun->bppa);
-               int state = line->chks[pos].state;
-
-               /* Free chunks should not be erased */
-               if (state & NVM_CHK_ST_FREE) {
-                       set_bit(pblk_ppa_to_pos(geo, rlun->bppa),
-                                                       line->erase_bitmap);
-                       blk_to_erase--;
-               }
-       }
-
-       return blk_to_erase;
-}
-
-static int pblk_line_prepare(struct pblk *pblk, struct pblk_line *line)
-{
-       struct pblk_line_meta *lm = &pblk->lm;
-       int blk_in_line = atomic_read(&line->blk_in_line);
-       int blk_to_erase;
-
-       /* Bad blocks do not need to be erased */
-       bitmap_copy(line->erase_bitmap, line->blk_bitmap, lm->blk_per_line);
-
-       spin_lock(&line->lock);
-
-       /* If we have not written to this line, we need to mark up free chunks
-        * as already erased
-        */
-       if (line->state == PBLK_LINESTATE_NEW) {
-               blk_to_erase = pblk_prepare_new_line(pblk, line);
-               line->state = PBLK_LINESTATE_FREE;
-               trace_pblk_line_state(pblk_disk_name(pblk), line->id,
-                                       line->state);
-       } else {
-               blk_to_erase = blk_in_line;
-       }
-
-       if (blk_in_line < lm->min_blk_line) {
-               spin_unlock(&line->lock);
-               return -EAGAIN;
-       }
-
-       if (line->state != PBLK_LINESTATE_FREE) {
-               WARN(1, "pblk: corrupted line %d, state %d\n",
-                                                       line->id, line->state);
-               spin_unlock(&line->lock);
-               return -EINTR;
-       }
-
-       line->state = PBLK_LINESTATE_OPEN;
-       trace_pblk_line_state(pblk_disk_name(pblk), line->id,
-                               line->state);
-
-       atomic_set(&line->left_eblks, blk_to_erase);
-       atomic_set(&line->left_seblks, blk_to_erase);
-
-       line->meta_distance = lm->meta_distance;
-       spin_unlock(&line->lock);
-
-       kref_init(&line->ref);
-       atomic_set(&line->sec_to_update, 0);
-
-       return 0;
-}
-
-/* Line allocations in the recovery path are always single threaded */
-int pblk_line_recov_alloc(struct pblk *pblk, struct pblk_line *line)
-{
-       struct pblk_line_mgmt *l_mg = &pblk->l_mg;
-       int ret;
-
-       spin_lock(&l_mg->free_lock);
-       l_mg->data_line = line;
-       list_del(&line->list);
-
-       ret = pblk_line_prepare(pblk, line);
-       if (ret) {
-               list_add(&line->list, &l_mg->free_list);
-               spin_unlock(&l_mg->free_lock);
-               return ret;
-       }
-       spin_unlock(&l_mg->free_lock);
-
-       ret = pblk_line_alloc_bitmaps(pblk, line);
-       if (ret)
-               goto fail;
-
-       if (!pblk_line_init_bb(pblk, line, 0)) {
-               ret = -EINTR;
-               goto fail;
-       }
-
-       pblk_rl_free_lines_dec(&pblk->rl, line, true);
-       return 0;
-
-fail:
-       spin_lock(&l_mg->free_lock);
-       list_add(&line->list, &l_mg->free_list);
-       spin_unlock(&l_mg->free_lock);
-
-       return ret;
-}
-
-void pblk_line_recov_close(struct pblk *pblk, struct pblk_line *line)
-{
-       struct pblk_line_mgmt *l_mg = &pblk->l_mg;
-
-       mempool_free(line->map_bitmap, l_mg->bitmap_pool);
-       line->map_bitmap = NULL;
-       line->smeta = NULL;
-       line->emeta = NULL;
-}
-
-static void pblk_line_reinit(struct pblk_line *line)
-{
-       *line->vsc = cpu_to_le32(EMPTY_ENTRY);
-
-       line->map_bitmap = NULL;
-       line->invalid_bitmap = NULL;
-       line->smeta = NULL;
-       line->emeta = NULL;
-}
-
-void pblk_line_free(struct pblk_line *line)
-{
-       struct pblk *pblk = line->pblk;
-       struct pblk_line_mgmt *l_mg = &pblk->l_mg;
-
-       mempool_free(line->map_bitmap, l_mg->bitmap_pool);
-       mempool_free(line->invalid_bitmap, l_mg->bitmap_pool);
-
-       pblk_line_reinit(line);
-}
-
-struct pblk_line *pblk_line_get(struct pblk *pblk)
-{
-       struct pblk_line_mgmt *l_mg = &pblk->l_mg;
-       struct pblk_line_meta *lm = &pblk->lm;
-       struct pblk_line *line;
-       int ret, bit;
-
-       lockdep_assert_held(&l_mg->free_lock);
-
-retry:
-       if (list_empty(&l_mg->free_list)) {
-               pblk_err(pblk, "no free lines\n");
-               return NULL;
-       }
-
-       line = list_first_entry(&l_mg->free_list, struct pblk_line, list);
-       list_del(&line->list);
-       l_mg->nr_free_lines--;
-
-       bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
-       if (unlikely(bit >= lm->blk_per_line)) {
-               spin_lock(&line->lock);
-               line->state = PBLK_LINESTATE_BAD;
-               trace_pblk_line_state(pblk_disk_name(pblk), line->id,
-                                       line->state);
-               spin_unlock(&line->lock);
-
-               list_add_tail(&line->list, &l_mg->bad_list);
-
-               pblk_debug(pblk, "line %d is bad\n", line->id);
-               goto retry;
-       }
-
-       ret = pblk_line_prepare(pblk, line);
-       if (ret) {
-               switch (ret) {
-               case -EAGAIN:
-                       list_add(&line->list, &l_mg->bad_list);
-                       goto retry;
-               case -EINTR:
-                       list_add(&line->list, &l_mg->corrupt_list);
-                       goto retry;
-               default:
-                       pblk_err(pblk, "failed to prepare line %d\n", line->id);
-                       list_add(&line->list, &l_mg->free_list);
-                       l_mg->nr_free_lines++;
-                       return NULL;
-               }
-       }
-
-       return line;
-}
-
-static struct pblk_line *pblk_line_retry(struct pblk *pblk,
-                                        struct pblk_line *line)
-{
-       struct pblk_line_mgmt *l_mg = &pblk->l_mg;
-       struct pblk_line *retry_line;
-
-retry:
-       spin_lock(&l_mg->free_lock);
-       retry_line = pblk_line_get(pblk);
-       if (!retry_line) {
-               l_mg->data_line = NULL;
-               spin_unlock(&l_mg->free_lock);
-               return NULL;
-       }
-
-       retry_line->map_bitmap = line->map_bitmap;
-       retry_line->invalid_bitmap = line->invalid_bitmap;
-       retry_line->smeta = line->smeta;
-       retry_line->emeta = line->emeta;
-       retry_line->meta_line = line->meta_line;
-
-       pblk_line_reinit(line);
-
-       l_mg->data_line = retry_line;
-       spin_unlock(&l_mg->free_lock);
-
-       pblk_rl_free_lines_dec(&pblk->rl, line, false);
-
-       if (pblk_line_erase(pblk, retry_line))
-               goto retry;
-
-       return retry_line;
-}
-
-static void pblk_set_space_limit(struct pblk *pblk)
-{
-       struct pblk_rl *rl = &pblk->rl;
-
-       atomic_set(&rl->rb_space, 0);
-}
-
-struct pblk_line *pblk_line_get_first_data(struct pblk *pblk)
-{
-       struct pblk_line_mgmt *l_mg = &pblk->l_mg;
-       struct pblk_line *line;
-
-       spin_lock(&l_mg->free_lock);
-       line = pblk_line_get(pblk);
-       if (!line) {
-               spin_unlock(&l_mg->free_lock);
-               return NULL;
-       }
-
-       line->seq_nr = l_mg->d_seq_nr++;
-       line->type = PBLK_LINETYPE_DATA;
-       l_mg->data_line = line;
-
-       pblk_line_setup_metadata(line, l_mg, &pblk->lm);
-
-       /* Allocate next line for preparation */
-       l_mg->data_next = pblk_line_get(pblk);
-       if (!l_mg->data_next) {
-               /* If we cannot get a new line, we need to stop the pipeline.
-                * Only allow as many writes in as we can store safely and then
-                * fail gracefully
-                */
-               pblk_set_space_limit(pblk);
-
-               l_mg->data_next = NULL;
-       } else {
-               l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
-               l_mg->data_next->type = PBLK_LINETYPE_DATA;
-       }
-       spin_unlock(&l_mg->free_lock);
-
-       if (pblk_line_alloc_bitmaps(pblk, line))
-               return NULL;
-
-       if (pblk_line_erase(pblk, line)) {
-               line = pblk_line_retry(pblk, line);
-               if (!line)
-                       return NULL;
-       }
-
-retry_setup:
-       if (!pblk_line_init_metadata(pblk, line, NULL)) {
-               line = pblk_line_retry(pblk, line);
-               if (!line)
-                       return NULL;
-
-               goto retry_setup;
-       }
-
-       if (!pblk_line_init_bb(pblk, line, 1)) {
-               line = pblk_line_retry(pblk, line);
-               if (!line)
-                       return NULL;
-
-               goto retry_setup;
-       }
-
-       pblk_rl_free_lines_dec(&pblk->rl, line, true);
-
-       return line;
-}
-
-void pblk_ppa_to_line_put(struct pblk *pblk, struct ppa_addr ppa)
-{
-       struct pblk_line *line;
-
-       line = pblk_ppa_to_line(pblk, ppa);
-       kref_put(&line->ref, pblk_line_put_wq);
-}
-
-void pblk_rq_to_line_put(struct pblk *pblk, struct nvm_rq *rqd)
-{
-       struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
-       int i;
-
-       for (i = 0; i < rqd->nr_ppas; i++)
-               pblk_ppa_to_line_put(pblk, ppa_list[i]);
-}
-
-static void pblk_stop_writes(struct pblk *pblk, struct pblk_line *line)
-{
-       lockdep_assert_held(&pblk->l_mg.free_lock);
-
-       pblk_set_space_limit(pblk);
-       pblk->state = PBLK_STATE_STOPPING;
-       trace_pblk_state(pblk_disk_name(pblk), pblk->state);
-}
-
-static void pblk_line_close_meta_sync(struct pblk *pblk)
-{
-       struct pblk_line_mgmt *l_mg = &pblk->l_mg;
-       struct pblk_line_meta *lm = &pblk->lm;
-       struct pblk_line *line, *tline;
-       LIST_HEAD(list);
-
-       spin_lock(&l_mg->close_lock);
-       if (list_empty(&l_mg->emeta_list)) {
-               spin_unlock(&l_mg->close_lock);
-               return;
-       }
-
-       list_cut_position(&list, &l_mg->emeta_list, l_mg->emeta_list.prev);
-       spin_unlock(&l_mg->close_lock);
-
-       list_for_each_entry_safe(line, tline, &list, list) {
-               struct pblk_emeta *emeta = line->emeta;
-
-               while (emeta->mem < lm->emeta_len[0]) {
-                       int ret;
-
-                       ret = pblk_submit_meta_io(pblk, line);
-                       if (ret) {
-                               pblk_err(pblk, "sync meta line %d failed (%d)\n",
-                                                       line->id, ret);
-                               return;
-                       }
-               }
-       }
-
-       pblk_wait_for_meta(pblk);
-       flush_workqueue(pblk->close_wq);
-}
-
-void __pblk_pipeline_flush(struct pblk *pblk)
-{
-       struct pblk_line_mgmt *l_mg = &pblk->l_mg;
-       int ret;
-
-       spin_lock(&l_mg->free_lock);
-       if (pblk->state == PBLK_STATE_RECOVERING ||
-                                       pblk->state == PBLK_STATE_STOPPED) {
-               spin_unlock(&l_mg->free_lock);
-               return;
-       }
-       pblk->state = PBLK_STATE_RECOVERING;
-       trace_pblk_state(pblk_disk_name(pblk), pblk->state);
-       spin_unlock(&l_mg->free_lock);
-
-       pblk_flush_writer(pblk);
-       pblk_wait_for_meta(pblk);
-
-       ret = pblk_recov_pad(pblk);
-       if (ret) {
-               pblk_err(pblk, "could not close data on teardown(%d)\n", ret);
-               return;
-       }
-
-       flush_workqueue(pblk->bb_wq);
-       pblk_line_close_meta_sync(pblk);
-}
-
-void __pblk_pipeline_stop(struct pblk *pblk)
-{
-       struct pblk_line_mgmt *l_mg = &pblk->l_mg;
-
-       spin_lock(&l_mg->free_lock);
-       pblk->state = PBLK_STATE_STOPPED;
-       trace_pblk_state(pblk_disk_name(pblk), pblk->state);
-       l_mg->data_line = NULL;
-       l_mg->data_next = NULL;
-       spin_unlock(&l_mg->free_lock);
-}
-
-void pblk_pipeline_stop(struct pblk *pblk)
-{
-       __pblk_pipeline_flush(pblk);
-       __pblk_pipeline_stop(pblk);
-}
-
-struct pblk_line *pblk_line_replace_data(struct pblk *pblk)
-{
-       struct pblk_line_mgmt *l_mg = &pblk->l_mg;
-       struct pblk_line *cur, *new = NULL;
-       unsigned int left_seblks;
-
-       new = l_mg->data_next;
-       if (!new)
-               goto out;
-
-       spin_lock(&l_mg->free_lock);
-       cur = l_mg->data_line;
-       l_mg->data_line = new;
-
-       pblk_line_setup_metadata(new, l_mg, &pblk->lm);
-       spin_unlock(&l_mg->free_lock);
-
-retry_erase:
-       left_seblks = atomic_read(&new->left_seblks);
-       if (left_seblks) {
-               /* If line is not fully erased, erase it */
-               if (atomic_read(&new->left_eblks)) {
-                       if (pblk_line_erase(pblk, new))
-                               goto out;
-               } else {
-                       io_schedule();
-               }
-               goto retry_erase;
-       }
-
-       if (pblk_line_alloc_bitmaps(pblk, new))
-               return NULL;
-
-retry_setup:
-       if (!pblk_line_init_metadata(pblk, new, cur)) {
-               new = pblk_line_retry(pblk, new);
-               if (!new)
-                       goto out;
-
-               goto retry_setup;
-       }
-
-       if (!pblk_line_init_bb(pblk, new, 1)) {
-               new = pblk_line_retry(pblk, new);
-               if (!new)
-                       goto out;
-
-               goto retry_setup;
-       }
-
-       pblk_rl_free_lines_dec(&pblk->rl, new, true);
-
-       /* Allocate next line for preparation */
-       spin_lock(&l_mg->free_lock);
-       l_mg->data_next = pblk_line_get(pblk);
-       if (!l_mg->data_next) {
-               /* If we cannot get a new line, we need to stop the pipeline.
-                * Only allow as many writes in as we can store safely and then
-                * fail gracefully
-                */
-               pblk_stop_writes(pblk, new);
-               l_mg->data_next = NULL;
-       } else {
-               l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
-               l_mg->data_next->type = PBLK_LINETYPE_DATA;
-       }
-       spin_unlock(&l_mg->free_lock);
-
-out:
-       return new;
-}
-
-static void __pblk_line_put(struct pblk *pblk, struct pblk_line *line)
-{
-       struct pblk_line_mgmt *l_mg = &pblk->l_mg;
-       struct pblk_gc *gc = &pblk->gc;
-
-       spin_lock(&line->lock);
-       WARN_ON(line->state != PBLK_LINESTATE_GC);
-       if (line->w_err_gc->has_gc_err) {
-               spin_unlock(&line->lock);
-               pblk_err(pblk, "line %d had errors during GC\n", line->id);
-               pblk_put_line_back(pblk, line);
-               line->w_err_gc->has_gc_err = 0;
-               return;
-       }
-
-       line->state = PBLK_LINESTATE_FREE;
-       trace_pblk_line_state(pblk_disk_name(pblk), line->id,
-                                       line->state);
-       line->gc_group = PBLK_LINEGC_NONE;
-       pblk_line_free(line);
-
-       if (line->w_err_gc->has_write_err) {
-               pblk_rl_werr_line_out(&pblk->rl);
-               line->w_err_gc->has_write_err = 0;
-       }
-
-       spin_unlock(&line->lock);
-       atomic_dec(&gc->pipeline_gc);
-
-       spin_lock(&l_mg->free_lock);
-       list_add_tail(&line->list, &l_mg->free_list);
-       l_mg->nr_free_lines++;
-       spin_unlock(&l_mg->free_lock);
-
-       pblk_rl_free_lines_inc(&pblk->rl, line);
-}
-
-static void pblk_line_put_ws(struct work_struct *work)
-{
-       struct pblk_line_ws *line_put_ws = container_of(work,
-                                               struct pblk_line_ws, ws);
-       struct pblk *pblk = line_put_ws->pblk;
-       struct pblk_line *line = line_put_ws->line;
-
-       __pblk_line_put(pblk, line);
-       mempool_free(line_put_ws, &pblk->gen_ws_pool);
-}
-
-void pblk_line_put(struct kref *ref)
-{
-       struct pblk_line *line = container_of(ref, struct pblk_line, ref);
-       struct pblk *pblk = line->pblk;
-
-       __pblk_line_put(pblk, line);
-}
-
-void pblk_line_put_wq(struct kref *ref)
-{
-       struct pblk_line *line = container_of(ref, struct pblk_line, ref);
-       struct pblk *pblk = line->pblk;
-       struct pblk_line_ws *line_put_ws;
-
-       line_put_ws = mempool_alloc(&pblk->gen_ws_pool, GFP_ATOMIC);
-       if (!line_put_ws)
-               return;
-
-       line_put_ws->pblk = pblk;
-       line_put_ws->line = line;
-       line_put_ws->priv = NULL;
-
-       INIT_WORK(&line_put_ws->ws, pblk_line_put_ws);
-       queue_work(pblk->r_end_wq, &line_put_ws->ws);
-}
-
-int pblk_blk_erase_async(struct pblk *pblk, struct ppa_addr ppa)
-{
-       struct nvm_rq *rqd;
-       int err;
-
-       rqd = pblk_alloc_rqd(pblk, PBLK_ERASE);
-
-       pblk_setup_e_rq(pblk, rqd, ppa);
-
-       rqd->end_io = pblk_end_io_erase;
-       rqd->private = pblk;
-
-       trace_pblk_chunk_reset(pblk_disk_name(pblk),
-                               &ppa, PBLK_CHUNK_RESET_START);
-
-       /* The write thread schedules erases so that it minimizes disturbances
-        * with writes. Thus, there is no need to take the LUN semaphore.
-        */
-       err = pblk_submit_io(pblk, rqd, NULL);
-       if (err) {
-               struct nvm_tgt_dev *dev = pblk->dev;
-               struct nvm_geo *geo = &dev->geo;
-
-               pblk_err(pblk, "could not async erase line:%d,blk:%d\n",
-                                       pblk_ppa_to_line_id(ppa),
-                                       pblk_ppa_to_pos(geo, ppa));
-       }
-
-       return err;
-}
-
-struct pblk_line *pblk_line_get_data(struct pblk *pblk)
-{
-       return pblk->l_mg.data_line;
-}
-
-/* For now, always erase next line */
-struct pblk_line *pblk_line_get_erase(struct pblk *pblk)
-{
-       return pblk->l_mg.data_next;
-}
-
-int pblk_line_is_full(struct pblk_line *line)
-{
-       return (line->left_msecs == 0);
-}
-
-static void pblk_line_should_sync_meta(struct pblk *pblk)
-{
-       if (pblk_rl_is_limit(&pblk->rl))
-               pblk_line_close_meta_sync(pblk);
-}
-
-void pblk_line_close(struct pblk *pblk, struct pblk_line *line)
-{
-       struct nvm_tgt_dev *dev = pblk->dev;
-       struct nvm_geo *geo = &dev->geo;
-       struct pblk_line_meta *lm = &pblk->lm;
-       struct pblk_line_mgmt *l_mg = &pblk->l_mg;
-       struct list_head *move_list;
-       int i;
-
-#ifdef CONFIG_NVM_PBLK_DEBUG
-       WARN(!bitmap_full(line->map_bitmap, lm->sec_per_line),
-                               "pblk: corrupt closed line %d\n", line->id);
-#endif
-
-       spin_lock(&l_mg->free_lock);
-       WARN_ON(!test_and_clear_bit(line->meta_line, &l_mg->meta_bitmap));
-       spin_unlock(&l_mg->free_lock);
-
-       spin_lock(&l_mg->gc_lock);
-       spin_lock(&line->lock);
-       WARN_ON(line->state != PBLK_LINESTATE_OPEN);
-       line->state = PBLK_LINESTATE_CLOSED;
-       move_list = pblk_line_gc_list(pblk, line);
-       list_add_tail(&line->list, move_list);
-
-       mempool_free(line->map_bitmap, l_mg->bitmap_pool);
-       line->map_bitmap = NULL;
-       line->smeta = NULL;
-       line->emeta = NULL;
-
-       for (i = 0; i < lm->blk_per_line; i++) {
-               struct pblk_lun *rlun = &pblk->luns[i];
-               int pos = pblk_ppa_to_pos(geo, rlun->bppa);
-               int state = line->chks[pos].state;
-
-               if (!(state & NVM_CHK_ST_OFFLINE))
-                       state = NVM_CHK_ST_CLOSED;
-       }
-
-       spin_unlock(&line->lock);
-       spin_unlock(&l_mg->gc_lock);
-
-       trace_pblk_line_state(pblk_disk_name(pblk), line->id,
-                                       line->state);
-}
-
-void pblk_line_close_meta(struct pblk *pblk, struct pblk_line *line)
-{
-       struct pblk_line_mgmt *l_mg = &pblk->l_mg;
-       struct pblk_line_meta *lm = &pblk->lm;
-       struct pblk_emeta *emeta = line->emeta;
-       struct line_emeta *emeta_buf = emeta->buf;
-       struct wa_counters *wa = emeta_to_wa(lm, emeta_buf);
-
-       /* No need for exact vsc value; avoid a big line lock and take aprox. */
-       memcpy(emeta_to_vsc(pblk, emeta_buf), l_mg->vsc_list, lm->vsc_list_len);
-       memcpy(emeta_to_bb(emeta_buf), line->blk_bitmap, lm->blk_bitmap_len);
-
-       wa->user = cpu_to_le64(atomic64_read(&pblk->user_wa));
-       wa->pad = cpu_to_le64(atomic64_read(&pblk->pad_wa));
-       wa->gc = cpu_to_le64(atomic64_read(&pblk->gc_wa));
-
-       if (le32_to_cpu(emeta_buf->header.identifier) != PBLK_MAGIC) {
-               emeta_buf->header.identifier = cpu_to_le32(PBLK_MAGIC);
-               export_guid(emeta_buf->header.uuid, &pblk->instance_uuid);
-               emeta_buf->header.id = cpu_to_le32(line->id);
-               emeta_buf->header.type = cpu_to_le16(line->type);
-               emeta_buf->header.version_major = EMETA_VERSION_MAJOR;
-               emeta_buf->header.version_minor = EMETA_VERSION_MINOR;
-               emeta_buf->header.crc = cpu_to_le32(
-                       pblk_calc_meta_header_crc(pblk, &emeta_buf->header));
-       }
-
-       emeta_buf->nr_valid_lbas = cpu_to_le64(line->nr_valid_lbas);
-       emeta_buf->crc = cpu_to_le32(pblk_calc_emeta_crc(pblk, emeta_buf));
-
-       spin_lock(&l_mg->close_lock);
-       spin_lock(&line->lock);
-
-       /* Update the in-memory start address for emeta, in case it has
-        * shifted due to write errors
-        */
-       if (line->emeta_ssec != line->cur_sec)
-               line->emeta_ssec = line->cur_sec;
-
-       list_add_tail(&line->list, &l_mg->emeta_list);
-       spin_unlock(&line->lock);
-       spin_unlock(&l_mg->close_lock);
-
-       pblk_line_should_sync_meta(pblk);
-}
-
-static void pblk_save_lba_list(struct pblk *pblk, struct pblk_line *line)
-{
-       struct pblk_line_meta *lm = &pblk->lm;
-       unsigned int lba_list_size = lm->emeta_len[2];
-       struct pblk_w_err_gc *w_err_gc = line->w_err_gc;
-       struct pblk_emeta *emeta = line->emeta;
-
-       w_err_gc->lba_list = kvmalloc(lba_list_size, GFP_KERNEL);
-       memcpy(w_err_gc->lba_list, emeta_to_lbas(pblk, emeta->buf),
-                               lba_list_size);
-}
-
-void pblk_line_close_ws(struct work_struct *work)
-{
-       struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
-                                                                       ws);
-       struct pblk *pblk = line_ws->pblk;
-       struct pblk_line *line = line_ws->line;
-       struct pblk_w_err_gc *w_err_gc = line->w_err_gc;
-
-       /* Write errors makes the emeta start address stored in smeta invalid,
-        * so keep a copy of the lba list until we've gc'd the line
-        */
-       if (w_err_gc->has_write_err)
-               pblk_save_lba_list(pblk, line);
-
-       pblk_line_close(pblk, line);
-       mempool_free(line_ws, &pblk->gen_ws_pool);
-}
-
-void pblk_gen_run_ws(struct pblk *pblk, struct pblk_line *line, void *priv,
-                     void (*work)(struct work_struct *), gfp_t gfp_mask,
-                     struct workqueue_struct *wq)
-{
-       struct pblk_line_ws *line_ws;
-
-       line_ws = mempool_alloc(&pblk->gen_ws_pool, gfp_mask);
-       if (!line_ws) {
-               pblk_err(pblk, "pblk: could not allocate memory\n");
-               return;
-       }
-
-       line_ws->pblk = pblk;
-       line_ws->line = line;
-       line_ws->priv = priv;
-
-       INIT_WORK(&line_ws->ws, work);
-       queue_work(wq, &line_ws->ws);
-}
-
-static void __pblk_down_chunk(struct pblk *pblk, int pos)
-{
-       struct pblk_lun *rlun = &pblk->luns[pos];
-       int ret;
-
-       /*
-        * Only send one inflight I/O per LUN. Since we map at a page
-        * granurality, all ppas in the I/O will map to the same LUN
-        */
-
-       ret = down_timeout(&rlun->wr_sem, msecs_to_jiffies(30000));
-       if (ret == -ETIME || ret == -EINTR)
-               pblk_err(pblk, "taking lun semaphore timed out: err %d\n",
-                               -ret);
-}
-
-void pblk_down_chunk(struct pblk *pblk, struct ppa_addr ppa)
-{
-       struct nvm_tgt_dev *dev = pblk->dev;
-       struct nvm_geo *geo = &dev->geo;
-       int pos = pblk_ppa_to_pos(geo, ppa);
-
-       __pblk_down_chunk(pblk, pos);
-}
-
-void pblk_down_rq(struct pblk *pblk, struct ppa_addr ppa,
-                 unsigned long *lun_bitmap)
-{
-       struct nvm_tgt_dev *dev = pblk->dev;
-       struct nvm_geo *geo = &dev->geo;
-       int pos = pblk_ppa_to_pos(geo, ppa);
-
-       /* If the LUN has been locked for this same request, do no attempt to
-        * lock it again
-        */
-       if (test_and_set_bit(pos, lun_bitmap))
-               return;
-
-       __pblk_down_chunk(pblk, pos);
-}
-
-void pblk_up_chunk(struct pblk *pblk, struct ppa_addr ppa)
-{
-       struct nvm_tgt_dev *dev = pblk->dev;
-       struct nvm_geo *geo = &dev->geo;
-       struct pblk_lun *rlun;
-       int pos = pblk_ppa_to_pos(geo, ppa);
-
-       rlun = &pblk->luns[pos];
-       up(&rlun->wr_sem);
-}
-
-void pblk_up_rq(struct pblk *pblk, unsigned long *lun_bitmap)
-{
-       struct nvm_tgt_dev *dev = pblk->dev;
-       struct nvm_geo *geo = &dev->geo;
-       struct pblk_lun *rlun;
-       int num_lun = geo->all_luns;
-       int bit = -1;
-
-       while ((bit = find_next_bit(lun_bitmap, num_lun, bit + 1)) < num_lun) {
-               rlun = &pblk->luns[bit];
-               up(&rlun->wr_sem);
-       }
-}
-
-void pblk_update_map(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
-{
-       struct ppa_addr ppa_l2p;
-
-       /* logic error: lba out-of-bounds. Ignore update */
-       if (!(lba < pblk->capacity)) {
-               WARN(1, "pblk: corrupted L2P map request\n");
-               return;
-       }
-
-       spin_lock(&pblk->trans_lock);
-       ppa_l2p = pblk_trans_map_get(pblk, lba);
-
-       if (!pblk_addr_in_cache(ppa_l2p) && !pblk_ppa_empty(ppa_l2p))
-               pblk_map_invalidate(pblk, ppa_l2p);
-
-       pblk_trans_map_set(pblk, lba, ppa);
-       spin_unlock(&pblk->trans_lock);
-}
-
-void pblk_update_map_cache(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
-{
-
-#ifdef CONFIG_NVM_PBLK_DEBUG
-       /* Callers must ensure that the ppa points to a cache address */
-       BUG_ON(!pblk_addr_in_cache(ppa));
-       BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa)));
-#endif
-
-       pblk_update_map(pblk, lba, ppa);
-}
-
-int pblk_update_map_gc(struct pblk *pblk, sector_t lba, struct ppa_addr ppa_new,
-                      struct pblk_line *gc_line, u64 paddr_gc)
-{
-       struct ppa_addr ppa_l2p, ppa_gc;
-       int ret = 1;
-
-#ifdef CONFIG_NVM_PBLK_DEBUG
-       /* Callers must ensure that the ppa points to a cache address */
-       BUG_ON(!pblk_addr_in_cache(ppa_new));
-       BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa_new)));
-#endif
-
-       /* logic error: lba out-of-bounds. Ignore update */
-       if (!(lba < pblk->capacity)) {
-               WARN(1, "pblk: corrupted L2P map request\n");
-               return 0;
-       }
-
-       spin_lock(&pblk->trans_lock);
-       ppa_l2p = pblk_trans_map_get(pblk, lba);
-       ppa_gc = addr_to_gen_ppa(pblk, paddr_gc, gc_line->id);
-
-       if (!pblk_ppa_comp(ppa_l2p, ppa_gc)) {
-               spin_lock(&gc_line->lock);
-               WARN(!test_bit(paddr_gc, gc_line->invalid_bitmap),
-                                               "pblk: corrupted GC update");
-               spin_unlock(&gc_line->lock);
-
-               ret = 0;
-               goto out;
-       }
-
-       pblk_trans_map_set(pblk, lba, ppa_new);
-out:
-       spin_unlock(&pblk->trans_lock);
-       return ret;
-}
-
-void pblk_update_map_dev(struct pblk *pblk, sector_t lba,
-                        struct ppa_addr ppa_mapped, struct ppa_addr ppa_cache)
-{
-       struct ppa_addr ppa_l2p;
-
-#ifdef CONFIG_NVM_PBLK_DEBUG
-       /* Callers must ensure that the ppa points to a device address */
-       BUG_ON(pblk_addr_in_cache(ppa_mapped));
-#endif
-       /* Invalidate and discard padded entries */
-       if (lba == ADDR_EMPTY) {
-               atomic64_inc(&pblk->pad_wa);
-#ifdef CONFIG_NVM_PBLK_DEBUG
-               atomic_long_inc(&pblk->padded_wb);
-#endif
-               if (!pblk_ppa_empty(ppa_mapped))
-                       pblk_map_invalidate(pblk, ppa_mapped);
-               return;
-       }
-
-       /* logic error: lba out-of-bounds. Ignore update */
-       if (!(lba < pblk->capacity)) {
-               WARN(1, "pblk: corrupted L2P map request\n");
-               return;
-       }
-
-       spin_lock(&pblk->trans_lock);
-       ppa_l2p = pblk_trans_map_get(pblk, lba);
-
-       /* Do not update L2P if the cacheline has been updated. In this case,
-        * the mapped ppa must be invalidated
-        */
-       if (!pblk_ppa_comp(ppa_l2p, ppa_cache)) {
-               if (!pblk_ppa_empty(ppa_mapped))
-                       pblk_map_invalidate(pblk, ppa_mapped);
-               goto out;
-       }
-
-#ifdef CONFIG_NVM_PBLK_DEBUG
-       WARN_ON(!pblk_addr_in_cache(ppa_l2p) && !pblk_ppa_empty(ppa_l2p));
-#endif
-
-       pblk_trans_map_set(pblk, lba, ppa_mapped);
-out:
-       spin_unlock(&pblk->trans_lock);
-}
-
-int pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
-                        sector_t blba, int nr_secs, bool *from_cache)
-{
-       int i;
-
-       spin_lock(&pblk->trans_lock);
-       for (i = 0; i < nr_secs; i++) {
-               struct ppa_addr ppa;
-
-               ppa = ppas[i] = pblk_trans_map_get(pblk, blba + i);
-
-               /* If the L2P entry maps to a line, the reference is valid */
-               if (!pblk_ppa_empty(ppa) && !pblk_addr_in_cache(ppa)) {
-                       struct pblk_line *line = pblk_ppa_to_line(pblk, ppa);
-
-                       if (i > 0 && *from_cache)
-                               break;
-                       *from_cache = false;
-
-                       kref_get(&line->ref);
-               } else {
-                       if (i > 0 && !*from_cache)
-                               break;
-                       *from_cache = true;
-               }
-       }
-       spin_unlock(&pblk->trans_lock);
-       return i;
-}
-
-void pblk_lookup_l2p_rand(struct pblk *pblk, struct ppa_addr *ppas,
-                         u64 *lba_list, int nr_secs)
-{
-       u64 lba;
-       int i;
-
-       spin_lock(&pblk->trans_lock);
-       for (i = 0; i < nr_secs; i++) {
-               lba = lba_list[i];
-               if (lba != ADDR_EMPTY) {
-                       /* logic error: lba out-of-bounds. Ignore update */
-                       if (!(lba < pblk->capacity)) {
-                               WARN(1, "pblk: corrupted L2P map request\n");
-                               continue;
-                       }
-                       ppas[i] = pblk_trans_map_get(pblk, lba);
-               }
-       }
-       spin_unlock(&pblk->trans_lock);
-}
-
-void *pblk_get_meta_for_writes(struct pblk *pblk, struct nvm_rq *rqd)
-{
-       void *buffer;
-
-       if (pblk_is_oob_meta_supported(pblk)) {
-               /* Just use OOB metadata buffer as always */
-               buffer = rqd->meta_list;
-       } else {
-               /* We need to reuse last page of request (packed metadata)
-                * in similar way as traditional oob metadata
-                */
-               buffer = page_to_virt(
-                       rqd->bio->bi_io_vec[rqd->bio->bi_vcnt - 1].bv_page);
-       }
-
-       return buffer;
-}
-
-void pblk_get_packed_meta(struct pblk *pblk, struct nvm_rq *rqd)
-{
-       void *meta_list = rqd->meta_list;
-       void *page;
-       int i = 0;
-
-       if (pblk_is_oob_meta_supported(pblk))
-               return;
-
-       page = page_to_virt(rqd->bio->bi_io_vec[rqd->bio->bi_vcnt - 1].bv_page);
-       /* We need to fill oob meta buffer with data from packed metadata */
-       for (; i < rqd->nr_ppas; i++)
-               memcpy(pblk_get_meta(pblk, meta_list, i),
-                       page + (i * sizeof(struct pblk_sec_meta)),
-                       sizeof(struct pblk_sec_meta));
-}
diff --git a/drivers/lightnvm/pblk-gc.c b/drivers/lightnvm/pblk-gc.c
deleted file mode 100644 (file)
index b31658b..0000000
+++ /dev/null
@@ -1,726 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2016 CNEX Labs
- * Initial release: Javier Gonzalez <javier@cnexlabs.com>
- *                  Matias Bjorling <matias@cnexlabs.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * pblk-gc.c - pblk's garbage collector
- */
-
-#include "pblk.h"
-#include "pblk-trace.h"
-#include <linux/delay.h>
-
-
-static void pblk_gc_free_gc_rq(struct pblk_gc_rq *gc_rq)
-{
-       vfree(gc_rq->data);
-       kfree(gc_rq);
-}
-
-static int pblk_gc_write(struct pblk *pblk)
-{
-       struct pblk_gc *gc = &pblk->gc;
-       struct pblk_gc_rq *gc_rq, *tgc_rq;
-       LIST_HEAD(w_list);
-
-       spin_lock(&gc->w_lock);
-       if (list_empty(&gc->w_list)) {
-               spin_unlock(&gc->w_lock);
-               return 1;
-       }
-
-       list_cut_position(&w_list, &gc->w_list, gc->w_list.prev);
-       gc->w_entries = 0;
-       spin_unlock(&gc->w_lock);
-
-       list_for_each_entry_safe(gc_rq, tgc_rq, &w_list, list) {
-               pblk_write_gc_to_cache(pblk, gc_rq);
-               list_del(&gc_rq->list);
-               kref_put(&gc_rq->line->ref, pblk_line_put);
-               pblk_gc_free_gc_rq(gc_rq);
-       }
-
-       return 0;
-}
-
-static void pblk_gc_writer_kick(struct pblk_gc *gc)
-{
-       wake_up_process(gc->gc_writer_ts);
-}
-
-void pblk_put_line_back(struct pblk *pblk, struct pblk_line *line)
-{
-       struct pblk_line_mgmt *l_mg = &pblk->l_mg;
-       struct list_head *move_list;
-
-       spin_lock(&l_mg->gc_lock);
-       spin_lock(&line->lock);
-       WARN_ON(line->state != PBLK_LINESTATE_GC);
-       line->state = PBLK_LINESTATE_CLOSED;
-       trace_pblk_line_state(pblk_disk_name(pblk), line->id,
-                                       line->state);
-
-       /* We need to reset gc_group in order to ensure that
-        * pblk_line_gc_list will return proper move_list
-        * since right now current line is not on any of the
-        * gc lists.
-        */
-       line->gc_group = PBLK_LINEGC_NONE;
-       move_list = pblk_line_gc_list(pblk, line);
-       spin_unlock(&line->lock);
-       list_add_tail(&line->list, move_list);
-       spin_unlock(&l_mg->gc_lock);
-}
-
-static void pblk_gc_line_ws(struct work_struct *work)
-{
-       struct pblk_line_ws *gc_rq_ws = container_of(work,
-                                               struct pblk_line_ws, ws);
-       struct pblk *pblk = gc_rq_ws->pblk;
-       struct pblk_gc *gc = &pblk->gc;
-       struct pblk_line *line = gc_rq_ws->line;
-       struct pblk_gc_rq *gc_rq = gc_rq_ws->priv;
-       int ret;
-
-       up(&gc->gc_sem);
-
-       /* Read from GC victim block */
-       ret = pblk_submit_read_gc(pblk, gc_rq);
-       if (ret) {
-               line->w_err_gc->has_gc_err = 1;
-               goto out;
-       }
-
-       if (!gc_rq->secs_to_gc)
-               goto out;
-
-retry:
-       spin_lock(&gc->w_lock);
-       if (gc->w_entries >= PBLK_GC_RQ_QD) {
-               spin_unlock(&gc->w_lock);
-               pblk_gc_writer_kick(&pblk->gc);
-               usleep_range(128, 256);
-               goto retry;
-       }
-       gc->w_entries++;
-       list_add_tail(&gc_rq->list, &gc->w_list);
-       spin_unlock(&gc->w_lock);
-
-       pblk_gc_writer_kick(&pblk->gc);
-
-       kfree(gc_rq_ws);
-       return;
-
-out:
-       pblk_gc_free_gc_rq(gc_rq);
-       kref_put(&line->ref, pblk_line_put);
-       kfree(gc_rq_ws);
-}
-
-static __le64 *get_lba_list_from_emeta(struct pblk *pblk,
-                                      struct pblk_line *line)
-{
-       struct line_emeta *emeta_buf;
-       struct pblk_line_meta *lm = &pblk->lm;
-       unsigned int lba_list_size = lm->emeta_len[2];
-       __le64 *lba_list;
-       int ret;
-
-       emeta_buf = kvmalloc(lm->emeta_len[0], GFP_KERNEL);
-       if (!emeta_buf)
-               return NULL;
-
-       ret = pblk_line_emeta_read(pblk, line, emeta_buf);
-       if (ret) {
-               pblk_err(pblk, "line %d read emeta failed (%d)\n",
-                               line->id, ret);
-               kvfree(emeta_buf);
-               return NULL;
-       }
-
-       /* If this read fails, it means that emeta is corrupted.
-        * For now, leave the line untouched.
-        * TODO: Implement a recovery routine that scans and moves
-        * all sectors on the line.
-        */
-
-       ret = pblk_recov_check_emeta(pblk, emeta_buf);
-       if (ret) {
-               pblk_err(pblk, "inconsistent emeta (line %d)\n",
-                               line->id);
-               kvfree(emeta_buf);
-               return NULL;
-       }
-
-       lba_list = kvmalloc(lba_list_size, GFP_KERNEL);
-
-       if (lba_list)
-               memcpy(lba_list, emeta_to_lbas(pblk, emeta_buf), lba_list_size);
-
-       kvfree(emeta_buf);
-
-       return lba_list;
-}
-
-static void pblk_gc_line_prepare_ws(struct work_struct *work)
-{
-       struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
-                                                                       ws);
-       struct pblk *pblk = line_ws->pblk;
-       struct pblk_line *line = line_ws->line;
-       struct pblk_line_meta *lm = &pblk->lm;
-       struct nvm_tgt_dev *dev = pblk->dev;
-       struct nvm_geo *geo = &dev->geo;
-       struct pblk_gc *gc = &pblk->gc;
-       struct pblk_line_ws *gc_rq_ws;
-       struct pblk_gc_rq *gc_rq;
-       __le64 *lba_list;
-       unsigned long *invalid_bitmap;
-       int sec_left, nr_secs, bit;
-
-       invalid_bitmap = kmalloc(lm->sec_bitmap_len, GFP_KERNEL);
-       if (!invalid_bitmap)
-               goto fail_free_ws;
-
-       if (line->w_err_gc->has_write_err) {
-               lba_list = line->w_err_gc->lba_list;
-               line->w_err_gc->lba_list = NULL;
-       } else {
-               lba_list = get_lba_list_from_emeta(pblk, line);
-               if (!lba_list) {
-                       pblk_err(pblk, "could not interpret emeta (line %d)\n",
-                                       line->id);
-                       goto fail_free_invalid_bitmap;
-               }
-       }
-
-       spin_lock(&line->lock);
-       bitmap_copy(invalid_bitmap, line->invalid_bitmap, lm->sec_per_line);
-       sec_left = pblk_line_vsc(line);
-       spin_unlock(&line->lock);
-
-       if (sec_left < 0) {
-               pblk_err(pblk, "corrupted GC line (%d)\n", line->id);
-               goto fail_free_lba_list;
-       }
-
-       bit = -1;
-next_rq:
-       gc_rq = kmalloc(sizeof(struct pblk_gc_rq), GFP_KERNEL);
-       if (!gc_rq)
-               goto fail_free_lba_list;
-
-       nr_secs = 0;
-       do {
-               bit = find_next_zero_bit(invalid_bitmap, lm->sec_per_line,
-                                                               bit + 1);
-               if (bit > line->emeta_ssec)
-                       break;
-
-               gc_rq->paddr_list[nr_secs] = bit;
-               gc_rq->lba_list[nr_secs++] = le64_to_cpu(lba_list[bit]);
-       } while (nr_secs < pblk->max_write_pgs);
-
-       if (unlikely(!nr_secs)) {
-               kfree(gc_rq);
-               goto out;
-       }
-
-       gc_rq->nr_secs = nr_secs;
-       gc_rq->line = line;
-
-       gc_rq->data = vmalloc(array_size(gc_rq->nr_secs, geo->csecs));
-       if (!gc_rq->data)
-               goto fail_free_gc_rq;
-
-       gc_rq_ws = kmalloc(sizeof(struct pblk_line_ws), GFP_KERNEL);
-       if (!gc_rq_ws)
-               goto fail_free_gc_data;
-
-       gc_rq_ws->pblk = pblk;
-       gc_rq_ws->line = line;
-       gc_rq_ws->priv = gc_rq;
-
-       /* The write GC path can be much slower than the read GC one due to
-        * the budget imposed by the rate-limiter. Balance in case that we get
-        * back pressure from the write GC path.
-        */
-       while (down_timeout(&gc->gc_sem, msecs_to_jiffies(30000)))
-               io_schedule();
-
-       kref_get(&line->ref);
-
-       INIT_WORK(&gc_rq_ws->ws, pblk_gc_line_ws);
-       queue_work(gc->gc_line_reader_wq, &gc_rq_ws->ws);
-
-       sec_left -= nr_secs;
-       if (sec_left > 0)
-               goto next_rq;
-
-out:
-       kvfree(lba_list);
-       kfree(line_ws);
-       kfree(invalid_bitmap);
-
-       kref_put(&line->ref, pblk_line_put);
-       atomic_dec(&gc->read_inflight_gc);
-
-       return;
-
-fail_free_gc_data:
-       vfree(gc_rq->data);
-fail_free_gc_rq:
-       kfree(gc_rq);
-fail_free_lba_list:
-       kvfree(lba_list);
-fail_free_invalid_bitmap:
-       kfree(invalid_bitmap);
-fail_free_ws:
-       kfree(line_ws);
-
-       /* Line goes back to closed state, so we cannot release additional
-        * reference for line, since we do that only when we want to do
-        * gc to free line state transition.
-        */
-       pblk_put_line_back(pblk, line);
-       atomic_dec(&gc->read_inflight_gc);
-
-       pblk_err(pblk, "failed to GC line %d\n", line->id);
-}
-
-static int pblk_gc_line(struct pblk *pblk, struct pblk_line *line)
-{
-       struct pblk_gc *gc = &pblk->gc;
-       struct pblk_line_ws *line_ws;
-
-       pblk_debug(pblk, "line '%d' being reclaimed for GC\n", line->id);
-
-       line_ws = kmalloc(sizeof(struct pblk_line_ws), GFP_KERNEL);
-       if (!line_ws)
-               return -ENOMEM;
-
-       line_ws->pblk = pblk;
-       line_ws->line = line;
-
-       atomic_inc(&gc->pipeline_gc);
-       INIT_WORK(&line_ws->ws, pblk_gc_line_prepare_ws);
-       queue_work(gc->gc_reader_wq, &line_ws->ws);
-
-       return 0;
-}
-
/* Wake the GC reader kthread so it picks up newly queued lines. */
static void pblk_gc_reader_kick(struct pblk_gc *gc)
{
	wake_up_process(gc->gc_reader_ts);
}
-
/*
 * Kick all GC actors: the writer and reader kthreads, and - unless GC
 * is being disabled - the main GC kthread plus a re-armed GC timer.
 */
static void pblk_gc_kick(struct pblk *pblk)
{
	struct pblk_gc *gc = &pblk->gc;

	pblk_gc_writer_kick(gc);
	pblk_gc_reader_kick(gc);

	/* If we're shutting down GC, let's not start it up again */
	if (gc->gc_enabled) {
		wake_up_process(gc->gc_ts);
		mod_timer(&gc->gc_timer,
			  jiffies + msecs_to_jiffies(GC_TIME_MSECS));
	}
}
-
/*
 * Take one line off the GC read list and schedule it for collection.
 * Returns 1 when the list is empty, 0 otherwise. If scheduling fails,
 * the line is put back on the list so it can be retried later.
 */
static int pblk_gc_read(struct pblk *pblk)
{
	struct pblk_gc *gc = &pblk->gc;
	struct pblk_line *line;

	spin_lock(&gc->r_lock);
	if (list_empty(&gc->r_list)) {
		spin_unlock(&gc->r_lock);
		return 1;
	}

	line = list_first_entry(&gc->r_list, struct pblk_line, list);
	list_del(&line->list);
	spin_unlock(&gc->r_lock);

	pblk_gc_kick(pblk);

	if (pblk_gc_line(pblk, line)) {
		pblk_err(pblk, "failed to GC line %d\n", line->id);
		/* rollback: requeue the line for a later attempt */
		spin_lock(&gc->r_lock);
		list_add_tail(&line->list, &gc->r_list);
		spin_unlock(&gc->r_lock);
	}

	return 0;
}
-
-static struct pblk_line *pblk_gc_get_victim_line(struct pblk *pblk,
-                                                struct list_head *group_list)
-{
-       struct pblk_line *line, *victim;
-       unsigned int line_vsc = ~0x0L, victim_vsc = ~0x0L;
-
-       victim = list_first_entry(group_list, struct pblk_line, list);
-
-       list_for_each_entry(line, group_list, list) {
-               if (!atomic_read(&line->sec_to_update))
-                       line_vsc = le32_to_cpu(*line->vsc);
-               if (line_vsc < victim_vsc) {
-                       victim = line;
-                       victim_vsc = le32_to_cpu(*victim->vsc);
-               }
-       }
-
-       if (victim_vsc == ~0x0)
-               return NULL;
-
-       return victim;
-}
-
-static bool pblk_gc_should_run(struct pblk_gc *gc, struct pblk_rl *rl)
-{
-       unsigned int nr_blocks_free, nr_blocks_need;
-       unsigned int werr_lines = atomic_read(&rl->werr_lines);
-
-       nr_blocks_need = pblk_rl_high_thrs(rl);
-       nr_blocks_free = pblk_rl_nr_free_blks(rl);
-
-       /* This is not critical, no need to take lock here */
-       return ((werr_lines > 0) ||
-               ((gc->gc_active) && (nr_blocks_need > nr_blocks_free)));
-}
-
/*
 * Return all fully invalid lines (no valid sectors left) to the free
 * list: they need no data movement, only a CLOSED -> GC state
 * transition and a reference drop.
 */
void pblk_gc_free_full_lines(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_gc *gc = &pblk->gc;
	struct pblk_line *line;

	do {
		spin_lock(&l_mg->gc_lock);
		if (list_empty(&l_mg->gc_full_list)) {
			spin_unlock(&l_mg->gc_lock);
			return;
		}

		line = list_first_entry(&l_mg->gc_full_list,
							struct pblk_line, list);

		/* line->lock nests inside l_mg->gc_lock */
		spin_lock(&line->lock);
		WARN_ON(line->state != PBLK_LINESTATE_CLOSED);
		line->state = PBLK_LINESTATE_GC;
		trace_pblk_line_state(pblk_disk_name(pblk), line->id,
					line->state);
		spin_unlock(&line->lock);

		list_del(&line->list);
		spin_unlock(&l_mg->gc_lock);

		atomic_inc(&gc->pipeline_gc);
		/* Drop the list's reference; the line moves towards free */
		kref_put(&line->ref, pblk_line_put);
	} while (1);
}
-
/*
 * Lines with no valid sectors will be returned to the free list immediately. If
 * GC is activated - either because the free block count is under the determined
 * threshold, or because it is being forced from user space - only lines with a
 * high count of invalid sectors will be recycled.
 *
 * Victim lines are moved to the GC read list and the reader kthread is
 * kicked; at most PBLK_GC_L_QD lines are kept in flight.
 */
static void pblk_gc_run(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_gc *gc = &pblk->gc;
	struct pblk_line *line;
	struct list_head *group_list;
	bool run_gc;
	int read_inflight_gc, gc_group = 0, prev_group = 0;

	pblk_gc_free_full_lines(pblk);

	run_gc = pblk_gc_should_run(&pblk->gc, &pblk->rl);
	if (!run_gc || (atomic_read(&gc->read_inflight_gc) >= PBLK_GC_L_QD))
		return;

next_gc_group:
	group_list = l_mg->gc_lists[gc_group++];

	do {
		spin_lock(&l_mg->gc_lock);

		line = pblk_gc_get_victim_line(pblk, group_list);
		if (!line) {
			spin_unlock(&l_mg->gc_lock);
			break;
		}

		/* line->lock nests inside l_mg->gc_lock */
		spin_lock(&line->lock);
		WARN_ON(line->state != PBLK_LINESTATE_CLOSED);
		line->state = PBLK_LINESTATE_GC;
		trace_pblk_line_state(pblk_disk_name(pblk), line->id,
					line->state);
		spin_unlock(&line->lock);

		list_del(&line->list);
		spin_unlock(&l_mg->gc_lock);

		/* Hand the victim to the reader kthread via the read list */
		spin_lock(&gc->r_lock);
		list_add_tail(&line->list, &gc->r_list);
		spin_unlock(&gc->r_lock);

		read_inflight_gc = atomic_inc_return(&gc->read_inflight_gc);
		pblk_gc_reader_kick(gc);

		prev_group = 1;

		/* No need to queue up more GC lines than we can handle */
		run_gc = pblk_gc_should_run(&pblk->gc, &pblk->rl);
		if (!run_gc || read_inflight_gc >= PBLK_GC_L_QD)
			break;
	} while (1);

	/* Nothing found in this group: try the next, hotter group */
	if (!prev_group && pblk->rl.rb_state > gc_group &&
						gc_group < PBLK_GC_NR_LISTS)
		goto next_gc_group;
}
-
/* Periodic GC timer callback: kick the GC state machine. */
static void pblk_gc_timer(struct timer_list *t)
{
	struct pblk *pblk = from_timer(pblk, t, gc.gc_timer);

	pblk_gc_kick(pblk);
}
-
/*
 * Main GC kthread: run one GC pass per wakeup, then sleep until
 * kicked again (by the GC timer or pblk_gc_kick()).
 */
static int pblk_gc_ts(void *data)
{
	struct pblk *pblk = data;

	while (!kthread_should_stop()) {
		pblk_gc_run(pblk);
		set_current_state(TASK_INTERRUPTIBLE);
		io_schedule();
	}

	return 0;
}
-
/*
 * GC writer kthread: drain the GC write queue; sleep only when
 * pblk_gc_write() reports there is nothing left to write.
 */
static int pblk_gc_writer_ts(void *data)
{
	struct pblk *pblk = data;

	while (!kthread_should_stop()) {
		if (!pblk_gc_write(pblk))
			continue;
		set_current_state(TASK_INTERRUPTIBLE);
		io_schedule();
	}

	return 0;
}
-
/*
 * GC reader kthread: dispatch queued victim lines for collection; on
 * shutdown, wait for the whole GC pipeline to drain before returning.
 */
static int pblk_gc_reader_ts(void *data)
{
	struct pblk *pblk = data;
	struct pblk_gc *gc = &pblk->gc;

	while (!kthread_should_stop()) {
		if (!pblk_gc_read(pblk))
			continue;
		set_current_state(TASK_INTERRUPTIBLE);
		io_schedule();
	}

#ifdef CONFIG_NVM_PBLK_DEBUG
	pblk_info(pblk, "flushing gc pipeline, %d lines left\n",
		atomic_read(&gc->pipeline_gc));
#endif

	/* Drain: spin-yield until all in-flight GC work has completed */
	do {
		if (!atomic_read(&gc->pipeline_gc))
			break;

		schedule();
	} while (1);

	return 0;
}
-
/* Mark GC as active. Callers are expected to kick GC afterwards. */
static void pblk_gc_start(struct pblk *pblk)
{
	pblk->gc.gc_active = 1;
	pblk_debug(pblk, "gc start\n");
}
-
/* Start and kick GC if it is enabled but not yet running. */
void pblk_gc_should_start(struct pblk *pblk)
{
	struct pblk_gc *gc = &pblk->gc;

	if (gc->gc_enabled && !gc->gc_active) {
		pblk_gc_start(pblk);
		pblk_gc_kick(pblk);
	}
}
-
/* Deactivate GC, unless user space is forcing it to stay on. */
void pblk_gc_should_stop(struct pblk *pblk)
{
	struct pblk_gc *gc = &pblk->gc;

	if (gc->gc_active && !gc->gc_forced)
		gc->gc_active = 0;
}
-
/* Re-evaluate the rate limiter; this may in turn start or stop GC. */
void pblk_gc_should_kick(struct pblk *pblk)
{
	pblk_rl_update_rates(&pblk->rl);
}
-
/* Sysfs helper: snapshot the enabled/active GC flags under gc->lock. */
void pblk_gc_sysfs_state_show(struct pblk *pblk, int *gc_enabled,
			      int *gc_active)
{
	struct pblk_gc *gc = &pblk->gc;

	spin_lock(&gc->lock);
	*gc_enabled = gc->gc_enabled;
	*gc_active = gc->gc_active;
	spin_unlock(&gc->lock);
}
-
-int pblk_gc_sysfs_force(struct pblk *pblk, int force)
-{
-       struct pblk_gc *gc = &pblk->gc;
-
-       if (force < 0 || force > 1)
-               return -EINVAL;
-
-       spin_lock(&gc->lock);
-       gc->gc_forced = force;
-
-       if (force)
-               gc->gc_enabled = 1;
-       else
-               gc->gc_enabled = 0;
-       spin_unlock(&gc->lock);
-
-       pblk_gc_should_start(pblk);
-
-       return 0;
-}
-
/*
 * Create the GC machinery: three kthreads (main, writer, reader), two
 * workqueues, the GC timer and the associated locks/lists/counters.
 * On failure, everything created so far is torn down in reverse order.
 * Returns 0 on success or a negative errno.
 */
int pblk_gc_init(struct pblk *pblk)
{
	struct pblk_gc *gc = &pblk->gc;
	int ret;

	gc->gc_ts = kthread_create(pblk_gc_ts, pblk, "pblk-gc-ts");
	if (IS_ERR(gc->gc_ts)) {
		pblk_err(pblk, "could not allocate GC main kthread\n");
		return PTR_ERR(gc->gc_ts);
	}

	gc->gc_writer_ts = kthread_create(pblk_gc_writer_ts, pblk,
							"pblk-gc-writer-ts");
	if (IS_ERR(gc->gc_writer_ts)) {
		pblk_err(pblk, "could not allocate GC writer kthread\n");
		ret = PTR_ERR(gc->gc_writer_ts);
		goto fail_free_main_kthread;
	}

	gc->gc_reader_ts = kthread_create(pblk_gc_reader_ts, pblk,
							"pblk-gc-reader-ts");
	if (IS_ERR(gc->gc_reader_ts)) {
		pblk_err(pblk, "could not allocate GC reader kthread\n");
		ret = PTR_ERR(gc->gc_reader_ts);
		goto fail_free_writer_kthread;
	}

	/* Arm the periodic GC kick timer */
	timer_setup(&gc->gc_timer, pblk_gc_timer, 0);
	mod_timer(&gc->gc_timer, jiffies + msecs_to_jiffies(GC_TIME_MSECS));

	gc->gc_active = 0;
	gc->gc_forced = 0;
	gc->gc_enabled = 1;
	gc->w_entries = 0;
	atomic_set(&gc->read_inflight_gc, 0);
	atomic_set(&gc->pipeline_gc, 0);

	/* Workqueue that reads valid sectors from a line and submit them to the
	 * GC writer to be recycled.
	 */
	gc->gc_line_reader_wq = alloc_workqueue("pblk-gc-line-reader-wq",
			WQ_MEM_RECLAIM | WQ_UNBOUND, PBLK_GC_MAX_READERS);
	if (!gc->gc_line_reader_wq) {
		pblk_err(pblk, "could not allocate GC line reader workqueue\n");
		ret = -ENOMEM;
		goto fail_free_reader_kthread;
	}

	/* Workqueue that prepare lines for GC */
	gc->gc_reader_wq = alloc_workqueue("pblk-gc-line_wq",
					WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
	if (!gc->gc_reader_wq) {
		pblk_err(pblk, "could not allocate GC reader workqueue\n");
		ret = -ENOMEM;
		goto fail_free_reader_line_wq;
	}

	spin_lock_init(&gc->lock);
	spin_lock_init(&gc->w_lock);
	spin_lock_init(&gc->r_lock);

	sema_init(&gc->gc_sem, PBLK_GC_RQ_QD);

	INIT_LIST_HEAD(&gc->w_list);
	INIT_LIST_HEAD(&gc->r_list);

	return 0;

fail_free_reader_line_wq:
	destroy_workqueue(gc->gc_line_reader_wq);
fail_free_reader_kthread:
	kthread_stop(gc->gc_reader_ts);
fail_free_writer_kthread:
	kthread_stop(gc->gc_writer_ts);
fail_free_main_kthread:
	kthread_stop(gc->gc_ts);

	return ret;
}
-
/*
 * Tear down GC. The order is deliberate: disable GC and kill the
 * timer first, then stop the producers (main/reader kthreads), flush
 * and destroy the workqueues, and finally stop the writer kthread.
 * @graceful: when true, flush pending GC work before destruction.
 */
void pblk_gc_exit(struct pblk *pblk, bool graceful)
{
	struct pblk_gc *gc = &pblk->gc;

	gc->gc_enabled = 0;
	del_timer_sync(&gc->gc_timer);
	gc->gc_active = 0;

	if (gc->gc_ts)
		kthread_stop(gc->gc_ts);

	if (gc->gc_reader_ts)
		kthread_stop(gc->gc_reader_ts);

	if (graceful) {
		flush_workqueue(gc->gc_reader_wq);
		flush_workqueue(gc->gc_line_reader_wq);
	}

	destroy_workqueue(gc->gc_reader_wq);
	destroy_workqueue(gc->gc_line_reader_wq);

	if (gc->gc_writer_ts)
		kthread_stop(gc->gc_writer_ts);
}
diff --git a/drivers/lightnvm/pblk-init.c b/drivers/lightnvm/pblk-init.c
deleted file mode 100644 (file)
index 5924f09..0000000
+++ /dev/null
@@ -1,1324 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2015 IT University of Copenhagen (rrpc.c)
- * Copyright (C) 2016 CNEX Labs
- * Initial release: Javier Gonzalez <javier@cnexlabs.com>
- *                  Matias Bjorling <matias@cnexlabs.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * Implementation of a physical block-device target for Open-channel SSDs.
- *
- * pblk-init.c - pblk's initialization.
- */
-
-#include "pblk.h"
-#include "pblk-trace.h"
-
/* Optional module-parameter override for the write buffer size. */
static unsigned int write_buffer_size;

module_param(write_buffer_size, uint, 0644);
MODULE_PARM_DESC(write_buffer_size, "number of entries in a write buffer");

/* Slab caches shared by all pblk instances, refcounted via @kref. */
struct pblk_global_caches {
	struct kmem_cache	*ws;
	struct kmem_cache	*rec;
	struct kmem_cache	*g_rq;
	struct kmem_cache	*w_rq;

	struct kref		kref;

	struct mutex		mutex; /* Ensures consistency between
					* caches and kref
					*/
};

/* kref starts at 0: caches are created on first get */
static struct pblk_global_caches pblk_caches = {
	.mutex = __MUTEX_INITIALIZER(pblk_caches.mutex),
	.kref = KREF_INIT(0),
};

/* Shared bio_set for pblk-internal bios */
struct bio_set pblk_bio_set;
-
/*
 * Entry point for bios submitted to the pblk block device: discards
 * are handled inline (unless they also carry a flush), reads go
 * through the read path, everything else is buffered in the write
 * cache.
 */
static blk_qc_t pblk_submit_bio(struct bio *bio)
{
	struct pblk *pblk = bio->bi_bdev->bd_disk->queue->queuedata;

	if (bio_op(bio) == REQ_OP_DISCARD) {
		pblk_discard(pblk, bio);
		/* A discard with REQ_PREFLUSH falls through as a flush */
		if (!(bio->bi_opf & REQ_PREFLUSH)) {
			bio_endio(bio);
			return BLK_QC_T_NONE;
		}
	}

	/* Read requests must be <= 256kb due to NVMe's 64 bit completion bitmap
	 * constraint. Writes can be of arbitrary size.
	 */
	if (bio_data_dir(bio) == READ) {
		blk_queue_split(&bio);
		pblk_submit_read(pblk, bio);
	} else {
		/* Prevent deadlock in the case of a modest LUN configuration
		 * and large user I/Os. Unless stalled, the rate limiter
		 * leaves at least 256KB available for user I/O.
		 */
		if (pblk_get_secs(bio) > pblk_rl_max_io(&pblk->rl))
			blk_queue_split(&bio);

		pblk_write_to_cache(pblk, bio, PBLK_IOTYPE_USER);
	}

	return BLK_QC_T_NONE;
}
-
/* Block-device operations exposed by the pblk target. */
static const struct block_device_operations pblk_bops = {
	.owner		= THIS_MODULE,
	.submit_bio	= pblk_submit_bio,
};
-
-
-static size_t pblk_trans_map_size(struct pblk *pblk)
-{
-       int entry_size = 8;
-
-       if (pblk->addrf_len < 32)
-               entry_size = 4;
-
-       return entry_size * pblk->capacity;
-}
-
#ifdef CONFIG_NVM_PBLK_DEBUG
/* Debug-only CRC32 over the whole L2P table, for recovery checks. */
static u32 pblk_l2p_crc(struct pblk *pblk)
{
	size_t map_size;
	u32 crc = ~(u32)0;

	map_size = pblk_trans_map_size(pblk);
	crc = crc32_le(crc, pblk->trans_map, map_size);
	return crc;
}
#endif
-
/* Release the logical-to-physical translation table. */
static void pblk_l2p_free(struct pblk *pblk)
{
	vfree(pblk->trans_map);
}
-
/*
 * Populate the L2P table: on factory init just generate a fresh
 * instance UUID; otherwise recover the table from the device. Fully
 * invalid lines are then freed and, if recovery did not leave an open
 * line, the first data line is prepared. Returns 0 or -EFAULT.
 */
static int pblk_l2p_recover(struct pblk *pblk, bool factory_init)
{
	struct pblk_line *line = NULL;

	if (factory_init) {
		guid_gen(&pblk->instance_uuid);
	} else {
		line = pblk_recov_l2p(pblk);
		if (IS_ERR(line)) {
			pblk_err(pblk, "could not recover l2p table\n");
			return -EFAULT;
		}
	}

#ifdef CONFIG_NVM_PBLK_DEBUG
	pblk_info(pblk, "init: L2P CRC: %x\n", pblk_l2p_crc(pblk));
#endif

	/* Free full lines directly as GC has not been started yet */
	pblk_gc_free_full_lines(pblk);

	if (!line) {
		/* Configure next line for user data */
		line = pblk_line_get_first_data(pblk);
		if (!line)
			return -EFAULT;
	}

	return 0;
}
-
/*
 * Allocate the L2P table, initialize every entry to the empty PPA and
 * run recovery. The table is freed again if recovery fails. Returns 0
 * on success or a negative errno.
 */
static int pblk_l2p_init(struct pblk *pblk, bool factory_init)
{
	sector_t i;
	struct ppa_addr ppa;
	size_t map_size;
	int ret = 0;

	map_size = pblk_trans_map_size(pblk);
	/* The table can be huge; allow retry but avoid OOM warnings */
	pblk->trans_map = __vmalloc(map_size, GFP_KERNEL | __GFP_NOWARN |
				    __GFP_RETRY_MAYFAIL | __GFP_HIGHMEM);
	if (!pblk->trans_map) {
		pblk_err(pblk, "failed to allocate L2P (need %zu of memory)\n",
				map_size);
		return -ENOMEM;
	}

	pblk_ppa_set_empty(&ppa);

	for (i = 0; i < pblk->capacity; i++)
		pblk_trans_map_set(pblk, i, ppa);

	ret = pblk_l2p_recover(pblk, factory_init);
	if (ret)
		vfree(pblk->trans_map);

	return ret;
}
-
/* Tear down the write buffer, warning if it is not in a clean state. */
static void pblk_rwb_free(struct pblk *pblk)
{
	if (pblk_rb_tear_down_check(&pblk->rwb))
		pblk_err(pblk, "write buffer error on tear down\n");

	pblk_rb_free(&pblk->rwb);
}
-
-static int pblk_rwb_init(struct pblk *pblk)
-{
-       struct nvm_tgt_dev *dev = pblk->dev;
-       struct nvm_geo *geo = &dev->geo;
-       unsigned long buffer_size;
-       int pgs_in_buffer, threshold;
-
-       threshold = geo->mw_cunits * geo->all_luns;
-       pgs_in_buffer = (max(geo->mw_cunits, geo->ws_opt) + geo->ws_opt)
-                                                               * geo->all_luns;
-
-       if (write_buffer_size && (write_buffer_size > pgs_in_buffer))
-               buffer_size = write_buffer_size;
-       else
-               buffer_size = pgs_in_buffer;
-
-       return pblk_rb_init(&pblk->rwb, buffer_size, threshold, geo->csecs);
-}
-
/*
 * Build the 1.2-spec physical address format in @dst from the device
 * geometry, packing the fields as sec|pln|ch|lun|pg|blk from bit 0
 * upwards. Channel and LUN counts must be powers of two. Returns the
 * total address width in bits, or -EINVAL.
 */
static int pblk_set_addrf_12(struct pblk *pblk, struct nvm_geo *geo,
			     struct nvm_addrf_12 *dst)
{
	struct nvm_addrf_12 *src = (struct nvm_addrf_12 *)&geo->addrf;
	int power_len;

	/* Re-calculate channel and lun format to adapt to configuration */
	power_len = get_count_order(geo->num_ch);
	if (1 << power_len != geo->num_ch) {
		pblk_err(pblk, "supports only power-of-two channel config.\n");
		return -EINVAL;
	}
	dst->ch_len = power_len;

	power_len = get_count_order(geo->num_lun);
	if (1 << power_len != geo->num_lun) {
		pblk_err(pblk, "supports only power-of-two LUN config.\n");
		return -EINVAL;
	}
	dst->lun_len = power_len;

	dst->blk_len = src->blk_len;
	dst->pg_len = src->pg_len;
	dst->pln_len = src->pln_len;
	dst->sec_len = src->sec_len;

	/* Field offsets: each field starts where the previous one ends */
	dst->sec_offset = 0;
	dst->pln_offset = dst->sec_len;
	dst->ch_offset = dst->pln_offset + dst->pln_len;
	dst->lun_offset = dst->ch_offset + dst->ch_len;
	dst->pg_offset = dst->lun_offset + dst->lun_len;
	dst->blk_offset = dst->pg_offset + dst->pg_len;

	dst->sec_mask = ((1ULL << dst->sec_len) - 1) << dst->sec_offset;
	dst->pln_mask = ((1ULL << dst->pln_len) - 1) << dst->pln_offset;
	dst->ch_mask = ((1ULL << dst->ch_len) - 1) << dst->ch_offset;
	dst->lun_mask = ((1ULL << dst->lun_len) - 1) << dst->lun_offset;
	dst->pg_mask = ((1ULL << dst->pg_len) - 1) << dst->pg_offset;
	dst->blk_mask = ((1ULL << dst->blk_len) - 1) << dst->blk_offset;

	return dst->blk_offset + src->blk_len;
}
-
/*
 * Build the 2.0-spec address format: the device format (sec|ch|lun|chk
 * from bit 0) in @adst and the user-space striping parameters in
 * @udst. Returns the total address width in bits.
 */
static int pblk_set_addrf_20(struct nvm_geo *geo, struct nvm_addrf *adst,
			     struct pblk_addrf *udst)
{
	struct nvm_addrf *src = &geo->addrf;

	adst->ch_len = get_count_order(geo->num_ch);
	adst->lun_len = get_count_order(geo->num_lun);
	adst->chk_len = src->chk_len;
	adst->sec_len = src->sec_len;

	adst->sec_offset = 0;
	adst->ch_offset = adst->sec_len;
	adst->lun_offset = adst->ch_offset + adst->ch_len;
	adst->chk_offset = adst->lun_offset + adst->lun_len;

	adst->sec_mask = ((1ULL << adst->sec_len) - 1) << adst->sec_offset;
	adst->chk_mask = ((1ULL << adst->chk_len) - 1) << adst->chk_offset;
	adst->lun_mask = ((1ULL << adst->lun_len) - 1) << adst->lun_offset;
	adst->ch_mask = ((1ULL << adst->ch_len) - 1) << adst->ch_offset;

	/* Striping order: sectors across channels, then across LUNs */
	udst->sec_stripe = geo->ws_opt;
	udst->ch_stripe = geo->num_ch;
	udst->lun_stripe = geo->num_lun;

	udst->sec_lun_stripe = udst->sec_stripe * udst->ch_stripe;
	udst->sec_ws_stripe = udst->sec_lun_stripe * udst->lun_stripe;

	return adst->chk_offset + adst->chk_len;
}
-
/*
 * Select and build the address format matching the device's OCSSD
 * spec revision (1.2 or 2.0). Returns 0 on success or -EINVAL.
 */
static int pblk_set_addrf(struct pblk *pblk)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	int mod;

	switch (geo->version) {
	case NVM_OCSSD_SPEC_12:
		/* 1.2 requires sectors-per-block divisible by write size */
		div_u64_rem(geo->clba, pblk->min_write_pgs, &mod);
		if (mod) {
			pblk_err(pblk, "bad configuration of sectors/pages\n");
			return -EINVAL;
		}

		pblk->addrf_len = pblk_set_addrf_12(pblk, geo,
							(void *)&pblk->addrf);
		break;
	case NVM_OCSSD_SPEC_20:
		pblk->addrf_len = pblk_set_addrf_20(geo, (void *)&pblk->addrf,
							&pblk->uaddrf);
		break;
	default:
		pblk_err(pblk, "OCSSD revision not supported (%d)\n",
								geo->version);
		return -EINVAL;
	}

	return 0;
}
-
/*
 * Create the four global slab caches shared by all pblk instances.
 * On any failure the caches created so far are destroyed in reverse
 * order. Returns 0 or -ENOMEM.
 */
static int pblk_create_global_caches(void)
{

	pblk_caches.ws = kmem_cache_create("pblk_blk_ws",
				sizeof(struct pblk_line_ws), 0, 0, NULL);
	if (!pblk_caches.ws)
		return -ENOMEM;

	pblk_caches.rec = kmem_cache_create("pblk_rec",
				sizeof(struct pblk_rec_ctx), 0, 0, NULL);
	if (!pblk_caches.rec)
		goto fail_destroy_ws;

	pblk_caches.g_rq = kmem_cache_create("pblk_g_rq", pblk_g_rq_size,
				0, 0, NULL);
	if (!pblk_caches.g_rq)
		goto fail_destroy_rec;

	pblk_caches.w_rq = kmem_cache_create("pblk_w_rq", pblk_w_rq_size,
				0, 0, NULL);
	if (!pblk_caches.w_rq)
		goto fail_destroy_g_rq;

	return 0;

fail_destroy_g_rq:
	kmem_cache_destroy(pblk_caches.g_rq);
fail_destroy_rec:
	kmem_cache_destroy(pblk_caches.rec);
fail_destroy_ws:
	kmem_cache_destroy(pblk_caches.ws);

	return -ENOMEM;
}
-
/*
 * Take a reference on the global caches, creating them on first use.
 * Serialized by pblk_caches.mutex so creation and the kref stay
 * consistent. Returns 0 or a negative errno.
 */
static int pblk_get_global_caches(void)
{
	int ret = 0;

	mutex_lock(&pblk_caches.mutex);

	/* Caches already exist: just take another reference */
	if (kref_get_unless_zero(&pblk_caches.kref))
		goto out;

	ret = pblk_create_global_caches();
	if (!ret)
		kref_init(&pblk_caches.kref);

out:
	mutex_unlock(&pblk_caches.mutex);
	return ret;
}
-
-static void pblk_destroy_global_caches(struct kref *ref)
-{
-       struct pblk_global_caches *c;
-
-       c = container_of(ref, struct pblk_global_caches, kref);
-
-       kmem_cache_destroy(c->ws);
-       kmem_cache_destroy(c->rec);
-       kmem_cache_destroy(c->g_rq);
-       kmem_cache_destroy(c->w_rq);
-}
-
/* Drop a reference on the global caches; last put destroys them. */
static void pblk_put_global_caches(void)
{
	mutex_lock(&pblk_caches.mutex);
	kref_put(&pblk_caches.kref, pblk_destroy_global_caches);
	mutex_unlock(&pblk_caches.mutex);
}
-
/*
 * Initialize pblk core state: write-amplification counters, write
 * sizing (including the packed-metadata fallback for drives without
 * OOB metadata), mempools, workqueues and the address format. On
 * failure everything acquired so far is released in reverse order.
 * Returns 0 or a negative errno.
 */
static int pblk_core_init(struct pblk *pblk)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	int ret, max_write_ppas;

	atomic64_set(&pblk->user_wa, 0);
	atomic64_set(&pblk->pad_wa, 0);
	atomic64_set(&pblk->gc_wa, 0);
	pblk->user_rst_wa = 0;
	pblk->pad_rst_wa = 0;
	pblk->gc_rst_wa = 0;

	atomic64_set(&pblk->nr_flush, 0);
	pblk->nr_flush_rst = 0;

	/* Cap the write size by NVM_MAX_VLBA and the queue's HW limit */
	pblk->min_write_pgs = geo->ws_opt;
	pblk->min_write_pgs_data = pblk->min_write_pgs;
	max_write_ppas = pblk->min_write_pgs * geo->all_luns;
	pblk->max_write_pgs = min_t(int, max_write_ppas, NVM_MAX_VLBA);
	pblk->max_write_pgs = min_t(int, pblk->max_write_pgs,
		queue_max_hw_sectors(dev->q) / (geo->csecs >> SECTOR_SHIFT));
	pblk_set_sec_per_write(pblk, pblk->min_write_pgs);

	pblk->oob_meta_size = geo->sos;
	if (!pblk_is_oob_meta_supported(pblk)) {
		/* For drives which do not have the OOB metadata feature,
		 * in order to support recovery we need to use so called
		 * packed metadata. Packed metadata will store the same
		 * information as OOB metadata (l2p table mapping), but in
		 * the form of a single page at the end of every write
		 * request.
		 */
		if (pblk->min_write_pgs
			* sizeof(struct pblk_sec_meta) > PAGE_SIZE) {
			/* We want to keep all the packed metadata on single
			 * page per write requests. So we need to ensure that
			 * it will fit.
			 *
			 * This is more like sanity check, since there is
			 * no device with such a big minimal write size
			 * (above 1 megabyte).
			 */
			pblk_err(pblk, "Not supported min write size\n");
			return -EINVAL;
		}
		/* For packed meta approach we do some simplification.
		 * On read path we always issue requests which size
		 * equal to max_write_pgs, with all pages filled with
		 * user payload except of last one page which will be
		 * filled with packed metadata.
		 */
		pblk->max_write_pgs = pblk->min_write_pgs;
		pblk->min_write_pgs_data = pblk->min_write_pgs - 1;
	}

	pblk->pad_dist = kcalloc(pblk->min_write_pgs - 1, sizeof(atomic64_t),
								GFP_KERNEL);
	if (!pblk->pad_dist)
		return -ENOMEM;

	if (pblk_get_global_caches())
		goto fail_free_pad_dist;

	/* Internal bios can be at most the sectors signaled by the device. */
	ret = mempool_init_page_pool(&pblk->page_bio_pool, NVM_MAX_VLBA, 0);
	if (ret)
		goto free_global_caches;

	ret = mempool_init_slab_pool(&pblk->gen_ws_pool, PBLK_GEN_WS_POOL_SIZE,
				     pblk_caches.ws);
	if (ret)
		goto free_page_bio_pool;

	ret = mempool_init_slab_pool(&pblk->rec_pool, geo->all_luns,
				     pblk_caches.rec);
	if (ret)
		goto free_gen_ws_pool;

	ret = mempool_init_slab_pool(&pblk->r_rq_pool, geo->all_luns,
				     pblk_caches.g_rq);
	if (ret)
		goto free_rec_pool;

	ret = mempool_init_slab_pool(&pblk->e_rq_pool, geo->all_luns,
				     pblk_caches.g_rq);
	if (ret)
		goto free_r_rq_pool;

	ret = mempool_init_slab_pool(&pblk->w_rq_pool, geo->all_luns,
				     pblk_caches.w_rq);
	if (ret)
		goto free_e_rq_pool;

	pblk->close_wq = alloc_workqueue("pblk-close-wq",
			WQ_MEM_RECLAIM | WQ_UNBOUND, PBLK_NR_CLOSE_JOBS);
	if (!pblk->close_wq)
		goto free_w_rq_pool;

	pblk->bb_wq = alloc_workqueue("pblk-bb-wq",
			WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
	if (!pblk->bb_wq)
		goto free_close_wq;

	pblk->r_end_wq = alloc_workqueue("pblk-read-end-wq",
			WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
	if (!pblk->r_end_wq)
		goto free_bb_wq;

	if (pblk_set_addrf(pblk))
		goto free_r_end_wq;

	INIT_LIST_HEAD(&pblk->compl_list);
	INIT_LIST_HEAD(&pblk->resubmit_list);

	return 0;

free_r_end_wq:
	destroy_workqueue(pblk->r_end_wq);
free_bb_wq:
	destroy_workqueue(pblk->bb_wq);
free_close_wq:
	destroy_workqueue(pblk->close_wq);
free_w_rq_pool:
	mempool_exit(&pblk->w_rq_pool);
free_e_rq_pool:
	mempool_exit(&pblk->e_rq_pool);
free_r_rq_pool:
	mempool_exit(&pblk->r_rq_pool);
free_rec_pool:
	mempool_exit(&pblk->rec_pool);
free_gen_ws_pool:
	mempool_exit(&pblk->gen_ws_pool);
free_page_bio_pool:
	mempool_exit(&pblk->page_bio_pool);
free_global_caches:
	pblk_put_global_caches();
fail_free_pad_dist:
	kfree(pblk->pad_dist);
	return -ENOMEM;
}
-
/*
 * Release everything acquired by pblk_core_init(): workqueues,
 * mempools, the global-cache reference and the pad distribution array.
 */
static void pblk_core_free(struct pblk *pblk)
{
	if (pblk->close_wq)
		destroy_workqueue(pblk->close_wq);

	if (pblk->r_end_wq)
		destroy_workqueue(pblk->r_end_wq);

	if (pblk->bb_wq)
		destroy_workqueue(pblk->bb_wq);

	mempool_exit(&pblk->page_bio_pool);
	mempool_exit(&pblk->gen_ws_pool);
	mempool_exit(&pblk->rec_pool);
	mempool_exit(&pblk->r_rq_pool);
	mempool_exit(&pblk->e_rq_pool);
	mempool_exit(&pblk->w_rq_pool);

	pblk_put_global_caches();
	kfree(pblk->pad_dist);
}
-
-static void pblk_line_mg_free(struct pblk *pblk)
-{
-       struct pblk_line_mgmt *l_mg = &pblk->l_mg;
-       int i;
-
-       kfree(l_mg->bb_template);
-       kfree(l_mg->bb_aux);
-       kfree(l_mg->vsc_list);
-
-       for (i = 0; i < PBLK_DATA_LINES; i++) {
-               kfree(l_mg->sline_meta[i]);
-               kvfree(l_mg->eline_meta[i]->buf);
-               kfree(l_mg->eline_meta[i]);
-       }
-
-       mempool_destroy(l_mg->bitmap_pool);
-       kmem_cache_destroy(l_mg->bitmap_cache);
-}
-
-static void pblk_line_meta_free(struct pblk_line_mgmt *l_mg,
-                               struct pblk_line *line)
-{
-       struct pblk_w_err_gc *w_err_gc = line->w_err_gc;
-
-       kfree(line->blk_bitmap);
-       kfree(line->erase_bitmap);
-       kfree(line->chks);
-
-       kvfree(w_err_gc->lba_list);
-       kfree(w_err_gc);
-}
-
-static void pblk_lines_free(struct pblk *pblk)
-{
-       struct pblk_line_mgmt *l_mg = &pblk->l_mg;
-       struct pblk_line *line;
-       int i;
-
-       for (i = 0; i < l_mg->nr_lines; i++) {
-               line = &pblk->lines[i];
-
-               pblk_line_free(line);
-               pblk_line_meta_free(l_mg, line);
-       }
-
-       pblk_line_mg_free(pblk);
-
-       kfree(pblk->luns);
-       kfree(pblk->lines);
-}
-
-static int pblk_luns_init(struct pblk *pblk)
-{
-       struct nvm_tgt_dev *dev = pblk->dev;
-       struct nvm_geo *geo = &dev->geo;
-       struct pblk_lun *rlun;
-       int i;
-
-       /* TODO: Implement unbalanced LUN support */
-       if (geo->num_lun < 0) {
-               pblk_err(pblk, "unbalanced LUN config.\n");
-               return -EINVAL;
-       }
-
-       pblk->luns = kcalloc(geo->all_luns, sizeof(struct pblk_lun),
-                                                               GFP_KERNEL);
-       if (!pblk->luns)
-               return -ENOMEM;
-
-       for (i = 0; i < geo->all_luns; i++) {
-               /* Stripe across channels */
-               int ch = i % geo->num_ch;
-               int lun_raw = i / geo->num_ch;
-               int lunid = lun_raw + ch * geo->num_lun;
-
-               rlun = &pblk->luns[i];
-               rlun->bppa = dev->luns[lunid];
-
-               sema_init(&rlun->wr_sem, 1);
-       }
-
-       return 0;
-}
-
-/* See comment over struct line_emeta definition */
-static unsigned int calc_emeta_len(struct pblk *pblk)
-{
-       struct pblk_line_meta *lm = &pblk->lm;
-       struct pblk_line_mgmt *l_mg = &pblk->l_mg;
-       struct nvm_tgt_dev *dev = pblk->dev;
-       struct nvm_geo *geo = &dev->geo;
-
-       /* Round to sector size so that lba_list starts on its own sector */
-       lm->emeta_sec[1] = DIV_ROUND_UP(
-                       sizeof(struct line_emeta) + lm->blk_bitmap_len +
-                       sizeof(struct wa_counters), geo->csecs);
-       lm->emeta_len[1] = lm->emeta_sec[1] * geo->csecs;
-
-       /* Round to sector size so that vsc_list starts on its own sector */
-       lm->dsec_per_line = lm->sec_per_line - lm->emeta_sec[0];
-       lm->emeta_sec[2] = DIV_ROUND_UP(lm->dsec_per_line * sizeof(u64),
-                       geo->csecs);
-       lm->emeta_len[2] = lm->emeta_sec[2] * geo->csecs;
-
-       lm->emeta_sec[3] = DIV_ROUND_UP(l_mg->nr_lines * sizeof(u32),
-                       geo->csecs);
-       lm->emeta_len[3] = lm->emeta_sec[3] * geo->csecs;
-
-       lm->vsc_list_len = l_mg->nr_lines * sizeof(u32);
-
-       return (lm->emeta_len[1] + lm->emeta_len[2] + lm->emeta_len[3]);
-}
-
-static int pblk_set_provision(struct pblk *pblk, int nr_free_chks)
-{
-       struct nvm_tgt_dev *dev = pblk->dev;
-       struct pblk_line_mgmt *l_mg = &pblk->l_mg;
-       struct pblk_line_meta *lm = &pblk->lm;
-       struct nvm_geo *geo = &dev->geo;
-       sector_t provisioned;
-       int sec_meta, blk_meta, clba;
-       int minimum;
-
-       if (geo->op == NVM_TARGET_DEFAULT_OP)
-               pblk->op = PBLK_DEFAULT_OP;
-       else
-               pblk->op = geo->op;
-
-       minimum = pblk_get_min_chks(pblk);
-       provisioned = nr_free_chks;
-       provisioned *= (100 - pblk->op);
-       sector_div(provisioned, 100);
-
-       if ((nr_free_chks - provisioned) < minimum) {
-               if (geo->op != NVM_TARGET_DEFAULT_OP) {
-                       pblk_err(pblk, "OP too small to create a sane instance\n");
-                       return -EINTR;
-               }
-
-               /* If the user did not specify an OP value, and PBLK_DEFAULT_OP
-                * is not enough, calculate and set sane value
-                */
-
-               provisioned = nr_free_chks - minimum;
-               pblk->op =  (100 * minimum) / nr_free_chks;
-               pblk_info(pblk, "Default OP insufficient, adjusting OP to %d\n",
-                               pblk->op);
-       }
-
-       pblk->op_blks = nr_free_chks - provisioned;
-
-       /* Internally pblk manages all free blocks, but all calculations based
-        * on user capacity consider only provisioned blocks
-        */
-       pblk->rl.total_blocks = nr_free_chks;
-
-       /* Consider sectors used for metadata */
-       sec_meta = (lm->smeta_sec + lm->emeta_sec[0]) * l_mg->nr_free_lines;
-       blk_meta = DIV_ROUND_UP(sec_meta, geo->clba);
-
-       clba = (geo->clba / pblk->min_write_pgs) * pblk->min_write_pgs_data;
-       pblk->capacity = (provisioned - blk_meta) * clba;
-
-       atomic_set(&pblk->rl.free_blocks, nr_free_chks);
-       atomic_set(&pblk->rl.free_user_blocks, nr_free_chks);
-
-       return 0;
-}
-
-static int pblk_setup_line_meta_chk(struct pblk *pblk, struct pblk_line *line,
-                                  struct nvm_chk_meta *meta)
-{
-       struct nvm_tgt_dev *dev = pblk->dev;
-       struct nvm_geo *geo = &dev->geo;
-       struct pblk_line_meta *lm = &pblk->lm;
-       int i, nr_bad_chks = 0;
-
-       for (i = 0; i < lm->blk_per_line; i++) {
-               struct pblk_lun *rlun = &pblk->luns[i];
-               struct nvm_chk_meta *chunk;
-               struct nvm_chk_meta *chunk_meta;
-               struct ppa_addr ppa;
-               int pos;
-
-               ppa = rlun->bppa;
-               pos = pblk_ppa_to_pos(geo, ppa);
-               chunk = &line->chks[pos];
-
-               ppa.m.chk = line->id;
-               chunk_meta = pblk_chunk_get_off(pblk, meta, ppa);
-
-               chunk->state = chunk_meta->state;
-               chunk->type = chunk_meta->type;
-               chunk->wi = chunk_meta->wi;
-               chunk->slba = chunk_meta->slba;
-               chunk->cnlb = chunk_meta->cnlb;
-               chunk->wp = chunk_meta->wp;
-
-               trace_pblk_chunk_state(pblk_disk_name(pblk), &ppa,
-                                       chunk->state);
-
-               if (chunk->type & NVM_CHK_TP_SZ_SPEC) {
-                       WARN_ONCE(1, "pblk: custom-sized chunks unsupported\n");
-                       continue;
-               }
-
-               if (!(chunk->state & NVM_CHK_ST_OFFLINE))
-                       continue;
-
-               set_bit(pos, line->blk_bitmap);
-               nr_bad_chks++;
-       }
-
-       return nr_bad_chks;
-}
-
-static long pblk_setup_line_meta(struct pblk *pblk, struct pblk_line *line,
-                                void *chunk_meta, int line_id)
-{
-       struct pblk_line_mgmt *l_mg = &pblk->l_mg;
-       struct pblk_line_meta *lm = &pblk->lm;
-       long nr_bad_chks, chk_in_line;
-
-       line->pblk = pblk;
-       line->id = line_id;
-       line->type = PBLK_LINETYPE_FREE;
-       line->state = PBLK_LINESTATE_NEW;
-       line->gc_group = PBLK_LINEGC_NONE;
-       line->vsc = &l_mg->vsc_list[line_id];
-       spin_lock_init(&line->lock);
-
-       nr_bad_chks = pblk_setup_line_meta_chk(pblk, line, chunk_meta);
-
-       chk_in_line = lm->blk_per_line - nr_bad_chks;
-       if (nr_bad_chks < 0 || nr_bad_chks > lm->blk_per_line ||
-                                       chk_in_line < lm->min_blk_line) {
-               line->state = PBLK_LINESTATE_BAD;
-               list_add_tail(&line->list, &l_mg->bad_list);
-               return 0;
-       }
-
-       atomic_set(&line->blk_in_line, chk_in_line);
-       list_add_tail(&line->list, &l_mg->free_list);
-       l_mg->nr_free_lines++;
-
-       return chk_in_line;
-}
-
-static int pblk_alloc_line_meta(struct pblk *pblk, struct pblk_line *line)
-{
-       struct pblk_line_meta *lm = &pblk->lm;
-
-       line->blk_bitmap = kzalloc(lm->blk_bitmap_len, GFP_KERNEL);
-       if (!line->blk_bitmap)
-               return -ENOMEM;
-
-       line->erase_bitmap = kzalloc(lm->blk_bitmap_len, GFP_KERNEL);
-       if (!line->erase_bitmap)
-               goto free_blk_bitmap;
-
-
-       line->chks = kmalloc_array(lm->blk_per_line,
-                                  sizeof(struct nvm_chk_meta), GFP_KERNEL);
-       if (!line->chks)
-               goto free_erase_bitmap;
-
-       line->w_err_gc = kzalloc(sizeof(struct pblk_w_err_gc), GFP_KERNEL);
-       if (!line->w_err_gc)
-               goto free_chks;
-
-       return 0;
-
-free_chks:
-       kfree(line->chks);
-free_erase_bitmap:
-       kfree(line->erase_bitmap);
-free_blk_bitmap:
-       kfree(line->blk_bitmap);
-       return -ENOMEM;
-}
-
-static int pblk_line_mg_init(struct pblk *pblk)
-{
-       struct nvm_tgt_dev *dev = pblk->dev;
-       struct nvm_geo *geo = &dev->geo;
-       struct pblk_line_mgmt *l_mg = &pblk->l_mg;
-       struct pblk_line_meta *lm = &pblk->lm;
-       int i, bb_distance;
-
-       l_mg->nr_lines = geo->num_chk;
-       l_mg->log_line = l_mg->data_line = NULL;
-       l_mg->l_seq_nr = l_mg->d_seq_nr = 0;
-       l_mg->nr_free_lines = 0;
-       bitmap_zero(&l_mg->meta_bitmap, PBLK_DATA_LINES);
-
-       INIT_LIST_HEAD(&l_mg->free_list);
-       INIT_LIST_HEAD(&l_mg->corrupt_list);
-       INIT_LIST_HEAD(&l_mg->bad_list);
-       INIT_LIST_HEAD(&l_mg->gc_full_list);
-       INIT_LIST_HEAD(&l_mg->gc_high_list);
-       INIT_LIST_HEAD(&l_mg->gc_mid_list);
-       INIT_LIST_HEAD(&l_mg->gc_low_list);
-       INIT_LIST_HEAD(&l_mg->gc_empty_list);
-       INIT_LIST_HEAD(&l_mg->gc_werr_list);
-
-       INIT_LIST_HEAD(&l_mg->emeta_list);
-
-       l_mg->gc_lists[0] = &l_mg->gc_werr_list;
-       l_mg->gc_lists[1] = &l_mg->gc_high_list;
-       l_mg->gc_lists[2] = &l_mg->gc_mid_list;
-       l_mg->gc_lists[3] = &l_mg->gc_low_list;
-
-       spin_lock_init(&l_mg->free_lock);
-       spin_lock_init(&l_mg->close_lock);
-       spin_lock_init(&l_mg->gc_lock);
-
-       l_mg->vsc_list = kcalloc(l_mg->nr_lines, sizeof(__le32), GFP_KERNEL);
-       if (!l_mg->vsc_list)
-               goto fail;
-
-       l_mg->bb_template = kzalloc(lm->sec_bitmap_len, GFP_KERNEL);
-       if (!l_mg->bb_template)
-               goto fail_free_vsc_list;
-
-       l_mg->bb_aux = kzalloc(lm->sec_bitmap_len, GFP_KERNEL);
-       if (!l_mg->bb_aux)
-               goto fail_free_bb_template;
-
-       /* smeta is always small enough to fit on a kmalloc memory allocation,
-        * emeta depends on the number of LUNs allocated to the pblk instance
-        */
-       for (i = 0; i < PBLK_DATA_LINES; i++) {
-               l_mg->sline_meta[i] = kmalloc(lm->smeta_len, GFP_KERNEL);
-               if (!l_mg->sline_meta[i])
-                       goto fail_free_smeta;
-       }
-
-       l_mg->bitmap_cache = kmem_cache_create("pblk_lm_bitmap",
-                       lm->sec_bitmap_len, 0, 0, NULL);
-       if (!l_mg->bitmap_cache)
-               goto fail_free_smeta;
-
-       /* the bitmap pool is used for both valid and map bitmaps */
-       l_mg->bitmap_pool = mempool_create_slab_pool(PBLK_DATA_LINES * 2,
-                               l_mg->bitmap_cache);
-       if (!l_mg->bitmap_pool)
-               goto fail_destroy_bitmap_cache;
-
-       /* emeta allocates three different buffers for managing metadata with
-        * in-memory and in-media layouts
-        */
-       for (i = 0; i < PBLK_DATA_LINES; i++) {
-               struct pblk_emeta *emeta;
-
-               emeta = kmalloc(sizeof(struct pblk_emeta), GFP_KERNEL);
-               if (!emeta)
-                       goto fail_free_emeta;
-
-               emeta->buf = kvmalloc(lm->emeta_len[0], GFP_KERNEL);
-               if (!emeta->buf) {
-                       kfree(emeta);
-                       goto fail_free_emeta;
-               }
-
-               emeta->nr_entries = lm->emeta_sec[0];
-               l_mg->eline_meta[i] = emeta;
-       }
-
-       for (i = 0; i < l_mg->nr_lines; i++)
-               l_mg->vsc_list[i] = cpu_to_le32(EMPTY_ENTRY);
-
-       bb_distance = (geo->all_luns) * geo->ws_opt;
-       for (i = 0; i < lm->sec_per_line; i += bb_distance)
-               bitmap_set(l_mg->bb_template, i, geo->ws_opt);
-
-       return 0;
-
-fail_free_emeta:
-       while (--i >= 0) {
-               kvfree(l_mg->eline_meta[i]->buf);
-               kfree(l_mg->eline_meta[i]);
-       }
-
-       mempool_destroy(l_mg->bitmap_pool);
-fail_destroy_bitmap_cache:
-       kmem_cache_destroy(l_mg->bitmap_cache);
-fail_free_smeta:
-       for (i = 0; i < PBLK_DATA_LINES; i++)
-               kfree(l_mg->sline_meta[i]);
-       kfree(l_mg->bb_aux);
-fail_free_bb_template:
-       kfree(l_mg->bb_template);
-fail_free_vsc_list:
-       kfree(l_mg->vsc_list);
-fail:
-       return -ENOMEM;
-}
-
-static int pblk_line_meta_init(struct pblk *pblk)
-{
-       struct nvm_tgt_dev *dev = pblk->dev;
-       struct nvm_geo *geo = &dev->geo;
-       struct pblk_line_meta *lm = &pblk->lm;
-       unsigned int smeta_len, emeta_len;
-       int i;
-
-       lm->sec_per_line = geo->clba * geo->all_luns;
-       lm->blk_per_line = geo->all_luns;
-       lm->blk_bitmap_len = BITS_TO_LONGS(geo->all_luns) * sizeof(long);
-       lm->sec_bitmap_len = BITS_TO_LONGS(lm->sec_per_line) * sizeof(long);
-       lm->lun_bitmap_len = BITS_TO_LONGS(geo->all_luns) * sizeof(long);
-       lm->mid_thrs = lm->sec_per_line / 2;
-       lm->high_thrs = lm->sec_per_line / 4;
-       lm->meta_distance = (geo->all_luns / 2) * pblk->min_write_pgs;
-
-       /* Calculate necessary pages for smeta. See comment over struct
-        * line_smeta definition
-        */
-       i = 1;
-add_smeta_page:
-       lm->smeta_sec = i * geo->ws_opt;
-       lm->smeta_len = lm->smeta_sec * geo->csecs;
-
-       smeta_len = sizeof(struct line_smeta) + lm->lun_bitmap_len;
-       if (smeta_len > lm->smeta_len) {
-               i++;
-               goto add_smeta_page;
-       }
-
-       /* Calculate necessary pages for emeta. See comment over struct
-        * line_emeta definition
-        */
-       i = 1;
-add_emeta_page:
-       lm->emeta_sec[0] = i * geo->ws_opt;
-       lm->emeta_len[0] = lm->emeta_sec[0] * geo->csecs;
-
-       emeta_len = calc_emeta_len(pblk);
-       if (emeta_len > lm->emeta_len[0]) {
-               i++;
-               goto add_emeta_page;
-       }
-
-       lm->emeta_bb = geo->all_luns > i ? geo->all_luns - i : 0;
-
-       lm->min_blk_line = 1;
-       if (geo->all_luns > 1)
-               lm->min_blk_line += DIV_ROUND_UP(lm->smeta_sec +
-                                       lm->emeta_sec[0], geo->clba);
-
-       if (lm->min_blk_line > lm->blk_per_line) {
-               pblk_err(pblk, "config. not supported. Min. LUN in line:%d\n",
-                                                       lm->blk_per_line);
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-static int pblk_lines_init(struct pblk *pblk)
-{
-       struct pblk_line_mgmt *l_mg = &pblk->l_mg;
-       struct pblk_line *line;
-       void *chunk_meta;
-       int nr_free_chks = 0;
-       int i, ret;
-
-       ret = pblk_line_meta_init(pblk);
-       if (ret)
-               return ret;
-
-       ret = pblk_line_mg_init(pblk);
-       if (ret)
-               return ret;
-
-       ret = pblk_luns_init(pblk);
-       if (ret)
-               goto fail_free_meta;
-
-       chunk_meta = pblk_get_chunk_meta(pblk);
-       if (IS_ERR(chunk_meta)) {
-               ret = PTR_ERR(chunk_meta);
-               goto fail_free_luns;
-       }
-
-       pblk->lines = kcalloc(l_mg->nr_lines, sizeof(struct pblk_line),
-                                                               GFP_KERNEL);
-       if (!pblk->lines) {
-               ret = -ENOMEM;
-               goto fail_free_chunk_meta;
-       }
-
-       for (i = 0; i < l_mg->nr_lines; i++) {
-               line = &pblk->lines[i];
-
-               ret = pblk_alloc_line_meta(pblk, line);
-               if (ret)
-                       goto fail_free_lines;
-
-               nr_free_chks += pblk_setup_line_meta(pblk, line, chunk_meta, i);
-
-               trace_pblk_line_state(pblk_disk_name(pblk), line->id,
-                                                               line->state);
-       }
-
-       if (!nr_free_chks) {
-               pblk_err(pblk, "too many bad blocks prevent for sane instance\n");
-               ret = -EINTR;
-               goto fail_free_lines;
-       }
-
-       ret = pblk_set_provision(pblk, nr_free_chks);
-       if (ret)
-               goto fail_free_lines;
-
-       vfree(chunk_meta);
-       return 0;
-
-fail_free_lines:
-       while (--i >= 0)
-               pblk_line_meta_free(l_mg, &pblk->lines[i]);
-       kfree(pblk->lines);
-fail_free_chunk_meta:
-       vfree(chunk_meta);
-fail_free_luns:
-       kfree(pblk->luns);
-fail_free_meta:
-       pblk_line_mg_free(pblk);
-
-       return ret;
-}
-
-static int pblk_writer_init(struct pblk *pblk)
-{
-       pblk->writer_ts = kthread_create(pblk_write_ts, pblk, "pblk-writer-t");
-       if (IS_ERR(pblk->writer_ts)) {
-               int err = PTR_ERR(pblk->writer_ts);
-
-               if (err != -EINTR)
-                       pblk_err(pblk, "could not allocate writer kthread (%d)\n",
-                                       err);
-               return err;
-       }
-
-       timer_setup(&pblk->wtimer, pblk_write_timer_fn, 0);
-       mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(100));
-
-       return 0;
-}
-
-static void pblk_writer_stop(struct pblk *pblk)
-{
-       /* The pipeline must be stopped and the write buffer emptied before the
-        * write thread is stopped
-        */
-       WARN(pblk_rb_read_count(&pblk->rwb),
-                       "Stopping not fully persisted write buffer\n");
-
-       WARN(pblk_rb_sync_count(&pblk->rwb),
-                       "Stopping not fully synced write buffer\n");
-
-       del_timer_sync(&pblk->wtimer);
-       if (pblk->writer_ts)
-               kthread_stop(pblk->writer_ts);
-}
-
-static void pblk_free(struct pblk *pblk)
-{
-       pblk_lines_free(pblk);
-       pblk_l2p_free(pblk);
-       pblk_rwb_free(pblk);
-       pblk_core_free(pblk);
-
-       kfree(pblk);
-}
-
-static void pblk_tear_down(struct pblk *pblk, bool graceful)
-{
-       if (graceful)
-               __pblk_pipeline_flush(pblk);
-       __pblk_pipeline_stop(pblk);
-       pblk_writer_stop(pblk);
-       pblk_rb_sync_l2p(&pblk->rwb);
-       pblk_rl_free(&pblk->rl);
-
-       pblk_debug(pblk, "consistent tear down (graceful:%d)\n", graceful);
-}
-
-static void pblk_exit(void *private, bool graceful)
-{
-       struct pblk *pblk = private;
-
-       pblk_gc_exit(pblk, graceful);
-       pblk_tear_down(pblk, graceful);
-
-#ifdef CONFIG_NVM_PBLK_DEBUG
-       pblk_info(pblk, "exit: L2P CRC: %x\n", pblk_l2p_crc(pblk));
-#endif
-
-       pblk_free(pblk);
-}
-
-static sector_t pblk_capacity(void *private)
-{
-       struct pblk *pblk = private;
-
-       return pblk->capacity * NR_PHY_IN_LOG;
-}
-
-static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk,
-                      int flags)
-{
-       struct nvm_geo *geo = &dev->geo;
-       struct request_queue *bqueue = dev->q;
-       struct request_queue *tqueue = tdisk->queue;
-       struct pblk *pblk;
-       int ret;
-
-       pblk = kzalloc(sizeof(struct pblk), GFP_KERNEL);
-       if (!pblk)
-               return ERR_PTR(-ENOMEM);
-
-       pblk->dev = dev;
-       pblk->disk = tdisk;
-       pblk->state = PBLK_STATE_RUNNING;
-       trace_pblk_state(pblk_disk_name(pblk), pblk->state);
-       pblk->gc.gc_enabled = 0;
-
-       if (!(geo->version == NVM_OCSSD_SPEC_12 ||
-                                       geo->version == NVM_OCSSD_SPEC_20)) {
-               pblk_err(pblk, "OCSSD version not supported (%u)\n",
-                                                       geo->version);
-               kfree(pblk);
-               return ERR_PTR(-EINVAL);
-       }
-
-       if (geo->ext) {
-               pblk_err(pblk, "extended metadata not supported\n");
-               kfree(pblk);
-               return ERR_PTR(-EINVAL);
-       }
-
-       spin_lock_init(&pblk->resubmit_lock);
-       spin_lock_init(&pblk->trans_lock);
-       spin_lock_init(&pblk->lock);
-
-#ifdef CONFIG_NVM_PBLK_DEBUG
-       atomic_long_set(&pblk->inflight_writes, 0);
-       atomic_long_set(&pblk->padded_writes, 0);
-       atomic_long_set(&pblk->padded_wb, 0);
-       atomic_long_set(&pblk->req_writes, 0);
-       atomic_long_set(&pblk->sub_writes, 0);
-       atomic_long_set(&pblk->sync_writes, 0);
-       atomic_long_set(&pblk->inflight_reads, 0);
-       atomic_long_set(&pblk->cache_reads, 0);
-       atomic_long_set(&pblk->sync_reads, 0);
-       atomic_long_set(&pblk->recov_writes, 0);
-       atomic_long_set(&pblk->recov_writes, 0);
-       atomic_long_set(&pblk->recov_gc_writes, 0);
-       atomic_long_set(&pblk->recov_gc_reads, 0);
-#endif
-
-       atomic_long_set(&pblk->read_failed, 0);
-       atomic_long_set(&pblk->read_empty, 0);
-       atomic_long_set(&pblk->read_high_ecc, 0);
-       atomic_long_set(&pblk->read_failed_gc, 0);
-       atomic_long_set(&pblk->write_failed, 0);
-       atomic_long_set(&pblk->erase_failed, 0);
-
-       ret = pblk_core_init(pblk);
-       if (ret) {
-               pblk_err(pblk, "could not initialize core\n");
-               goto fail;
-       }
-
-       ret = pblk_lines_init(pblk);
-       if (ret) {
-               pblk_err(pblk, "could not initialize lines\n");
-               goto fail_free_core;
-       }
-
-       ret = pblk_rwb_init(pblk);
-       if (ret) {
-               pblk_err(pblk, "could not initialize write buffer\n");
-               goto fail_free_lines;
-       }
-
-       ret = pblk_l2p_init(pblk, flags & NVM_TARGET_FACTORY);
-       if (ret) {
-               pblk_err(pblk, "could not initialize maps\n");
-               goto fail_free_rwb;
-       }
-
-       ret = pblk_writer_init(pblk);
-       if (ret) {
-               if (ret != -EINTR)
-                       pblk_err(pblk, "could not initialize write thread\n");
-               goto fail_free_l2p;
-       }
-
-       ret = pblk_gc_init(pblk);
-       if (ret) {
-               pblk_err(pblk, "could not initialize gc\n");
-               goto fail_stop_writer;
-       }
-
-       /* inherit the size from the underlying device */
-       blk_queue_logical_block_size(tqueue, queue_physical_block_size(bqueue));
-       blk_queue_max_hw_sectors(tqueue, queue_max_hw_sectors(bqueue));
-
-       blk_queue_write_cache(tqueue, true, false);
-
-       tqueue->limits.discard_granularity = geo->clba * geo->csecs;
-       tqueue->limits.discard_alignment = 0;
-       blk_queue_max_discard_sectors(tqueue, UINT_MAX >> 9);
-       blk_queue_flag_set(QUEUE_FLAG_DISCARD, tqueue);
-
-       pblk_info(pblk, "luns:%u, lines:%d, secs:%llu, buf entries:%u\n",
-                       geo->all_luns, pblk->l_mg.nr_lines,
-                       (unsigned long long)pblk->capacity,
-                       pblk->rwb.nr_entries);
-
-       wake_up_process(pblk->writer_ts);
-
-       /* Check if we need to start GC */
-       pblk_gc_should_kick(pblk);
-
-       return pblk;
-
-fail_stop_writer:
-       pblk_writer_stop(pblk);
-fail_free_l2p:
-       pblk_l2p_free(pblk);
-fail_free_rwb:
-       pblk_rwb_free(pblk);
-fail_free_lines:
-       pblk_lines_free(pblk);
-fail_free_core:
-       pblk_core_free(pblk);
-fail:
-       kfree(pblk);
-       return ERR_PTR(ret);
-}
-
-/* physical block device target */
-static struct nvm_tgt_type tt_pblk = {
-       .name           = "pblk",
-       .version        = {1, 0, 0},
-
-       .bops           = &pblk_bops,
-       .capacity       = pblk_capacity,
-
-       .init           = pblk_init,
-       .exit           = pblk_exit,
-
-       .sysfs_init     = pblk_sysfs_init,
-       .sysfs_exit     = pblk_sysfs_exit,
-       .owner          = THIS_MODULE,
-};
-
-static int __init pblk_module_init(void)
-{
-       int ret;
-
-       ret = bioset_init(&pblk_bio_set, BIO_POOL_SIZE, 0, 0);
-       if (ret)
-               return ret;
-       ret = nvm_register_tgt_type(&tt_pblk);
-       if (ret)
-               bioset_exit(&pblk_bio_set);
-       return ret;
-}
-
-static void pblk_module_exit(void)
-{
-       bioset_exit(&pblk_bio_set);
-       nvm_unregister_tgt_type(&tt_pblk);
-}
-
-module_init(pblk_module_init);
-module_exit(pblk_module_exit);
-MODULE_AUTHOR("Javier Gonzalez <javier@cnexlabs.com>");
-MODULE_AUTHOR("Matias Bjorling <matias@cnexlabs.com>");
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("Physical Block-Device for Open-Channel SSDs");
diff --git a/drivers/lightnvm/pblk-map.c b/drivers/lightnvm/pblk-map.c
deleted file mode 100644 (file)
index 5408e32..0000000
+++ /dev/null
@@ -1,210 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2016 CNEX Labs
- * Initial release: Javier Gonzalez <javier@cnexlabs.com>
- *                  Matias Bjorling <matias@cnexlabs.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * pblk-map.c - pblk's lba-ppa mapping strategy
- *
- */
-
-#include "pblk.h"
-
-static int pblk_map_page_data(struct pblk *pblk, unsigned int sentry,
-                             struct ppa_addr *ppa_list,
-                             unsigned long *lun_bitmap,
-                             void *meta_list,
-                             unsigned int valid_secs)
-{
-       struct pblk_line *line = pblk_line_get_data(pblk);
-       struct pblk_emeta *emeta;
-       struct pblk_w_ctx *w_ctx;
-       __le64 *lba_list;
-       u64 paddr;
-       int nr_secs = pblk->min_write_pgs;
-       int i;
-
-       if (!line)
-               return -ENOSPC;
-
-       if (pblk_line_is_full(line)) {
-               struct pblk_line *prev_line = line;
-
-               /* If we cannot allocate a new line, make sure to store metadata
-                * on current line and then fail
-                */
-               line = pblk_line_replace_data(pblk);
-               pblk_line_close_meta(pblk, prev_line);
-
-               if (!line) {
-                       pblk_pipeline_stop(pblk);
-                       return -ENOSPC;
-               }
-
-       }
-
-       emeta = line->emeta;
-       lba_list = emeta_to_lbas(pblk, emeta->buf);
-
-       paddr = pblk_alloc_page(pblk, line, nr_secs);
-
-       for (i = 0; i < nr_secs; i++, paddr++) {
-               struct pblk_sec_meta *meta = pblk_get_meta(pblk, meta_list, i);
-               __le64 addr_empty = cpu_to_le64(ADDR_EMPTY);
-
-               /* ppa to be sent to the device */
-               ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);
-
-               /* Write context for target bio completion on write buffer. Note
-                * that the write buffer is protected by the sync backpointer,
-                * and a single writer thread have access to each specific entry
-                * at a time. Thus, it is safe to modify the context for the
-                * entry we are setting up for submission without taking any
-                * lock or memory barrier.
-                */
-               if (i < valid_secs) {
-                       kref_get(&line->ref);
-                       atomic_inc(&line->sec_to_update);
-                       w_ctx = pblk_rb_w_ctx(&pblk->rwb, sentry + i);
-                       w_ctx->ppa = ppa_list[i];
-                       meta->lba = cpu_to_le64(w_ctx->lba);
-                       lba_list[paddr] = cpu_to_le64(w_ctx->lba);
-                       if (lba_list[paddr] != addr_empty)
-                               line->nr_valid_lbas++;
-                       else
-                               atomic64_inc(&pblk->pad_wa);
-               } else {
-                       lba_list[paddr] = addr_empty;
-                       meta->lba = addr_empty;
-                       __pblk_map_invalidate(pblk, line, paddr);
-               }
-       }
-
-       pblk_down_rq(pblk, ppa_list[0], lun_bitmap);
-       return 0;
-}
-
-int pblk_map_rq(struct pblk *pblk, struct nvm_rq *rqd, unsigned int sentry,
-                unsigned long *lun_bitmap, unsigned int valid_secs,
-                unsigned int off)
-{
-       void *meta_list = pblk_get_meta_for_writes(pblk, rqd);
-       void *meta_buffer;
-       struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
-       unsigned int map_secs;
-       int min = pblk->min_write_pgs;
-       int i;
-       int ret;
-
-       for (i = off; i < rqd->nr_ppas; i += min) {
-               map_secs = (i + min > valid_secs) ? (valid_secs % min) : min;
-               meta_buffer = pblk_get_meta(pblk, meta_list, i);
-
-               ret = pblk_map_page_data(pblk, sentry + i, &ppa_list[i],
-                                       lun_bitmap, meta_buffer, map_secs);
-               if (ret)
-                       return ret;
-       }
-
-       return 0;
-}
-
-/* only if erase_ppa is set, acquire erase semaphore */
-int pblk_map_erase_rq(struct pblk *pblk, struct nvm_rq *rqd,
-                      unsigned int sentry, unsigned long *lun_bitmap,
-                      unsigned int valid_secs, struct ppa_addr *erase_ppa)
-{
-       struct nvm_tgt_dev *dev = pblk->dev;
-       struct nvm_geo *geo = &dev->geo;
-       struct pblk_line_meta *lm = &pblk->lm;
-       void *meta_list = pblk_get_meta_for_writes(pblk, rqd);
-       void *meta_buffer;
-       struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
-       struct pblk_line *e_line, *d_line;
-       unsigned int map_secs;
-       int min = pblk->min_write_pgs;
-       int i, erase_lun;
-       int ret;
-
-
-       for (i = 0; i < rqd->nr_ppas; i += min) {
-               map_secs = (i + min > valid_secs) ? (valid_secs % min) : min;
-               meta_buffer = pblk_get_meta(pblk, meta_list, i);
-
-               ret = pblk_map_page_data(pblk, sentry + i, &ppa_list[i],
-                                       lun_bitmap, meta_buffer, map_secs);
-               if (ret)
-                       return ret;
-
-               erase_lun = pblk_ppa_to_pos(geo, ppa_list[i]);
-
-               /* line can change after page map. We might also be writing the
-                * last line.
-                */
-               e_line = pblk_line_get_erase(pblk);
-               if (!e_line)
-                       return pblk_map_rq(pblk, rqd, sentry, lun_bitmap,
-                                                       valid_secs, i + min);
-
-               spin_lock(&e_line->lock);
-               if (!test_bit(erase_lun, e_line->erase_bitmap)) {
-                       set_bit(erase_lun, e_line->erase_bitmap);
-                       atomic_dec(&e_line->left_eblks);
-
-                       *erase_ppa = ppa_list[i];
-                       erase_ppa->a.blk = e_line->id;
-                       erase_ppa->a.reserved = 0;
-
-                       spin_unlock(&e_line->lock);
-
-                       /* Avoid evaluating e_line->left_eblks */
-                       return pblk_map_rq(pblk, rqd, sentry, lun_bitmap,
-                                                       valid_secs, i + min);
-               }
-               spin_unlock(&e_line->lock);
-       }
-
-       d_line = pblk_line_get_data(pblk);
-
-       /* line can change after page map. We might also be writing the
-        * last line.
-        */
-       e_line = pblk_line_get_erase(pblk);
-       if (!e_line)
-               return -ENOSPC;
-
-       /* Erase blocks that are bad in this line but might not be in next */
-       if (unlikely(pblk_ppa_empty(*erase_ppa)) &&
-                       bitmap_weight(d_line->blk_bitmap, lm->blk_per_line)) {
-               int bit = -1;
-
-retry:
-               bit = find_next_bit(d_line->blk_bitmap,
-                                               lm->blk_per_line, bit + 1);
-               if (bit >= lm->blk_per_line)
-                       return 0;
-
-               spin_lock(&e_line->lock);
-               if (test_bit(bit, e_line->erase_bitmap)) {
-                       spin_unlock(&e_line->lock);
-                       goto retry;
-               }
-               spin_unlock(&e_line->lock);
-
-               set_bit(bit, e_line->erase_bitmap);
-               atomic_dec(&e_line->left_eblks);
-               *erase_ppa = pblk->luns[bit].bppa; /* set ch and lun */
-               erase_ppa->a.blk = e_line->id;
-       }
-
-       return 0;
-}
diff --git a/drivers/lightnvm/pblk-rb.c b/drivers/lightnvm/pblk-rb.c
deleted file mode 100644 (file)
index 5abb170..0000000
+++ /dev/null
@@ -1,858 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2016 CNEX Labs
- * Initial release: Javier Gonzalez <javier@cnexlabs.com>
- *
- * Based upon the circular ringbuffer.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * pblk-rb.c - pblk's write buffer
- */
-
-#include <linux/circ_buf.h>
-
-#include "pblk.h"
-
-static DECLARE_RWSEM(pblk_rb_lock);
-
-static void pblk_rb_data_free(struct pblk_rb *rb)
-{
-       struct pblk_rb_pages *p, *t;
-
-       down_write(&pblk_rb_lock);
-       list_for_each_entry_safe(p, t, &rb->pages, list) {
-               free_pages((unsigned long)page_address(p->pages), p->order);
-               list_del(&p->list);
-               kfree(p);
-       }
-       up_write(&pblk_rb_lock);
-}
-
-void pblk_rb_free(struct pblk_rb *rb)
-{
-       pblk_rb_data_free(rb);
-       vfree(rb->entries);
-}
-
-/*
- * pblk_rb_calculate_size -- calculate the size of the write buffer
- */
-static unsigned int pblk_rb_calculate_size(unsigned int nr_entries,
-                                          unsigned int threshold)
-{
-       unsigned int thr_sz = 1 << (get_count_order(threshold + NVM_MAX_VLBA));
-       unsigned int max_sz = max(thr_sz, nr_entries);
-       unsigned int max_io;
-
-       /* Alloc a write buffer that can (i) fit at least two split bios
-        * (considering max I/O size NVM_MAX_VLBA, and (ii) guarantee that the
-        * threshold will be respected
-        */
-       max_io = (1 << max((int)(get_count_order(max_sz)),
-                               (int)(get_count_order(NVM_MAX_VLBA << 1))));
-       if ((threshold + NVM_MAX_VLBA) >= max_io)
-               max_io <<= 1;
-
-       return max_io;
-}
-
-/*
- * Initialize ring buffer. The data and metadata buffers must be previously
- * allocated and their size must be a power of two
- * (Documentation/core-api/circular-buffers.rst)
- */
-int pblk_rb_init(struct pblk_rb *rb, unsigned int size, unsigned int threshold,
-                unsigned int seg_size)
-{
-       struct pblk *pblk = container_of(rb, struct pblk, rwb);
-       struct pblk_rb_entry *entries;
-       unsigned int init_entry = 0;
-       unsigned int max_order = MAX_ORDER - 1;
-       unsigned int power_size, power_seg_sz;
-       unsigned int alloc_order, order, iter;
-       unsigned int nr_entries;
-
-       nr_entries = pblk_rb_calculate_size(size, threshold);
-       entries = vzalloc(array_size(nr_entries, sizeof(struct pblk_rb_entry)));
-       if (!entries)
-               return -ENOMEM;
-
-       power_size = get_count_order(nr_entries);
-       power_seg_sz = get_count_order(seg_size);
-
-       down_write(&pblk_rb_lock);
-       rb->entries = entries;
-       rb->seg_size = (1 << power_seg_sz);
-       rb->nr_entries = (1 << power_size);
-       rb->mem = rb->subm = rb->sync = rb->l2p_update = 0;
-       rb->back_thres = threshold;
-       rb->flush_point = EMPTY_ENTRY;
-
-       spin_lock_init(&rb->w_lock);
-       spin_lock_init(&rb->s_lock);
-
-       INIT_LIST_HEAD(&rb->pages);
-
-       alloc_order = power_size;
-       if (alloc_order >= max_order) {
-               order = max_order;
-               iter = (1 << (alloc_order - max_order));
-       } else {
-               order = alloc_order;
-               iter = 1;
-       }
-
-       do {
-               struct pblk_rb_entry *entry;
-               struct pblk_rb_pages *page_set;
-               void *kaddr;
-               unsigned long set_size;
-               int i;
-
-               page_set = kmalloc(sizeof(struct pblk_rb_pages), GFP_KERNEL);
-               if (!page_set) {
-                       up_write(&pblk_rb_lock);
-                       vfree(entries);
-                       return -ENOMEM;
-               }
-
-               page_set->order = order;
-               page_set->pages = alloc_pages(GFP_KERNEL, order);
-               if (!page_set->pages) {
-                       kfree(page_set);
-                       pblk_rb_data_free(rb);
-                       up_write(&pblk_rb_lock);
-                       vfree(entries);
-                       return -ENOMEM;
-               }
-               kaddr = page_address(page_set->pages);
-
-               entry = &rb->entries[init_entry];
-               entry->data = kaddr;
-               entry->cacheline = pblk_cacheline_to_addr(init_entry++);
-               entry->w_ctx.flags = PBLK_WRITABLE_ENTRY;
-
-               set_size = (1 << order);
-               for (i = 1; i < set_size; i++) {
-                       entry = &rb->entries[init_entry];
-                       entry->cacheline = pblk_cacheline_to_addr(init_entry++);
-                       entry->data = kaddr + (i * rb->seg_size);
-                       entry->w_ctx.flags = PBLK_WRITABLE_ENTRY;
-                       bio_list_init(&entry->w_ctx.bios);
-               }
-
-               list_add_tail(&page_set->list, &rb->pages);
-               iter--;
-       } while (iter > 0);
-       up_write(&pblk_rb_lock);
-
-#ifdef CONFIG_NVM_PBLK_DEBUG
-       atomic_set(&rb->inflight_flush_point, 0);
-#endif
-
-       /*
-        * Initialize rate-limiter, which controls access to the write buffer
-        * by user and GC I/O
-        */
-       pblk_rl_init(&pblk->rl, rb->nr_entries, threshold);
-
-       return 0;
-}
-
-static void clean_wctx(struct pblk_w_ctx *w_ctx)
-{
-       int flags;
-
-       flags = READ_ONCE(w_ctx->flags);
-       WARN_ONCE(!(flags & PBLK_SUBMITTED_ENTRY),
-                       "pblk: overwriting unsubmitted data\n");
-
-       /* Release flags on context. Protect from writes and reads */
-       smp_store_release(&w_ctx->flags, PBLK_WRITABLE_ENTRY);
-       pblk_ppa_set_empty(&w_ctx->ppa);
-       w_ctx->lba = ADDR_EMPTY;
-}
-
-#define pblk_rb_ring_count(head, tail, size) CIRC_CNT(head, tail, size)
-#define pblk_rb_ring_space(rb, head, tail, size) \
-                                       (CIRC_SPACE(head, tail, size))
-
-/*
- * Buffer space is calculated with respect to the back pointer signaling
- * synchronized entries to the media.
- */
-static unsigned int pblk_rb_space(struct pblk_rb *rb)
-{
-       unsigned int mem = READ_ONCE(rb->mem);
-       unsigned int sync = READ_ONCE(rb->sync);
-
-       return pblk_rb_ring_space(rb, mem, sync, rb->nr_entries);
-}
-
-unsigned int pblk_rb_ptr_wrap(struct pblk_rb *rb, unsigned int p,
-                             unsigned int nr_entries)
-{
-       return (p + nr_entries) & (rb->nr_entries - 1);
-}
-
-/*
- * Buffer count is calculated with respect to the submission entry signaling the
- * entries that are available to send to the media
- */
-unsigned int pblk_rb_read_count(struct pblk_rb *rb)
-{
-       unsigned int mem = READ_ONCE(rb->mem);
-       unsigned int subm = READ_ONCE(rb->subm);
-
-       return pblk_rb_ring_count(mem, subm, rb->nr_entries);
-}
-
-unsigned int pblk_rb_sync_count(struct pblk_rb *rb)
-{
-       unsigned int mem = READ_ONCE(rb->mem);
-       unsigned int sync = READ_ONCE(rb->sync);
-
-       return pblk_rb_ring_count(mem, sync, rb->nr_entries);
-}
-
-unsigned int pblk_rb_read_commit(struct pblk_rb *rb, unsigned int nr_entries)
-{
-       unsigned int subm;
-
-       subm = READ_ONCE(rb->subm);
-       /* Commit read means updating submission pointer */
-       smp_store_release(&rb->subm, pblk_rb_ptr_wrap(rb, subm, nr_entries));
-
-       return subm;
-}
-
-static int __pblk_rb_update_l2p(struct pblk_rb *rb, unsigned int to_update)
-{
-       struct pblk *pblk = container_of(rb, struct pblk, rwb);
-       struct pblk_line *line;
-       struct pblk_rb_entry *entry;
-       struct pblk_w_ctx *w_ctx;
-       unsigned int user_io = 0, gc_io = 0;
-       unsigned int i;
-       int flags;
-
-       for (i = 0; i < to_update; i++) {
-               entry = &rb->entries[rb->l2p_update];
-               w_ctx = &entry->w_ctx;
-
-               flags = READ_ONCE(entry->w_ctx.flags);
-               if (flags & PBLK_IOTYPE_USER)
-                       user_io++;
-               else if (flags & PBLK_IOTYPE_GC)
-                       gc_io++;
-               else
-                       WARN(1, "pblk: unknown IO type\n");
-
-               pblk_update_map_dev(pblk, w_ctx->lba, w_ctx->ppa,
-                                                       entry->cacheline);
-
-               line = pblk_ppa_to_line(pblk, w_ctx->ppa);
-               atomic_dec(&line->sec_to_update);
-               kref_put(&line->ref, pblk_line_put);
-               clean_wctx(w_ctx);
-               rb->l2p_update = pblk_rb_ptr_wrap(rb, rb->l2p_update, 1);
-       }
-
-       pblk_rl_out(&pblk->rl, user_io, gc_io);
-
-       return 0;
-}
-
-/*
- * When we move the l2p_update pointer, we update the l2p table - lookups will
- * point to the physical address instead of to the cacheline in the write buffer
- * from this moment on.
- */
-static int pblk_rb_update_l2p(struct pblk_rb *rb, unsigned int nr_entries,
-                             unsigned int mem, unsigned int sync)
-{
-       unsigned int space, count;
-       int ret = 0;
-
-       lockdep_assert_held(&rb->w_lock);
-
-       /* Update l2p only as buffer entries are being overwritten */
-       space = pblk_rb_ring_space(rb, mem, rb->l2p_update, rb->nr_entries);
-       if (space > nr_entries)
-               goto out;
-
-       count = nr_entries - space;
-       /* l2p_update used exclusively under rb->w_lock */
-       ret = __pblk_rb_update_l2p(rb, count);
-
-out:
-       return ret;
-}
-
-/*
- * Update the l2p entry for all sectors stored on the write buffer. This means
- * that all future lookups to the l2p table will point to a device address, not
- * to the cacheline in the write buffer.
- */
-void pblk_rb_sync_l2p(struct pblk_rb *rb)
-{
-       unsigned int sync;
-       unsigned int to_update;
-
-       spin_lock(&rb->w_lock);
-
-       /* Protect from reads and writes */
-       sync = smp_load_acquire(&rb->sync);
-
-       to_update = pblk_rb_ring_count(sync, rb->l2p_update, rb->nr_entries);
-       __pblk_rb_update_l2p(rb, to_update);
-
-       spin_unlock(&rb->w_lock);
-}
-
-/*
- * Write @nr_entries to ring buffer from @data buffer if there is enough space.
- * Typically, 4KB data chunks coming from a bio will be copied to the ring
- * buffer, thus the write will fail if not all incoming data can be copied.
- *
- */
-static void __pblk_rb_write_entry(struct pblk_rb *rb, void *data,
-                                 struct pblk_w_ctx w_ctx,
-                                 struct pblk_rb_entry *entry)
-{
-       memcpy(entry->data, data, rb->seg_size);
-
-       entry->w_ctx.lba = w_ctx.lba;
-       entry->w_ctx.ppa = w_ctx.ppa;
-}
-
-void pblk_rb_write_entry_user(struct pblk_rb *rb, void *data,
-                             struct pblk_w_ctx w_ctx, unsigned int ring_pos)
-{
-       struct pblk *pblk = container_of(rb, struct pblk, rwb);
-       struct pblk_rb_entry *entry;
-       int flags;
-
-       entry = &rb->entries[ring_pos];
-       flags = READ_ONCE(entry->w_ctx.flags);
-#ifdef CONFIG_NVM_PBLK_DEBUG
-       /* Caller must guarantee that the entry is free */
-       BUG_ON(!(flags & PBLK_WRITABLE_ENTRY));
-#endif
-
-       __pblk_rb_write_entry(rb, data, w_ctx, entry);
-
-       pblk_update_map_cache(pblk, w_ctx.lba, entry->cacheline);
-       flags = w_ctx.flags | PBLK_WRITTEN_DATA;
-
-       /* Release flags on write context. Protect from writes */
-       smp_store_release(&entry->w_ctx.flags, flags);
-}
-
-void pblk_rb_write_entry_gc(struct pblk_rb *rb, void *data,
-                           struct pblk_w_ctx w_ctx, struct pblk_line *line,
-                           u64 paddr, unsigned int ring_pos)
-{
-       struct pblk *pblk = container_of(rb, struct pblk, rwb);
-       struct pblk_rb_entry *entry;
-       int flags;
-
-       entry = &rb->entries[ring_pos];
-       flags = READ_ONCE(entry->w_ctx.flags);
-#ifdef CONFIG_NVM_PBLK_DEBUG
-       /* Caller must guarantee that the entry is free */
-       BUG_ON(!(flags & PBLK_WRITABLE_ENTRY));
-#endif
-
-       __pblk_rb_write_entry(rb, data, w_ctx, entry);
-
-       if (!pblk_update_map_gc(pblk, w_ctx.lba, entry->cacheline, line, paddr))
-               entry->w_ctx.lba = ADDR_EMPTY;
-
-       flags = w_ctx.flags | PBLK_WRITTEN_DATA;
-
-       /* Release flags on write context. Protect from writes */
-       smp_store_release(&entry->w_ctx.flags, flags);
-}
-
-static int pblk_rb_flush_point_set(struct pblk_rb *rb, struct bio *bio,
-                                  unsigned int pos)
-{
-       struct pblk_rb_entry *entry;
-       unsigned int sync, flush_point;
-
-       pblk_rb_sync_init(rb, NULL);
-       sync = READ_ONCE(rb->sync);
-
-       if (pos == sync) {
-               pblk_rb_sync_end(rb, NULL);
-               return 0;
-       }
-
-#ifdef CONFIG_NVM_PBLK_DEBUG
-       atomic_inc(&rb->inflight_flush_point);
-#endif
-
-       flush_point = (pos == 0) ? (rb->nr_entries - 1) : (pos - 1);
-       entry = &rb->entries[flush_point];
-
-       /* Protect flush points */
-       smp_store_release(&rb->flush_point, flush_point);
-
-       if (bio)
-               bio_list_add(&entry->w_ctx.bios, bio);
-
-       pblk_rb_sync_end(rb, NULL);
-
-       return bio ? 1 : 0;
-}
-
-static int __pblk_rb_may_write(struct pblk_rb *rb, unsigned int nr_entries,
-                              unsigned int *pos)
-{
-       unsigned int mem;
-       unsigned int sync;
-       unsigned int threshold;
-
-       sync = READ_ONCE(rb->sync);
-       mem = READ_ONCE(rb->mem);
-
-       threshold = nr_entries + rb->back_thres;
-
-       if (pblk_rb_ring_space(rb, mem, sync, rb->nr_entries) < threshold)
-               return 0;
-
-       if (pblk_rb_update_l2p(rb, nr_entries, mem, sync))
-               return 0;
-
-       *pos = mem;
-
-       return 1;
-}
-
-static int pblk_rb_may_write(struct pblk_rb *rb, unsigned int nr_entries,
-                            unsigned int *pos)
-{
-       if (!__pblk_rb_may_write(rb, nr_entries, pos))
-               return 0;
-
-       /* Protect from read count */
-       smp_store_release(&rb->mem, pblk_rb_ptr_wrap(rb, *pos, nr_entries));
-       return 1;
-}
-
-void pblk_rb_flush(struct pblk_rb *rb)
-{
-       struct pblk *pblk = container_of(rb, struct pblk, rwb);
-       unsigned int mem = READ_ONCE(rb->mem);
-
-       if (pblk_rb_flush_point_set(rb, NULL, mem))
-               return;
-
-       pblk_write_kick(pblk);
-}
-
-static int pblk_rb_may_write_flush(struct pblk_rb *rb, unsigned int nr_entries,
-                                  unsigned int *pos, struct bio *bio,
-                                  int *io_ret)
-{
-       unsigned int mem;
-
-       if (!__pblk_rb_may_write(rb, nr_entries, pos))
-               return 0;
-
-       mem = pblk_rb_ptr_wrap(rb, *pos, nr_entries);
-       *io_ret = NVM_IO_DONE;
-
-       if (bio->bi_opf & REQ_PREFLUSH) {
-               struct pblk *pblk = container_of(rb, struct pblk, rwb);
-
-               atomic64_inc(&pblk->nr_flush);
-               if (pblk_rb_flush_point_set(&pblk->rwb, bio, mem))
-                       *io_ret = NVM_IO_OK;
-       }
-
-       /* Protect from read count */
-       smp_store_release(&rb->mem, mem);
-
-       return 1;
-}
-
-/*
- * Atomically check that (i) there is space on the write buffer for the
- * incoming I/O, and (ii) the current I/O type has enough budget in the write
- * buffer (rate-limiter).
- */
-int pblk_rb_may_write_user(struct pblk_rb *rb, struct bio *bio,
-                          unsigned int nr_entries, unsigned int *pos)
-{
-       struct pblk *pblk = container_of(rb, struct pblk, rwb);
-       int io_ret;
-
-       spin_lock(&rb->w_lock);
-       io_ret = pblk_rl_user_may_insert(&pblk->rl, nr_entries);
-       if (io_ret) {
-               spin_unlock(&rb->w_lock);
-               return io_ret;
-       }
-
-       if (!pblk_rb_may_write_flush(rb, nr_entries, pos, bio, &io_ret)) {
-               spin_unlock(&rb->w_lock);
-               return NVM_IO_REQUEUE;
-       }
-
-       pblk_rl_user_in(&pblk->rl, nr_entries);
-       spin_unlock(&rb->w_lock);
-
-       return io_ret;
-}
-
-/*
- * Look at pblk_rb_may_write_user comment
- */
-int pblk_rb_may_write_gc(struct pblk_rb *rb, unsigned int nr_entries,
-                        unsigned int *pos)
-{
-       struct pblk *pblk = container_of(rb, struct pblk, rwb);
-
-       spin_lock(&rb->w_lock);
-       if (!pblk_rl_gc_may_insert(&pblk->rl, nr_entries)) {
-               spin_unlock(&rb->w_lock);
-               return 0;
-       }
-
-       if (!pblk_rb_may_write(rb, nr_entries, pos)) {
-               spin_unlock(&rb->w_lock);
-               return 0;
-       }
-
-       pblk_rl_gc_in(&pblk->rl, nr_entries);
-       spin_unlock(&rb->w_lock);
-
-       return 1;
-}
-
-/*
- * Read available entries on rb and add them to the given bio. To avoid a memory
- * copy, a page reference to the write buffer is used to be added to the bio.
- *
- * This function is used by the write thread to form the write bio that will
- * persist data on the write buffer to the media.
- */
-unsigned int pblk_rb_read_to_bio(struct pblk_rb *rb, struct nvm_rq *rqd,
-                                unsigned int pos, unsigned int nr_entries,
-                                unsigned int count)
-{
-       struct pblk *pblk = container_of(rb, struct pblk, rwb);
-       struct request_queue *q = pblk->dev->q;
-       struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
-       struct bio *bio = rqd->bio;
-       struct pblk_rb_entry *entry;
-       struct page *page;
-       unsigned int pad = 0, to_read = nr_entries;
-       unsigned int i;
-       int flags;
-
-       if (count < nr_entries) {
-               pad = nr_entries - count;
-               to_read = count;
-       }
-
-       /* Add space for packed metadata if in use*/
-       pad += (pblk->min_write_pgs - pblk->min_write_pgs_data);
-
-       c_ctx->sentry = pos;
-       c_ctx->nr_valid = to_read;
-       c_ctx->nr_padded = pad;
-
-       for (i = 0; i < to_read; i++) {
-               entry = &rb->entries[pos];
-
-               /* A write has been allowed into the buffer, but data is still
-                * being copied to it. It is ok to busy wait.
-                */
-try:
-               flags = READ_ONCE(entry->w_ctx.flags);
-               if (!(flags & PBLK_WRITTEN_DATA)) {
-                       io_schedule();
-                       goto try;
-               }
-
-               page = virt_to_page(entry->data);
-               if (!page) {
-                       pblk_err(pblk, "could not allocate write bio page\n");
-                       flags &= ~PBLK_WRITTEN_DATA;
-                       flags |= PBLK_SUBMITTED_ENTRY;
-                       /* Release flags on context. Protect from writes */
-                       smp_store_release(&entry->w_ctx.flags, flags);
-                       return NVM_IO_ERR;
-               }
-
-               if (bio_add_pc_page(q, bio, page, rb->seg_size, 0) !=
-                                                               rb->seg_size) {
-                       pblk_err(pblk, "could not add page to write bio\n");
-                       flags &= ~PBLK_WRITTEN_DATA;
-                       flags |= PBLK_SUBMITTED_ENTRY;
-                       /* Release flags on context. Protect from writes */
-                       smp_store_release(&entry->w_ctx.flags, flags);
-                       return NVM_IO_ERR;
-               }
-
-               flags &= ~PBLK_WRITTEN_DATA;
-               flags |= PBLK_SUBMITTED_ENTRY;
-
-               /* Release flags on context. Protect from writes */
-               smp_store_release(&entry->w_ctx.flags, flags);
-
-               pos = pblk_rb_ptr_wrap(rb, pos, 1);
-       }
-
-       if (pad) {
-               if (pblk_bio_add_pages(pblk, bio, GFP_KERNEL, pad)) {
-                       pblk_err(pblk, "could not pad page in write bio\n");
-                       return NVM_IO_ERR;
-               }
-
-               if (pad < pblk->min_write_pgs)
-                       atomic64_inc(&pblk->pad_dist[pad - 1]);
-               else
-                       pblk_warn(pblk, "padding more than min. sectors\n");
-
-               atomic64_add(pad, &pblk->pad_wa);
-       }
-
-#ifdef CONFIG_NVM_PBLK_DEBUG
-       atomic_long_add(pad, &pblk->padded_writes);
-#endif
-
-       return NVM_IO_OK;
-}
-
-/*
- * Copy to bio only if the lba matches the one on the given cache entry.
- * Otherwise, it means that the entry has been overwritten, and the bio should
- * be directed to disk.
- */
-int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba,
-                       struct ppa_addr ppa)
-{
-       struct pblk *pblk = container_of(rb, struct pblk, rwb);
-       struct pblk_rb_entry *entry;
-       struct pblk_w_ctx *w_ctx;
-       struct ppa_addr l2p_ppa;
-       u64 pos = pblk_addr_to_cacheline(ppa);
-       void *data;
-       int flags;
-       int ret = 1;
-
-
-#ifdef CONFIG_NVM_PBLK_DEBUG
-       /* Caller must ensure that the access will not cause an overflow */
-       BUG_ON(pos >= rb->nr_entries);
-#endif
-       entry = &rb->entries[pos];
-       w_ctx = &entry->w_ctx;
-       flags = READ_ONCE(w_ctx->flags);
-
-       spin_lock(&rb->w_lock);
-       spin_lock(&pblk->trans_lock);
-       l2p_ppa = pblk_trans_map_get(pblk, lba);
-       spin_unlock(&pblk->trans_lock);
-
-       /* Check if the entry has been overwritten or is scheduled to be */
-       if (!pblk_ppa_comp(l2p_ppa, ppa) || w_ctx->lba != lba ||
-                                               flags & PBLK_WRITABLE_ENTRY) {
-               ret = 0;
-               goto out;
-       }
-       data = bio_data(bio);
-       memcpy(data, entry->data, rb->seg_size);
-
-out:
-       spin_unlock(&rb->w_lock);
-       return ret;
-}
-
-struct pblk_w_ctx *pblk_rb_w_ctx(struct pblk_rb *rb, unsigned int pos)
-{
-       unsigned int entry = pblk_rb_ptr_wrap(rb, pos, 0);
-
-       return &rb->entries[entry].w_ctx;
-}
-
-unsigned int pblk_rb_sync_init(struct pblk_rb *rb, unsigned long *flags)
-       __acquires(&rb->s_lock)
-{
-       if (flags)
-               spin_lock_irqsave(&rb->s_lock, *flags);
-       else
-               spin_lock_irq(&rb->s_lock);
-
-       return rb->sync;
-}
-
-void pblk_rb_sync_end(struct pblk_rb *rb, unsigned long *flags)
-       __releases(&rb->s_lock)
-{
-       lockdep_assert_held(&rb->s_lock);
-
-       if (flags)
-               spin_unlock_irqrestore(&rb->s_lock, *flags);
-       else
-               spin_unlock_irq(&rb->s_lock);
-}
-
-unsigned int pblk_rb_sync_advance(struct pblk_rb *rb, unsigned int nr_entries)
-{
-       unsigned int sync, flush_point;
-       lockdep_assert_held(&rb->s_lock);
-
-       sync = READ_ONCE(rb->sync);
-       flush_point = READ_ONCE(rb->flush_point);
-
-       if (flush_point != EMPTY_ENTRY) {
-               unsigned int secs_to_flush;
-
-               secs_to_flush = pblk_rb_ring_count(flush_point, sync,
-                                       rb->nr_entries);
-               if (secs_to_flush < nr_entries) {
-                       /* Protect flush points */
-                       smp_store_release(&rb->flush_point, EMPTY_ENTRY);
-               }
-       }
-
-       sync = pblk_rb_ptr_wrap(rb, sync, nr_entries);
-
-       /* Protect from counts */
-       smp_store_release(&rb->sync, sync);
-
-       return sync;
-}
-
-/* Calculate how many sectors to submit up to the current flush point. */
-unsigned int pblk_rb_flush_point_count(struct pblk_rb *rb)
-{
-       unsigned int subm, sync, flush_point;
-       unsigned int submitted, to_flush;
-
-       /* Protect flush points */
-       flush_point = smp_load_acquire(&rb->flush_point);
-       if (flush_point == EMPTY_ENTRY)
-               return 0;
-
-       /* Protect syncs */
-       sync = smp_load_acquire(&rb->sync);
-
-       subm = READ_ONCE(rb->subm);
-       submitted = pblk_rb_ring_count(subm, sync, rb->nr_entries);
-
-       /* The sync point itself counts as a sector to sync */
-       to_flush = pblk_rb_ring_count(flush_point, sync, rb->nr_entries) + 1;
-
-       return (submitted < to_flush) ? (to_flush - submitted) : 0;
-}
-
-int pblk_rb_tear_down_check(struct pblk_rb *rb)
-{
-       struct pblk_rb_entry *entry;
-       int i;
-       int ret = 0;
-
-       spin_lock(&rb->w_lock);
-       spin_lock_irq(&rb->s_lock);
-
-       if ((rb->mem == rb->subm) && (rb->subm == rb->sync) &&
-                               (rb->sync == rb->l2p_update) &&
-                               (rb->flush_point == EMPTY_ENTRY)) {
-               goto out;
-       }
-
-       if (!rb->entries) {
-               ret = 1;
-               goto out;
-       }
-
-       for (i = 0; i < rb->nr_entries; i++) {
-               entry = &rb->entries[i];
-
-               if (!entry->data) {
-                       ret = 1;
-                       goto out;
-               }
-       }
-
-out:
-       spin_unlock_irq(&rb->s_lock);
-       spin_unlock(&rb->w_lock);
-
-       return ret;
-}
-
-unsigned int pblk_rb_wrap_pos(struct pblk_rb *rb, unsigned int pos)
-{
-       return (pos & (rb->nr_entries - 1));
-}
-
-int pblk_rb_pos_oob(struct pblk_rb *rb, u64 pos)
-{
-       return (pos >= rb->nr_entries);
-}
-
-ssize_t pblk_rb_sysfs(struct pblk_rb *rb, char *buf)
-{
-       struct pblk *pblk = container_of(rb, struct pblk, rwb);
-       struct pblk_c_ctx *c;
-       ssize_t offset;
-       int queued_entries = 0;
-
-       spin_lock_irq(&rb->s_lock);
-       list_for_each_entry(c, &pblk->compl_list, list)
-               queued_entries++;
-       spin_unlock_irq(&rb->s_lock);
-
-       if (rb->flush_point != EMPTY_ENTRY)
-               offset = scnprintf(buf, PAGE_SIZE,
-                       "%u\t%u\t%u\t%u\t%u\t%u\t%u - %u/%u/%u - %d\n",
-                       rb->nr_entries,
-                       rb->mem,
-                       rb->subm,
-                       rb->sync,
-                       rb->l2p_update,
-#ifdef CONFIG_NVM_PBLK_DEBUG
-                       atomic_read(&rb->inflight_flush_point),
-#else
-                       0,
-#endif
-                       rb->flush_point,
-                       pblk_rb_read_count(rb),
-                       pblk_rb_space(rb),
-                       pblk_rb_flush_point_count(rb),
-                       queued_entries);
-       else
-               offset = scnprintf(buf, PAGE_SIZE,
-                       "%u\t%u\t%u\t%u\t%u\t%u\tNULL - %u/%u/%u - %d\n",
-                       rb->nr_entries,
-                       rb->mem,
-                       rb->subm,
-                       rb->sync,
-                       rb->l2p_update,
-#ifdef CONFIG_NVM_PBLK_DEBUG
-                       atomic_read(&rb->inflight_flush_point),
-#else
-                       0,
-#endif
-                       pblk_rb_read_count(rb),
-                       pblk_rb_space(rb),
-                       pblk_rb_flush_point_count(rb),
-                       queued_entries);
-
-       return offset;
-}
diff --git a/drivers/lightnvm/pblk-read.c b/drivers/lightnvm/pblk-read.c
deleted file mode 100644 (file)
index c28537a..0000000
+++ /dev/null
@@ -1,474 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2016 CNEX Labs
- * Initial release: Javier Gonzalez <javier@cnexlabs.com>
- *                  Matias Bjorling <matias@cnexlabs.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * pblk-read.c - pblk's read path
- */
-
-#include "pblk.h"
-
-/*
- * There is no guarantee that the value read from cache has not been updated and
- * resides at another location in the cache. We guarantee though that if the
- * value is read from the cache, it belongs to the mapped lba. In order to
- * guarantee and order between writes and reads are ordered, a flush must be
- * issued.
- */
-static int pblk_read_from_cache(struct pblk *pblk, struct bio *bio,
-                               sector_t lba, struct ppa_addr ppa)
-{
-#ifdef CONFIG_NVM_PBLK_DEBUG
-       /* Callers must ensure that the ppa points to a cache address */
-       BUG_ON(pblk_ppa_empty(ppa));
-       BUG_ON(!pblk_addr_in_cache(ppa));
-#endif
-
-       return pblk_rb_copy_to_bio(&pblk->rwb, bio, lba, ppa);
-}
-
-static int pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd,
-                                struct bio *bio, sector_t blba,
-                                bool *from_cache)
-{
-       void *meta_list = rqd->meta_list;
-       int nr_secs, i;
-
-retry:
-       nr_secs = pblk_lookup_l2p_seq(pblk, rqd->ppa_list, blba, rqd->nr_ppas,
-                                       from_cache);
-
-       if (!*from_cache)
-               goto end;
-
-       for (i = 0; i < nr_secs; i++) {
-               struct pblk_sec_meta *meta = pblk_get_meta(pblk, meta_list, i);
-               sector_t lba = blba + i;
-
-               if (pblk_ppa_empty(rqd->ppa_list[i])) {
-                       __le64 addr_empty = cpu_to_le64(ADDR_EMPTY);
-
-                       meta->lba = addr_empty;
-               } else if (pblk_addr_in_cache(rqd->ppa_list[i])) {
-                       /*
-                        * Try to read from write buffer. The address is later
-                        * checked on the write buffer to prevent retrieving
-                        * overwritten data.
-                        */
-                       if (!pblk_read_from_cache(pblk, bio, lba,
-                                                       rqd->ppa_list[i])) {
-                               if (i == 0) {
-                                       /*
-                                        * We didn't call with bio_advance()
-                                        * yet, so we can just retry.
-                                        */
-                                       goto retry;
-                               } else {
-                                       /*
-                                        * We already call bio_advance()
-                                        * so we cannot retry and we need
-                                        * to quit that function in order
-                                        * to allow caller to handle the bio
-                                        * splitting in the current sector
-                                        * position.
-                                        */
-                                       nr_secs = i;
-                                       goto end;
-                               }
-                       }
-                       meta->lba = cpu_to_le64(lba);
-#ifdef CONFIG_NVM_PBLK_DEBUG
-                       atomic_long_inc(&pblk->cache_reads);
-#endif
-               }
-               bio_advance(bio, PBLK_EXPOSED_PAGE_SIZE);
-       }
-
-end:
-       if (pblk_io_aligned(pblk, nr_secs))
-               rqd->is_seq = 1;
-
-#ifdef CONFIG_NVM_PBLK_DEBUG
-       atomic_long_add(nr_secs, &pblk->inflight_reads);
-#endif
-
-       return nr_secs;
-}
-
-
-static void pblk_read_check_seq(struct pblk *pblk, struct nvm_rq *rqd,
-                               sector_t blba)
-{
-       void *meta_list = rqd->meta_list;
-       int nr_lbas = rqd->nr_ppas;
-       int i;
-
-       if (!pblk_is_oob_meta_supported(pblk))
-               return;
-
-       for (i = 0; i < nr_lbas; i++) {
-               struct pblk_sec_meta *meta = pblk_get_meta(pblk, meta_list, i);
-               u64 lba = le64_to_cpu(meta->lba);
-
-               if (lba == ADDR_EMPTY)
-                       continue;
-
-               if (lba != blba + i) {
-#ifdef CONFIG_NVM_PBLK_DEBUG
-                       struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
-
-                       print_ppa(pblk, &ppa_list[i], "seq", i);
-#endif
-                       pblk_err(pblk, "corrupted read LBA (%llu/%llu)\n",
-                                                       lba, (u64)blba + i);
-                       WARN_ON(1);
-               }
-       }
-}
-
-/*
- * There can be holes in the lba list.
- */
-static void pblk_read_check_rand(struct pblk *pblk, struct nvm_rq *rqd,
-                                u64 *lba_list, int nr_lbas)
-{
-       void *meta_lba_list = rqd->meta_list;
-       int i, j;
-
-       if (!pblk_is_oob_meta_supported(pblk))
-               return;
-
-       for (i = 0, j = 0; i < nr_lbas; i++) {
-               struct pblk_sec_meta *meta = pblk_get_meta(pblk,
-                                                          meta_lba_list, j);
-               u64 lba = lba_list[i];
-               u64 meta_lba;
-
-               if (lba == ADDR_EMPTY)
-                       continue;
-
-               meta_lba = le64_to_cpu(meta->lba);
-
-               if (lba != meta_lba) {
-#ifdef CONFIG_NVM_PBLK_DEBUG
-                       struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
-
-                       print_ppa(pblk, &ppa_list[j], "rnd", j);
-#endif
-                       pblk_err(pblk, "corrupted read LBA (%llu/%llu)\n",
-                                                       meta_lba, lba);
-                       WARN_ON(1);
-               }
-
-               j++;
-       }
-
-       WARN_ONCE(j != rqd->nr_ppas, "pblk: corrupted random request\n");
-}
-
-static void pblk_end_user_read(struct bio *bio, int error)
-{
-       if (error && error != NVM_RSP_WARN_HIGHECC)
-               bio_io_error(bio);
-       else
-               bio_endio(bio);
-}
-
-static void __pblk_end_io_read(struct pblk *pblk, struct nvm_rq *rqd,
-                              bool put_line)
-{
-       struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
-       struct bio *int_bio = rqd->bio;
-       unsigned long start_time = r_ctx->start_time;
-
-       bio_end_io_acct(int_bio, start_time);
-
-       if (rqd->error)
-               pblk_log_read_err(pblk, rqd);
-
-       pblk_read_check_seq(pblk, rqd, r_ctx->lba);
-       bio_put(int_bio);
-
-       if (put_line)
-               pblk_rq_to_line_put(pblk, rqd);
-
-#ifdef CONFIG_NVM_PBLK_DEBUG
-       atomic_long_add(rqd->nr_ppas, &pblk->sync_reads);
-       atomic_long_sub(rqd->nr_ppas, &pblk->inflight_reads);
-#endif
-
-       pblk_free_rqd(pblk, rqd, PBLK_READ);
-       atomic_dec(&pblk->inflight_io);
-}
-
-static void pblk_end_io_read(struct nvm_rq *rqd)
-{
-       struct pblk *pblk = rqd->private;
-       struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
-       struct bio *bio = (struct bio *)r_ctx->private;
-
-       pblk_end_user_read(bio, rqd->error);
-       __pblk_end_io_read(pblk, rqd, true);
-}
-
-static void pblk_read_rq(struct pblk *pblk, struct nvm_rq *rqd, struct bio *bio,
-                        sector_t lba, bool *from_cache)
-{
-       struct pblk_sec_meta *meta = pblk_get_meta(pblk, rqd->meta_list, 0);
-       struct ppa_addr ppa;
-
-       pblk_lookup_l2p_seq(pblk, &ppa, lba, 1, from_cache);
-
-#ifdef CONFIG_NVM_PBLK_DEBUG
-       atomic_long_inc(&pblk->inflight_reads);
-#endif
-
-retry:
-       if (pblk_ppa_empty(ppa)) {
-               __le64 addr_empty = cpu_to_le64(ADDR_EMPTY);
-
-               meta->lba = addr_empty;
-               return;
-       }
-
-       /* Try to read from write buffer. The address is later checked on the
-        * write buffer to prevent retrieving overwritten data.
-        */
-       if (pblk_addr_in_cache(ppa)) {
-               if (!pblk_read_from_cache(pblk, bio, lba, ppa)) {
-                       pblk_lookup_l2p_seq(pblk, &ppa, lba, 1, from_cache);
-                       goto retry;
-               }
-
-               meta->lba = cpu_to_le64(lba);
-
-#ifdef CONFIG_NVM_PBLK_DEBUG
-               atomic_long_inc(&pblk->cache_reads);
-#endif
-       } else {
-               rqd->ppa_addr = ppa;
-       }
-}
-
-void pblk_submit_read(struct pblk *pblk, struct bio *bio)
-{
-       sector_t blba = pblk_get_lba(bio);
-       unsigned int nr_secs = pblk_get_secs(bio);
-       bool from_cache;
-       struct pblk_g_ctx *r_ctx;
-       struct nvm_rq *rqd;
-       struct bio *int_bio, *split_bio;
-       unsigned long start_time;
-
-       start_time = bio_start_io_acct(bio);
-
-       rqd = pblk_alloc_rqd(pblk, PBLK_READ);
-
-       rqd->opcode = NVM_OP_PREAD;
-       rqd->nr_ppas = nr_secs;
-       rqd->private = pblk;
-       rqd->end_io = pblk_end_io_read;
-
-       r_ctx = nvm_rq_to_pdu(rqd);
-       r_ctx->start_time = start_time;
-       r_ctx->lba = blba;
-
-       if (pblk_alloc_rqd_meta(pblk, rqd)) {
-               bio_io_error(bio);
-               pblk_free_rqd(pblk, rqd, PBLK_READ);
-               return;
-       }
-
-       /* Clone read bio to deal internally with:
-        * -read errors when reading from drive
-        * -bio_advance() calls during cache reads
-        */
-       int_bio = bio_clone_fast(bio, GFP_KERNEL, &pblk_bio_set);
-
-       if (nr_secs > 1)
-               nr_secs = pblk_read_ppalist_rq(pblk, rqd, int_bio, blba,
-                                               &from_cache);
-       else
-               pblk_read_rq(pblk, rqd, int_bio, blba, &from_cache);
-
-split_retry:
-       r_ctx->private = bio; /* original bio */
-       rqd->bio = int_bio; /* internal bio */
-
-       if (from_cache && nr_secs == rqd->nr_ppas) {
-               /* All data was read from cache, we can complete the IO. */
-               pblk_end_user_read(bio, 0);
-               atomic_inc(&pblk->inflight_io);
-               __pblk_end_io_read(pblk, rqd, false);
-       } else if (nr_secs != rqd->nr_ppas) {
-               /* The read bio request could be partially filled by the write
-                * buffer, but there are some holes that need to be read from
-                * the drive. In order to handle this, we will use block layer
-                * mechanism to split this request in to smaller ones and make
-                * a chain of it.
-                */
-               split_bio = bio_split(bio, nr_secs * NR_PHY_IN_LOG, GFP_KERNEL,
-                                       &pblk_bio_set);
-               bio_chain(split_bio, bio);
-               submit_bio_noacct(bio);
-
-               /* New bio contains first N sectors of the previous one, so
-                * we can continue to use existing rqd, but we need to shrink
-                * the number of PPAs in it. New bio is also guaranteed that
-                * it contains only either data from cache or from drive, newer
-                * mix of them.
-                */
-               bio = split_bio;
-               rqd->nr_ppas = nr_secs;
-               if (rqd->nr_ppas == 1)
-                       rqd->ppa_addr = rqd->ppa_list[0];
-
-               /* Recreate int_bio - existing might have some needed internal
-                * fields modified already.
-                */
-               bio_put(int_bio);
-               int_bio = bio_clone_fast(bio, GFP_KERNEL, &pblk_bio_set);
-               goto split_retry;
-       } else if (pblk_submit_io(pblk, rqd, NULL)) {
-               /* Submitting IO to drive failed, let's report an error */
-               rqd->error = -ENODEV;
-               pblk_end_io_read(rqd);
-       }
-}
-
-static int read_ppalist_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
-                             struct pblk_line *line, u64 *lba_list,
-                             u64 *paddr_list_gc, unsigned int nr_secs)
-{
-       struct ppa_addr ppa_list_l2p[NVM_MAX_VLBA];
-       struct ppa_addr ppa_gc;
-       int valid_secs = 0;
-       int i;
-
-       pblk_lookup_l2p_rand(pblk, ppa_list_l2p, lba_list, nr_secs);
-
-       for (i = 0; i < nr_secs; i++) {
-               if (lba_list[i] == ADDR_EMPTY)
-                       continue;
-
-               ppa_gc = addr_to_gen_ppa(pblk, paddr_list_gc[i], line->id);
-               if (!pblk_ppa_comp(ppa_list_l2p[i], ppa_gc)) {
-                       paddr_list_gc[i] = lba_list[i] = ADDR_EMPTY;
-                       continue;
-               }
-
-               rqd->ppa_list[valid_secs++] = ppa_list_l2p[i];
-       }
-
-#ifdef CONFIG_NVM_PBLK_DEBUG
-       atomic_long_add(valid_secs, &pblk->inflight_reads);
-#endif
-
-       return valid_secs;
-}
-
-static int read_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
-                     struct pblk_line *line, sector_t lba,
-                     u64 paddr_gc)
-{
-       struct ppa_addr ppa_l2p, ppa_gc;
-       int valid_secs = 0;
-
-       if (lba == ADDR_EMPTY)
-               goto out;
-
-       /* logic error: lba out-of-bounds */
-       if (lba >= pblk->capacity) {
-               WARN(1, "pblk: read lba out of bounds\n");
-               goto out;
-       }
-
-       spin_lock(&pblk->trans_lock);
-       ppa_l2p = pblk_trans_map_get(pblk, lba);
-       spin_unlock(&pblk->trans_lock);
-
-       ppa_gc = addr_to_gen_ppa(pblk, paddr_gc, line->id);
-       if (!pblk_ppa_comp(ppa_l2p, ppa_gc))
-               goto out;
-
-       rqd->ppa_addr = ppa_l2p;
-       valid_secs = 1;
-
-#ifdef CONFIG_NVM_PBLK_DEBUG
-       atomic_long_inc(&pblk->inflight_reads);
-#endif
-
-out:
-       return valid_secs;
-}
-
-int pblk_submit_read_gc(struct pblk *pblk, struct pblk_gc_rq *gc_rq)
-{
-       struct nvm_rq rqd;
-       int ret = NVM_IO_OK;
-
-       memset(&rqd, 0, sizeof(struct nvm_rq));
-
-       ret = pblk_alloc_rqd_meta(pblk, &rqd);
-       if (ret)
-               return ret;
-
-       if (gc_rq->nr_secs > 1) {
-               gc_rq->secs_to_gc = read_ppalist_rq_gc(pblk, &rqd, gc_rq->line,
-                                                       gc_rq->lba_list,
-                                                       gc_rq->paddr_list,
-                                                       gc_rq->nr_secs);
-               if (gc_rq->secs_to_gc == 1)
-                       rqd.ppa_addr = rqd.ppa_list[0];
-       } else {
-               gc_rq->secs_to_gc = read_rq_gc(pblk, &rqd, gc_rq->line,
-                                                       gc_rq->lba_list[0],
-                                                       gc_rq->paddr_list[0]);
-       }
-
-       if (!(gc_rq->secs_to_gc))
-               goto out;
-
-       rqd.opcode = NVM_OP_PREAD;
-       rqd.nr_ppas = gc_rq->secs_to_gc;
-
-       if (pblk_submit_io_sync(pblk, &rqd, gc_rq->data)) {
-               ret = -EIO;
-               goto err_free_dma;
-       }
-
-       pblk_read_check_rand(pblk, &rqd, gc_rq->lba_list, gc_rq->nr_secs);
-
-       atomic_dec(&pblk->inflight_io);
-
-       if (rqd.error) {
-               atomic_long_inc(&pblk->read_failed_gc);
-#ifdef CONFIG_NVM_PBLK_DEBUG
-               pblk_print_failed_rqd(pblk, &rqd, rqd.error);
-#endif
-       }
-
-#ifdef CONFIG_NVM_PBLK_DEBUG
-       atomic_long_add(gc_rq->secs_to_gc, &pblk->sync_reads);
-       atomic_long_add(gc_rq->secs_to_gc, &pblk->recov_gc_reads);
-       atomic_long_sub(gc_rq->secs_to_gc, &pblk->inflight_reads);
-#endif
-
-out:
-       pblk_free_rqd_meta(pblk, &rqd);
-       return ret;
-
-err_free_dma:
-       pblk_free_rqd_meta(pblk, &rqd);
-       return ret;
-}
diff --git a/drivers/lightnvm/pblk-recovery.c b/drivers/lightnvm/pblk-recovery.c
deleted file mode 100644 (file)
index 0e6f0c7..0000000
+++ /dev/null
@@ -1,874 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2016 CNEX Labs
- * Initial: Javier Gonzalez <javier@cnexlabs.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * pblk-recovery.c - pblk's recovery path
- *
- * The L2P recovery path is single threaded as the L2P table is updated in order
- * following the line sequence ID.
- */
-
-#include "pblk.h"
-#include "pblk-trace.h"
-
-int pblk_recov_check_emeta(struct pblk *pblk, struct line_emeta *emeta_buf)
-{
-       u32 crc;
-
-       crc = pblk_calc_emeta_crc(pblk, emeta_buf);
-       if (le32_to_cpu(emeta_buf->crc) != crc)
-               return 1;
-
-       if (le32_to_cpu(emeta_buf->header.identifier) != PBLK_MAGIC)
-               return 1;
-
-       return 0;
-}
-
-static int pblk_recov_l2p_from_emeta(struct pblk *pblk, struct pblk_line *line)
-{
-       struct nvm_tgt_dev *dev = pblk->dev;
-       struct nvm_geo *geo = &dev->geo;
-       struct pblk_line_meta *lm = &pblk->lm;
-       struct pblk_emeta *emeta = line->emeta;
-       struct line_emeta *emeta_buf = emeta->buf;
-       __le64 *lba_list;
-       u64 data_start, data_end;
-       u64 nr_valid_lbas, nr_lbas = 0;
-       u64 i;
-
-       lba_list = emeta_to_lbas(pblk, emeta_buf);
-       if (!lba_list)
-               return 1;
-
-       data_start = pblk_line_smeta_start(pblk, line) + lm->smeta_sec;
-       data_end = line->emeta_ssec;
-       nr_valid_lbas = le64_to_cpu(emeta_buf->nr_valid_lbas);
-
-       for (i = data_start; i < data_end; i++) {
-               struct ppa_addr ppa;
-               int pos;
-
-               ppa = addr_to_gen_ppa(pblk, i, line->id);
-               pos = pblk_ppa_to_pos(geo, ppa);
-
-               /* Do not update bad blocks */
-               if (test_bit(pos, line->blk_bitmap))
-                       continue;
-
-               if (le64_to_cpu(lba_list[i]) == ADDR_EMPTY) {
-                       spin_lock(&line->lock);
-                       if (test_and_set_bit(i, line->invalid_bitmap))
-                               WARN_ONCE(1, "pblk: rec. double invalidate:\n");
-                       else
-                               le32_add_cpu(line->vsc, -1);
-                       spin_unlock(&line->lock);
-
-                       continue;
-               }
-
-               pblk_update_map(pblk, le64_to_cpu(lba_list[i]), ppa);
-               nr_lbas++;
-       }
-
-       if (nr_valid_lbas != nr_lbas)
-               pblk_err(pblk, "line %d - inconsistent lba list(%llu/%llu)\n",
-                               line->id, nr_valid_lbas, nr_lbas);
-
-       line->left_msecs = 0;
-
-       return 0;
-}
-
-static void pblk_update_line_wp(struct pblk *pblk, struct pblk_line *line,
-                               u64 written_secs)
-{
-       struct pblk_line_mgmt *l_mg = &pblk->l_mg;
-       int i;
-
-       for (i = 0; i < written_secs; i += pblk->min_write_pgs)
-               __pblk_alloc_page(pblk, line, pblk->min_write_pgs);
-
-       spin_lock(&l_mg->free_lock);
-       if (written_secs > line->left_msecs) {
-               /*
-                * We have all data sectors written
-                * and some emeta sectors written too.
-                */
-               line->left_msecs = 0;
-       } else {
-               /* We have only some data sectors written. */
-               line->left_msecs -= written_secs;
-       }
-       spin_unlock(&l_mg->free_lock);
-}
-
-static u64 pblk_sec_in_open_line(struct pblk *pblk, struct pblk_line *line)
-{
-       struct pblk_line_meta *lm = &pblk->lm;
-       int nr_bb = bitmap_weight(line->blk_bitmap, lm->blk_per_line);
-       u64 written_secs = 0;
-       int valid_chunks = 0;
-       int i;
-
-       for (i = 0; i < lm->blk_per_line; i++) {
-               struct nvm_chk_meta *chunk = &line->chks[i];
-
-               if (chunk->state & NVM_CHK_ST_OFFLINE)
-                       continue;
-
-               written_secs += chunk->wp;
-               valid_chunks++;
-       }
-
-       if (lm->blk_per_line - nr_bb != valid_chunks)
-               pblk_err(pblk, "recovery line %d is bad\n", line->id);
-
-       pblk_update_line_wp(pblk, line, written_secs - lm->smeta_sec);
-
-       return written_secs;
-}
-
-struct pblk_recov_alloc {
-       struct ppa_addr *ppa_list;
-       void *meta_list;
-       struct nvm_rq *rqd;
-       void *data;
-       dma_addr_t dma_ppa_list;
-       dma_addr_t dma_meta_list;
-};
-
-static void pblk_recov_complete(struct kref *ref)
-{
-       struct pblk_pad_rq *pad_rq = container_of(ref, struct pblk_pad_rq, ref);
-
-       complete(&pad_rq->wait);
-}
-
-static void pblk_end_io_recov(struct nvm_rq *rqd)
-{
-       struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
-       struct pblk_pad_rq *pad_rq = rqd->private;
-       struct pblk *pblk = pad_rq->pblk;
-
-       pblk_up_chunk(pblk, ppa_list[0]);
-
-       pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
-
-       atomic_dec(&pblk->inflight_io);
-       kref_put(&pad_rq->ref, pblk_recov_complete);
-}
-
-/* pad line using line bitmap.  */
-static int pblk_recov_pad_line(struct pblk *pblk, struct pblk_line *line,
-                              int left_ppas)
-{
-       struct nvm_tgt_dev *dev = pblk->dev;
-       struct nvm_geo *geo = &dev->geo;
-       void *meta_list;
-       struct pblk_pad_rq *pad_rq;
-       struct nvm_rq *rqd;
-       struct ppa_addr *ppa_list;
-       void *data;
-       __le64 *lba_list = emeta_to_lbas(pblk, line->emeta->buf);
-       u64 w_ptr = line->cur_sec;
-       int left_line_ppas, rq_ppas;
-       int i, j;
-       int ret = 0;
-
-       spin_lock(&line->lock);
-       left_line_ppas = line->left_msecs;
-       spin_unlock(&line->lock);
-
-       pad_rq = kmalloc(sizeof(struct pblk_pad_rq), GFP_KERNEL);
-       if (!pad_rq)
-               return -ENOMEM;
-
-       data = vzalloc(array_size(pblk->max_write_pgs, geo->csecs));
-       if (!data) {
-               ret = -ENOMEM;
-               goto free_rq;
-       }
-
-       pad_rq->pblk = pblk;
-       init_completion(&pad_rq->wait);
-       kref_init(&pad_rq->ref);
-
-next_pad_rq:
-       rq_ppas = pblk_calc_secs(pblk, left_ppas, 0, false);
-       if (rq_ppas < pblk->min_write_pgs) {
-               pblk_err(pblk, "corrupted pad line %d\n", line->id);
-               goto fail_complete;
-       }
-
-       rqd = pblk_alloc_rqd(pblk, PBLK_WRITE_INT);
-
-       ret = pblk_alloc_rqd_meta(pblk, rqd);
-       if (ret) {
-               pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
-               goto fail_complete;
-       }
-
-       rqd->bio = NULL;
-       rqd->opcode = NVM_OP_PWRITE;
-       rqd->is_seq = 1;
-       rqd->nr_ppas = rq_ppas;
-       rqd->end_io = pblk_end_io_recov;
-       rqd->private = pad_rq;
-
-       ppa_list = nvm_rq_to_ppa_list(rqd);
-       meta_list = rqd->meta_list;
-
-       for (i = 0; i < rqd->nr_ppas; ) {
-               struct ppa_addr ppa;
-               int pos;
-
-               w_ptr = pblk_alloc_page(pblk, line, pblk->min_write_pgs);
-               ppa = addr_to_gen_ppa(pblk, w_ptr, line->id);
-               pos = pblk_ppa_to_pos(geo, ppa);
-
-               while (test_bit(pos, line->blk_bitmap)) {
-                       w_ptr += pblk->min_write_pgs;
-                       ppa = addr_to_gen_ppa(pblk, w_ptr, line->id);
-                       pos = pblk_ppa_to_pos(geo, ppa);
-               }
-
-               for (j = 0; j < pblk->min_write_pgs; j++, i++, w_ptr++) {
-                       struct ppa_addr dev_ppa;
-                       struct pblk_sec_meta *meta;
-                       __le64 addr_empty = cpu_to_le64(ADDR_EMPTY);
-
-                       dev_ppa = addr_to_gen_ppa(pblk, w_ptr, line->id);
-
-                       pblk_map_invalidate(pblk, dev_ppa);
-                       lba_list[w_ptr] = addr_empty;
-                       meta = pblk_get_meta(pblk, meta_list, i);
-                       meta->lba = addr_empty;
-                       ppa_list[i] = dev_ppa;
-               }
-       }
-
-       kref_get(&pad_rq->ref);
-       pblk_down_chunk(pblk, ppa_list[0]);
-
-       ret = pblk_submit_io(pblk, rqd, data);
-       if (ret) {
-               pblk_err(pblk, "I/O submission failed: %d\n", ret);
-               pblk_up_chunk(pblk, ppa_list[0]);
-               kref_put(&pad_rq->ref, pblk_recov_complete);
-               pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
-               goto fail_complete;
-       }
-
-       left_line_ppas -= rq_ppas;
-       left_ppas -= rq_ppas;
-       if (left_ppas && left_line_ppas)
-               goto next_pad_rq;
-
-fail_complete:
-       kref_put(&pad_rq->ref, pblk_recov_complete);
-       wait_for_completion(&pad_rq->wait);
-
-       if (!pblk_line_is_full(line))
-               pblk_err(pblk, "corrupted padded line: %d\n", line->id);
-
-       vfree(data);
-free_rq:
-       kfree(pad_rq);
-       return ret;
-}
-
-static int pblk_pad_distance(struct pblk *pblk, struct pblk_line *line)
-{
-       struct nvm_tgt_dev *dev = pblk->dev;
-       struct nvm_geo *geo = &dev->geo;
-       int distance = geo->mw_cunits * geo->all_luns * geo->ws_opt;
-
-       return (distance > line->left_msecs) ? line->left_msecs : distance;
-}
-
-/* Return a chunk belonging to a line by stripe(write order) index */
-static struct nvm_chk_meta *pblk_get_stripe_chunk(struct pblk *pblk,
-                                                 struct pblk_line *line,
-                                                 int index)
-{
-       struct nvm_tgt_dev *dev = pblk->dev;
-       struct nvm_geo *geo = &dev->geo;
-       struct pblk_lun *rlun;
-       struct ppa_addr ppa;
-       int pos;
-
-       rlun = &pblk->luns[index];
-       ppa = rlun->bppa;
-       pos = pblk_ppa_to_pos(geo, ppa);
-
-       return &line->chks[pos];
-}
-
-static int pblk_line_wps_are_unbalanced(struct pblk *pblk,
-                                     struct pblk_line *line)
-{
-       struct pblk_line_meta *lm = &pblk->lm;
-       int blk_in_line = lm->blk_per_line;
-       struct nvm_chk_meta *chunk;
-       u64 max_wp, min_wp;
-       int i;
-
-       i = find_first_zero_bit(line->blk_bitmap, blk_in_line);
-
-       /* If there is one or zero good chunks in the line,
-        * the write pointers can't be unbalanced.
-        */
-       if (i >= (blk_in_line - 1))
-               return 0;
-
-       chunk = pblk_get_stripe_chunk(pblk, line, i);
-       max_wp = chunk->wp;
-       if (max_wp > pblk->max_write_pgs)
-               min_wp = max_wp - pblk->max_write_pgs;
-       else
-               min_wp = 0;
-
-       i = find_next_zero_bit(line->blk_bitmap, blk_in_line, i + 1);
-       while (i < blk_in_line) {
-               chunk = pblk_get_stripe_chunk(pblk, line, i);
-               if (chunk->wp > max_wp || chunk->wp < min_wp)
-                       return 1;
-
-               i = find_next_zero_bit(line->blk_bitmap, blk_in_line, i + 1);
-       }
-
-       return 0;
-}
-
-static int pblk_recov_scan_oob(struct pblk *pblk, struct pblk_line *line,
-                              struct pblk_recov_alloc p)
-{
-       struct nvm_tgt_dev *dev = pblk->dev;
-       struct pblk_line_meta *lm = &pblk->lm;
-       struct nvm_geo *geo = &dev->geo;
-       struct ppa_addr *ppa_list;
-       void *meta_list;
-       struct nvm_rq *rqd;
-       void *data;
-       dma_addr_t dma_ppa_list, dma_meta_list;
-       __le64 *lba_list;
-       u64 paddr = pblk_line_smeta_start(pblk, line) + lm->smeta_sec;
-       bool padded = false;
-       int rq_ppas;
-       int i, j;
-       int ret;
-       u64 left_ppas = pblk_sec_in_open_line(pblk, line) - lm->smeta_sec;
-
-       if (pblk_line_wps_are_unbalanced(pblk, line))
-               pblk_warn(pblk, "recovering unbalanced line (%d)\n", line->id);
-
-       ppa_list = p.ppa_list;
-       meta_list = p.meta_list;
-       rqd = p.rqd;
-       data = p.data;
-       dma_ppa_list = p.dma_ppa_list;
-       dma_meta_list = p.dma_meta_list;
-
-       lba_list = emeta_to_lbas(pblk, line->emeta->buf);
-
-next_rq:
-       memset(rqd, 0, pblk_g_rq_size);
-
-       rq_ppas = pblk_calc_secs(pblk, left_ppas, 0, false);
-       if (!rq_ppas)
-               rq_ppas = pblk->min_write_pgs;
-
-retry_rq:
-       rqd->bio = NULL;
-       rqd->opcode = NVM_OP_PREAD;
-       rqd->meta_list = meta_list;
-       rqd->nr_ppas = rq_ppas;
-       rqd->ppa_list = ppa_list;
-       rqd->dma_ppa_list = dma_ppa_list;
-       rqd->dma_meta_list = dma_meta_list;
-       ppa_list = nvm_rq_to_ppa_list(rqd);
-
-       if (pblk_io_aligned(pblk, rq_ppas))
-               rqd->is_seq = 1;
-
-       for (i = 0; i < rqd->nr_ppas; ) {
-               struct ppa_addr ppa;
-               int pos;
-
-               ppa = addr_to_gen_ppa(pblk, paddr, line->id);
-               pos = pblk_ppa_to_pos(geo, ppa);
-
-               while (test_bit(pos, line->blk_bitmap)) {
-                       paddr += pblk->min_write_pgs;
-                       ppa = addr_to_gen_ppa(pblk, paddr, line->id);
-                       pos = pblk_ppa_to_pos(geo, ppa);
-               }
-
-               for (j = 0; j < pblk->min_write_pgs; j++, i++)
-                       ppa_list[i] =
-                               addr_to_gen_ppa(pblk, paddr + j, line->id);
-       }
-
-       ret = pblk_submit_io_sync(pblk, rqd, data);
-       if (ret) {
-               pblk_err(pblk, "I/O submission failed: %d\n", ret);
-               return ret;
-       }
-
-       atomic_dec(&pblk->inflight_io);
-
-       /* If a read fails, do a best effort by padding the line and retrying */
-       if (rqd->error && rqd->error != NVM_RSP_WARN_HIGHECC) {
-               int pad_distance, ret;
-
-               if (padded) {
-                       pblk_log_read_err(pblk, rqd);
-                       return -EINTR;
-               }
-
-               pad_distance = pblk_pad_distance(pblk, line);
-               ret = pblk_recov_pad_line(pblk, line, pad_distance);
-               if (ret) {
-                       return ret;
-               }
-
-               padded = true;
-               goto retry_rq;
-       }
-
-       pblk_get_packed_meta(pblk, rqd);
-
-       for (i = 0; i < rqd->nr_ppas; i++) {
-               struct pblk_sec_meta *meta = pblk_get_meta(pblk, meta_list, i);
-               u64 lba = le64_to_cpu(meta->lba);
-
-               lba_list[paddr++] = cpu_to_le64(lba);
-
-               if (lba == ADDR_EMPTY || lba >= pblk->capacity)
-                       continue;
-
-               line->nr_valid_lbas++;
-               pblk_update_map(pblk, lba, ppa_list[i]);
-       }
-
-       left_ppas -= rq_ppas;
-       if (left_ppas > 0)
-               goto next_rq;
-
-#ifdef CONFIG_NVM_PBLK_DEBUG
-       WARN_ON(padded && !pblk_line_is_full(line));
-#endif
-
-       return 0;
-}
-
-/* Scan line for lbas on out of bound area */
-static int pblk_recov_l2p_from_oob(struct pblk *pblk, struct pblk_line *line)
-{
-       struct nvm_tgt_dev *dev = pblk->dev;
-       struct nvm_geo *geo = &dev->geo;
-       struct nvm_rq *rqd;
-       struct ppa_addr *ppa_list;
-       void *meta_list;
-       struct pblk_recov_alloc p;
-       void *data;
-       dma_addr_t dma_ppa_list, dma_meta_list;
-       int ret = 0;
-
-       meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL, &dma_meta_list);
-       if (!meta_list)
-               return -ENOMEM;
-
-       ppa_list = (void *)(meta_list) + pblk_dma_meta_size(pblk);
-       dma_ppa_list = dma_meta_list + pblk_dma_meta_size(pblk);
-
-       data = kcalloc(pblk->max_write_pgs, geo->csecs, GFP_KERNEL);
-       if (!data) {
-               ret = -ENOMEM;
-               goto free_meta_list;
-       }
-
-       rqd = mempool_alloc(&pblk->r_rq_pool, GFP_KERNEL);
-       memset(rqd, 0, pblk_g_rq_size);
-
-       p.ppa_list = ppa_list;
-       p.meta_list = meta_list;
-       p.rqd = rqd;
-       p.data = data;
-       p.dma_ppa_list = dma_ppa_list;
-       p.dma_meta_list = dma_meta_list;
-
-       ret = pblk_recov_scan_oob(pblk, line, p);
-       if (ret) {
-               pblk_err(pblk, "could not recover L2P form OOB\n");
-               goto out;
-       }
-
-       if (pblk_line_is_full(line))
-               pblk_line_recov_close(pblk, line);
-
-out:
-       mempool_free(rqd, &pblk->r_rq_pool);
-       kfree(data);
-free_meta_list:
-       nvm_dev_dma_free(dev->parent, meta_list, dma_meta_list);
-
-       return ret;
-}
-
-/* Insert lines ordered by sequence number (seq_num) on list */
-static void pblk_recov_line_add_ordered(struct list_head *head,
-                                       struct pblk_line *line)
-{
-       struct pblk_line *t = NULL;
-
-       list_for_each_entry(t, head, list)
-               if (t->seq_nr > line->seq_nr)
-                       break;
-
-       __list_add(&line->list, t->list.prev, &t->list);
-}
-
-static u64 pblk_line_emeta_start(struct pblk *pblk, struct pblk_line *line)
-{
-       struct nvm_tgt_dev *dev = pblk->dev;
-       struct nvm_geo *geo = &dev->geo;
-       struct pblk_line_meta *lm = &pblk->lm;
-       unsigned int emeta_secs;
-       u64 emeta_start;
-       struct ppa_addr ppa;
-       int pos;
-
-       emeta_secs = lm->emeta_sec[0];
-       emeta_start = lm->sec_per_line;
-
-       while (emeta_secs) {
-               emeta_start--;
-               ppa = addr_to_gen_ppa(pblk, emeta_start, line->id);
-               pos = pblk_ppa_to_pos(geo, ppa);
-               if (!test_bit(pos, line->blk_bitmap))
-                       emeta_secs--;
-       }
-
-       return emeta_start;
-}
-
-static int pblk_recov_check_line_version(struct pblk *pblk,
-                                        struct line_emeta *emeta)
-{
-       struct line_header *header = &emeta->header;
-
-       if (header->version_major != EMETA_VERSION_MAJOR) {
-               pblk_err(pblk, "line major version mismatch: %d, expected: %d\n",
-                        header->version_major, EMETA_VERSION_MAJOR);
-               return 1;
-       }
-
-#ifdef CONFIG_NVM_PBLK_DEBUG
-       if (header->version_minor > EMETA_VERSION_MINOR)
-               pblk_info(pblk, "newer line minor version found: %d\n",
-                               header->version_minor);
-#endif
-
-       return 0;
-}
-
-static void pblk_recov_wa_counters(struct pblk *pblk,
-                                  struct line_emeta *emeta)
-{
-       struct pblk_line_meta *lm = &pblk->lm;
-       struct line_header *header = &emeta->header;
-       struct wa_counters *wa = emeta_to_wa(lm, emeta);
-
-       /* WA counters were introduced in emeta version 0.2 */
-       if (header->version_major > 0 || header->version_minor >= 2) {
-               u64 user = le64_to_cpu(wa->user);
-               u64 pad = le64_to_cpu(wa->pad);
-               u64 gc = le64_to_cpu(wa->gc);
-
-               atomic64_set(&pblk->user_wa, user);
-               atomic64_set(&pblk->pad_wa, pad);
-               atomic64_set(&pblk->gc_wa, gc);
-
-               pblk->user_rst_wa = user;
-               pblk->pad_rst_wa = pad;
-               pblk->gc_rst_wa = gc;
-       }
-}
-
-static int pblk_line_was_written(struct pblk_line *line,
-                                struct pblk *pblk)
-{
-
-       struct pblk_line_meta *lm = &pblk->lm;
-       struct nvm_tgt_dev *dev = pblk->dev;
-       struct nvm_geo *geo = &dev->geo;
-       struct nvm_chk_meta *chunk;
-       struct ppa_addr bppa;
-       int smeta_blk;
-
-       if (line->state == PBLK_LINESTATE_BAD)
-               return 0;
-
-       smeta_blk = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
-       if (smeta_blk >= lm->blk_per_line)
-               return 0;
-
-       bppa = pblk->luns[smeta_blk].bppa;
-       chunk = &line->chks[pblk_ppa_to_pos(geo, bppa)];
-
-       if (chunk->state & NVM_CHK_ST_CLOSED ||
-           (chunk->state & NVM_CHK_ST_OPEN
-            && chunk->wp >= lm->smeta_sec))
-               return 1;
-
-       return 0;
-}
-
-static bool pblk_line_is_open(struct pblk *pblk, struct pblk_line *line)
-{
-       struct pblk_line_meta *lm = &pblk->lm;
-       int i;
-
-       for (i = 0; i < lm->blk_per_line; i++)
-               if (line->chks[i].state & NVM_CHK_ST_OPEN)
-                       return true;
-
-       return false;
-}
-
-struct pblk_line *pblk_recov_l2p(struct pblk *pblk)
-{
-       struct pblk_line_meta *lm = &pblk->lm;
-       struct pblk_line_mgmt *l_mg = &pblk->l_mg;
-       struct pblk_line *line, *tline, *data_line = NULL;
-       struct pblk_smeta *smeta;
-       struct pblk_emeta *emeta;
-       struct line_smeta *smeta_buf;
-       int found_lines = 0, recovered_lines = 0, open_lines = 0;
-       int is_next = 0;
-       int meta_line;
-       int i, valid_uuid = 0;
-       LIST_HEAD(recov_list);
-
-       /* TODO: Implement FTL snapshot */
-
-       /* Scan recovery - takes place when FTL snapshot fails */
-       spin_lock(&l_mg->free_lock);
-       meta_line = find_first_zero_bit(&l_mg->meta_bitmap, PBLK_DATA_LINES);
-       set_bit(meta_line, &l_mg->meta_bitmap);
-       smeta = l_mg->sline_meta[meta_line];
-       emeta = l_mg->eline_meta[meta_line];
-       smeta_buf = (struct line_smeta *)smeta;
-       spin_unlock(&l_mg->free_lock);
-
-       /* Order data lines using their sequence number */
-       for (i = 0; i < l_mg->nr_lines; i++) {
-               u32 crc;
-
-               line = &pblk->lines[i];
-
-               memset(smeta, 0, lm->smeta_len);
-               line->smeta = smeta;
-               line->lun_bitmap = ((void *)(smeta_buf)) +
-                                               sizeof(struct line_smeta);
-
-               if (!pblk_line_was_written(line, pblk))
-                       continue;
-
-               /* Lines that cannot be read are assumed as not written here */
-               if (pblk_line_smeta_read(pblk, line))
-                       continue;
-
-               crc = pblk_calc_smeta_crc(pblk, smeta_buf);
-               if (le32_to_cpu(smeta_buf->crc) != crc)
-                       continue;
-
-               if (le32_to_cpu(smeta_buf->header.identifier) != PBLK_MAGIC)
-                       continue;
-
-               if (smeta_buf->header.version_major != SMETA_VERSION_MAJOR) {
-                       pblk_err(pblk, "found incompatible line version %u\n",
-                                       smeta_buf->header.version_major);
-                       return ERR_PTR(-EINVAL);
-               }
-
-               /* The first valid instance uuid is used for initialization */
-               if (!valid_uuid) {
-                       import_guid(&pblk->instance_uuid, smeta_buf->header.uuid);
-                       valid_uuid = 1;
-               }
-
-               if (!guid_equal(&pblk->instance_uuid,
-                               (guid_t *)&smeta_buf->header.uuid)) {
-                       pblk_debug(pblk, "ignore line %u due to uuid mismatch\n",
-                                       i);
-                       continue;
-               }
-
-               /* Update line metadata */
-               spin_lock(&line->lock);
-               line->id = le32_to_cpu(smeta_buf->header.id);
-               line->type = le16_to_cpu(smeta_buf->header.type);
-               line->seq_nr = le64_to_cpu(smeta_buf->seq_nr);
-               spin_unlock(&line->lock);
-
-               /* Update general metadata */
-               spin_lock(&l_mg->free_lock);
-               if (line->seq_nr >= l_mg->d_seq_nr)
-                       l_mg->d_seq_nr = line->seq_nr + 1;
-               l_mg->nr_free_lines--;
-               spin_unlock(&l_mg->free_lock);
-
-               if (pblk_line_recov_alloc(pblk, line))
-                       goto out;
-
-               pblk_recov_line_add_ordered(&recov_list, line);
-               found_lines++;
-               pblk_debug(pblk, "recovering data line %d, seq:%llu\n",
-                                               line->id, smeta_buf->seq_nr);
-       }
-
-       if (!found_lines) {
-               guid_gen(&pblk->instance_uuid);
-
-               spin_lock(&l_mg->free_lock);
-               WARN_ON_ONCE(!test_and_clear_bit(meta_line,
-                                                       &l_mg->meta_bitmap));
-               spin_unlock(&l_mg->free_lock);
-
-               goto out;
-       }
-
-       /* Verify closed blocks and recover this portion of L2P table*/
-       list_for_each_entry_safe(line, tline, &recov_list, list) {
-               recovered_lines++;
-
-               line->emeta_ssec = pblk_line_emeta_start(pblk, line);
-               line->emeta = emeta;
-               memset(line->emeta->buf, 0, lm->emeta_len[0]);
-
-               if (pblk_line_is_open(pblk, line)) {
-                       pblk_recov_l2p_from_oob(pblk, line);
-                       goto next;
-               }
-
-               if (pblk_line_emeta_read(pblk, line, line->emeta->buf)) {
-                       pblk_recov_l2p_from_oob(pblk, line);
-                       goto next;
-               }
-
-               if (pblk_recov_check_emeta(pblk, line->emeta->buf)) {
-                       pblk_recov_l2p_from_oob(pblk, line);
-                       goto next;
-               }
-
-               if (pblk_recov_check_line_version(pblk, line->emeta->buf))
-                       return ERR_PTR(-EINVAL);
-
-               pblk_recov_wa_counters(pblk, line->emeta->buf);
-
-               if (pblk_recov_l2p_from_emeta(pblk, line))
-                       pblk_recov_l2p_from_oob(pblk, line);
-
-next:
-               if (pblk_line_is_full(line)) {
-                       struct list_head *move_list;
-
-                       spin_lock(&line->lock);
-                       line->state = PBLK_LINESTATE_CLOSED;
-                       trace_pblk_line_state(pblk_disk_name(pblk), line->id,
-                                       line->state);
-                       move_list = pblk_line_gc_list(pblk, line);
-                       spin_unlock(&line->lock);
-
-                       spin_lock(&l_mg->gc_lock);
-                       list_move_tail(&line->list, move_list);
-                       spin_unlock(&l_mg->gc_lock);
-
-                       mempool_free(line->map_bitmap, l_mg->bitmap_pool);
-                       line->map_bitmap = NULL;
-                       line->smeta = NULL;
-                       line->emeta = NULL;
-               } else {
-                       spin_lock(&line->lock);
-                       line->state = PBLK_LINESTATE_OPEN;
-                       spin_unlock(&line->lock);
-
-                       line->emeta->mem = 0;
-                       atomic_set(&line->emeta->sync, 0);
-
-                       trace_pblk_line_state(pblk_disk_name(pblk), line->id,
-                                       line->state);
-
-                       data_line = line;
-                       line->meta_line = meta_line;
-
-                       open_lines++;
-               }
-       }
-
-       if (!open_lines) {
-               spin_lock(&l_mg->free_lock);
-               WARN_ON_ONCE(!test_and_clear_bit(meta_line,
-                                                       &l_mg->meta_bitmap));
-               spin_unlock(&l_mg->free_lock);
-       } else {
-               spin_lock(&l_mg->free_lock);
-               l_mg->data_line = data_line;
-               /* Allocate next line for preparation */
-               l_mg->data_next = pblk_line_get(pblk);
-               if (l_mg->data_next) {
-                       l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
-                       l_mg->data_next->type = PBLK_LINETYPE_DATA;
-                       is_next = 1;
-               }
-               spin_unlock(&l_mg->free_lock);
-       }
-
-       if (is_next)
-               pblk_line_erase(pblk, l_mg->data_next);
-
-out:
-       if (found_lines != recovered_lines)
-               pblk_err(pblk, "failed to recover all found lines %d/%d\n",
-                                               found_lines, recovered_lines);
-
-       return data_line;
-}
-
-/*
- * Pad current line
- */
-int pblk_recov_pad(struct pblk *pblk)
-{
-       struct pblk_line *line;
-       struct pblk_line_mgmt *l_mg = &pblk->l_mg;
-       int left_msecs;
-       int ret = 0;
-
-       spin_lock(&l_mg->free_lock);
-       line = l_mg->data_line;
-       left_msecs = line->left_msecs;
-       spin_unlock(&l_mg->free_lock);
-
-       ret = pblk_recov_pad_line(pblk, line, left_msecs);
-       if (ret) {
-               pblk_err(pblk, "tear down padding failed (%d)\n", ret);
-               return ret;
-       }
-
-       pblk_line_close_meta(pblk, line);
-       return ret;
-}
diff --git a/drivers/lightnvm/pblk-rl.c b/drivers/lightnvm/pblk-rl.c
deleted file mode 100644 (file)
index a5f8bc2..0000000
+++ /dev/null
@@ -1,254 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2016 CNEX Labs
- * Initial release: Javier Gonzalez <javier@cnexlabs.com>
- *                  Matias Bjorling <matias@cnexlabs.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * pblk-rl.c - pblk's rate limiter for user I/O
- *
- */
-
-#include "pblk.h"
-
-static void pblk_rl_kick_u_timer(struct pblk_rl *rl)
-{
-       mod_timer(&rl->u_timer, jiffies + msecs_to_jiffies(5000));
-}
-
-int pblk_rl_is_limit(struct pblk_rl *rl)
-{
-       int rb_space;
-
-       rb_space = atomic_read(&rl->rb_space);
-
-       return (rb_space == 0);
-}
-
-int pblk_rl_user_may_insert(struct pblk_rl *rl, int nr_entries)
-{
-       int rb_user_cnt = atomic_read(&rl->rb_user_cnt);
-       int rb_space = atomic_read(&rl->rb_space);
-
-       if (unlikely(rb_space >= 0) && (rb_space - nr_entries < 0))
-               return NVM_IO_ERR;
-
-       if (rb_user_cnt >= rl->rb_user_max)
-               return NVM_IO_REQUEUE;
-
-       return NVM_IO_OK;
-}
-
-void pblk_rl_inserted(struct pblk_rl *rl, int nr_entries)
-{
-       int rb_space = atomic_read(&rl->rb_space);
-
-       if (unlikely(rb_space >= 0))
-               atomic_sub(nr_entries, &rl->rb_space);
-}
-
-int pblk_rl_gc_may_insert(struct pblk_rl *rl, int nr_entries)
-{
-       int rb_gc_cnt = atomic_read(&rl->rb_gc_cnt);
-       int rb_user_active;
-
-       /* If there is no user I/O let GC take over space on the write buffer */
-       rb_user_active = READ_ONCE(rl->rb_user_active);
-       return (!(rb_gc_cnt >= rl->rb_gc_max && rb_user_active));
-}
-
-void pblk_rl_user_in(struct pblk_rl *rl, int nr_entries)
-{
-       atomic_add(nr_entries, &rl->rb_user_cnt);
-
-       /* Release user I/O state. Protect from GC */
-       smp_store_release(&rl->rb_user_active, 1);
-       pblk_rl_kick_u_timer(rl);
-}
-
-void pblk_rl_werr_line_in(struct pblk_rl *rl)
-{
-       atomic_inc(&rl->werr_lines);
-}
-
-void pblk_rl_werr_line_out(struct pblk_rl *rl)
-{
-       atomic_dec(&rl->werr_lines);
-}
-
-void pblk_rl_gc_in(struct pblk_rl *rl, int nr_entries)
-{
-       atomic_add(nr_entries, &rl->rb_gc_cnt);
-}
-
-void pblk_rl_out(struct pblk_rl *rl, int nr_user, int nr_gc)
-{
-       atomic_sub(nr_user, &rl->rb_user_cnt);
-       atomic_sub(nr_gc, &rl->rb_gc_cnt);
-}
-
-unsigned long pblk_rl_nr_free_blks(struct pblk_rl *rl)
-{
-       return atomic_read(&rl->free_blocks);
-}
-
-unsigned long pblk_rl_nr_user_free_blks(struct pblk_rl *rl)
-{
-       return atomic_read(&rl->free_user_blocks);
-}
-
-static void __pblk_rl_update_rates(struct pblk_rl *rl,
-                                  unsigned long free_blocks)
-{
-       struct pblk *pblk = container_of(rl, struct pblk, rl);
-       int max = rl->rb_budget;
-       int werr_gc_needed = atomic_read(&rl->werr_lines);
-
-       if (free_blocks >= rl->high) {
-               if (werr_gc_needed) {
-                       /* Allocate a small budget for recovering
-                        * lines with write errors
-                        */
-                       rl->rb_gc_max = 1 << rl->rb_windows_pw;
-                       rl->rb_user_max = max - rl->rb_gc_max;
-                       rl->rb_state = PBLK_RL_WERR;
-               } else {
-                       rl->rb_user_max = max;
-                       rl->rb_gc_max = 0;
-                       rl->rb_state = PBLK_RL_OFF;
-               }
-       } else if (free_blocks < rl->high) {
-               int shift = rl->high_pw - rl->rb_windows_pw;
-               int user_windows = free_blocks >> shift;
-               int user_max = user_windows << ilog2(NVM_MAX_VLBA);
-
-               rl->rb_user_max = user_max;
-               rl->rb_gc_max = max - user_max;
-
-               if (free_blocks <= rl->rsv_blocks) {
-                       rl->rb_user_max = 0;
-                       rl->rb_gc_max = max;
-               }
-
-               /* In the worst case, we will need to GC lines in the low list
-                * (high valid sector count). If there are lines to GC on high
-                * or mid lists, these will be prioritized
-                */
-               rl->rb_state = PBLK_RL_LOW;
-       }
-
-       if (rl->rb_state != PBLK_RL_OFF)
-               pblk_gc_should_start(pblk);
-       else
-               pblk_gc_should_stop(pblk);
-}
-
-void pblk_rl_update_rates(struct pblk_rl *rl)
-{
-       __pblk_rl_update_rates(rl, pblk_rl_nr_user_free_blks(rl));
-}
-
-void pblk_rl_free_lines_inc(struct pblk_rl *rl, struct pblk_line *line)
-{
-       int blk_in_line = atomic_read(&line->blk_in_line);
-       int free_blocks;
-
-       atomic_add(blk_in_line, &rl->free_blocks);
-       free_blocks = atomic_add_return(blk_in_line, &rl->free_user_blocks);
-
-       __pblk_rl_update_rates(rl, free_blocks);
-}
-
-void pblk_rl_free_lines_dec(struct pblk_rl *rl, struct pblk_line *line,
-                           bool used)
-{
-       int blk_in_line = atomic_read(&line->blk_in_line);
-       int free_blocks;
-
-       atomic_sub(blk_in_line, &rl->free_blocks);
-
-       if (used)
-               free_blocks = atomic_sub_return(blk_in_line,
-                                                       &rl->free_user_blocks);
-       else
-               free_blocks = atomic_read(&rl->free_user_blocks);
-
-       __pblk_rl_update_rates(rl, free_blocks);
-}
-
-int pblk_rl_high_thrs(struct pblk_rl *rl)
-{
-       return rl->high;
-}
-
-int pblk_rl_max_io(struct pblk_rl *rl)
-{
-       return rl->rb_max_io;
-}
-
-static void pblk_rl_u_timer(struct timer_list *t)
-{
-       struct pblk_rl *rl = from_timer(rl, t, u_timer);
-
-       /* Release user I/O state. Protect from GC */
-       smp_store_release(&rl->rb_user_active, 0);
-}
-
-void pblk_rl_free(struct pblk_rl *rl)
-{
-       del_timer(&rl->u_timer);
-}
-
-void pblk_rl_init(struct pblk_rl *rl, int budget, int threshold)
-{
-       struct pblk *pblk = container_of(rl, struct pblk, rl);
-       struct nvm_tgt_dev *dev = pblk->dev;
-       struct nvm_geo *geo = &dev->geo;
-       struct pblk_line_mgmt *l_mg = &pblk->l_mg;
-       struct pblk_line_meta *lm = &pblk->lm;
-       int sec_meta, blk_meta;
-       unsigned int rb_windows;
-
-       /* Consider sectors used for metadata */
-       sec_meta = (lm->smeta_sec + lm->emeta_sec[0]) * l_mg->nr_free_lines;
-       blk_meta = DIV_ROUND_UP(sec_meta, geo->clba);
-
-       rl->high = pblk->op_blks - blk_meta - lm->blk_per_line;
-       rl->high_pw = get_count_order(rl->high);
-
-       rl->rsv_blocks = pblk_get_min_chks(pblk);
-
-       /* This will always be a power-of-2 */
-       rb_windows = budget / NVM_MAX_VLBA;
-       rl->rb_windows_pw = get_count_order(rb_windows);
-
-       /* To start with, all buffer is available to user I/O writers */
-       rl->rb_budget = budget;
-       rl->rb_user_max = budget;
-       rl->rb_gc_max = 0;
-       rl->rb_state = PBLK_RL_HIGH;
-
-       /* Maximize I/O size and ansure that back threshold is respected */
-       if (threshold)
-               rl->rb_max_io = budget - pblk->min_write_pgs_data - threshold;
-       else
-               rl->rb_max_io = budget - pblk->min_write_pgs_data - 1;
-
-       atomic_set(&rl->rb_user_cnt, 0);
-       atomic_set(&rl->rb_gc_cnt, 0);
-       atomic_set(&rl->rb_space, -1);
-       atomic_set(&rl->werr_lines, 0);
-
-       timer_setup(&rl->u_timer, pblk_rl_u_timer, 0);
-
-       rl->rb_user_active = 0;
-       rl->rb_gc_active = 0;
-}
diff --git a/drivers/lightnvm/pblk-sysfs.c b/drivers/lightnvm/pblk-sysfs.c
deleted file mode 100644 (file)
index 6387302..0000000
+++ /dev/null
@@ -1,728 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2016 CNEX Labs
- * Initial release: Javier Gonzalez <javier@cnexlabs.com>
- *                  Matias Bjorling <matias@cnexlabs.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * Implementation of a physical block-device target for Open-channel SSDs.
- *
- * pblk-sysfs.c - pblk's sysfs
- *
- */
-
-#include "pblk.h"
-
-static ssize_t pblk_sysfs_luns_show(struct pblk *pblk, char *page)
-{
-       struct nvm_tgt_dev *dev = pblk->dev;
-       struct nvm_geo *geo = &dev->geo;
-       struct pblk_lun *rlun;
-       ssize_t sz = 0;
-       int i;
-
-       for (i = 0; i < geo->all_luns; i++) {
-               int active = 1;
-
-               rlun = &pblk->luns[i];
-               if (!down_trylock(&rlun->wr_sem)) {
-                       active = 0;
-                       up(&rlun->wr_sem);
-               }
-               sz += scnprintf(page + sz, PAGE_SIZE - sz,
-                               "pblk: pos:%d, ch:%d, lun:%d - %d\n",
-                                       i,
-                                       rlun->bppa.a.ch,
-                                       rlun->bppa.a.lun,
-                                       active);
-       }
-
-       return sz;
-}
-
-static ssize_t pblk_sysfs_rate_limiter(struct pblk *pblk, char *page)
-{
-       int free_blocks, free_user_blocks, total_blocks;
-       int rb_user_max, rb_user_cnt;
-       int rb_gc_max, rb_gc_cnt, rb_budget, rb_state;
-
-       free_blocks = pblk_rl_nr_free_blks(&pblk->rl);
-       free_user_blocks = pblk_rl_nr_user_free_blks(&pblk->rl);
-       rb_user_max = pblk->rl.rb_user_max;
-       rb_user_cnt = atomic_read(&pblk->rl.rb_user_cnt);
-       rb_gc_max = pblk->rl.rb_gc_max;
-       rb_gc_cnt = atomic_read(&pblk->rl.rb_gc_cnt);
-       rb_budget = pblk->rl.rb_budget;
-       rb_state = pblk->rl.rb_state;
-
-       total_blocks = pblk->rl.total_blocks;
-
-       return snprintf(page, PAGE_SIZE,
-               "u:%u/%u,gc:%u/%u(%u)(stop:<%u,full:>%u,free:%d/%d/%d)-%d\n",
-                               rb_user_cnt,
-                               rb_user_max,
-                               rb_gc_cnt,
-                               rb_gc_max,
-                               rb_state,
-                               rb_budget,
-                               pblk->rl.high,
-                               free_blocks,
-                               free_user_blocks,
-                               total_blocks,
-                               READ_ONCE(pblk->rl.rb_user_active));
-}
-
-static ssize_t pblk_sysfs_gc_state_show(struct pblk *pblk, char *page)
-{
-       int gc_enabled, gc_active;
-
-       pblk_gc_sysfs_state_show(pblk, &gc_enabled, &gc_active);
-       return snprintf(page, PAGE_SIZE, "gc_enabled=%d, gc_active=%d\n",
-                                       gc_enabled, gc_active);
-}
-
-static ssize_t pblk_sysfs_stats(struct pblk *pblk, char *page)
-{
-       ssize_t sz;
-
-       sz = snprintf(page, PAGE_SIZE,
-                       "read_failed=%lu, read_high_ecc=%lu, read_empty=%lu, read_failed_gc=%lu, write_failed=%lu, erase_failed=%lu\n",
-                       atomic_long_read(&pblk->read_failed),
-                       atomic_long_read(&pblk->read_high_ecc),
-                       atomic_long_read(&pblk->read_empty),
-                       atomic_long_read(&pblk->read_failed_gc),
-                       atomic_long_read(&pblk->write_failed),
-                       atomic_long_read(&pblk->erase_failed));
-
-       return sz;
-}
-
-static ssize_t pblk_sysfs_write_buffer(struct pblk *pblk, char *page)
-{
-       return pblk_rb_sysfs(&pblk->rwb, page);
-}
-
-static ssize_t pblk_sysfs_ppaf(struct pblk *pblk, char *page)
-{
-       struct nvm_tgt_dev *dev = pblk->dev;
-       struct nvm_geo *geo = &dev->geo;
-       ssize_t sz = 0;
-
-       if (geo->version == NVM_OCSSD_SPEC_12) {
-               struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&pblk->addrf;
-               struct nvm_addrf_12 *gppaf = (struct nvm_addrf_12 *)&geo->addrf;
-
-               sz = scnprintf(page, PAGE_SIZE,
-                       "g:(b:%d)blk:%d/%d,pg:%d/%d,lun:%d/%d,ch:%d/%d,pl:%d/%d,sec:%d/%d\n",
-                       pblk->addrf_len,
-                       ppaf->blk_offset, ppaf->blk_len,
-                       ppaf->pg_offset, ppaf->pg_len,
-                       ppaf->lun_offset, ppaf->lun_len,
-                       ppaf->ch_offset, ppaf->ch_len,
-                       ppaf->pln_offset, ppaf->pln_len,
-                       ppaf->sec_offset, ppaf->sec_len);
-
-               sz += scnprintf(page + sz, PAGE_SIZE - sz,
-                       "d:blk:%d/%d,pg:%d/%d,lun:%d/%d,ch:%d/%d,pl:%d/%d,sec:%d/%d\n",
-                       gppaf->blk_offset, gppaf->blk_len,
-                       gppaf->pg_offset, gppaf->pg_len,
-                       gppaf->lun_offset, gppaf->lun_len,
-                       gppaf->ch_offset, gppaf->ch_len,
-                       gppaf->pln_offset, gppaf->pln_len,
-                       gppaf->sec_offset, gppaf->sec_len);
-       } else {
-               struct nvm_addrf *ppaf = &pblk->addrf;
-               struct nvm_addrf *gppaf = &geo->addrf;
-
-               sz = scnprintf(page, PAGE_SIZE,
-                       "pblk:(s:%d)ch:%d/%d,lun:%d/%d,chk:%d/%d/sec:%d/%d\n",
-                       pblk->addrf_len,
-                       ppaf->ch_offset, ppaf->ch_len,
-                       ppaf->lun_offset, ppaf->lun_len,
-                       ppaf->chk_offset, ppaf->chk_len,
-                       ppaf->sec_offset, ppaf->sec_len);
-
-               sz += scnprintf(page + sz, PAGE_SIZE - sz,
-                       "device:ch:%d/%d,lun:%d/%d,chk:%d/%d,sec:%d/%d\n",
-                       gppaf->ch_offset, gppaf->ch_len,
-                       gppaf->lun_offset, gppaf->lun_len,
-                       gppaf->chk_offset, gppaf->chk_len,
-                       gppaf->sec_offset, gppaf->sec_len);
-       }
-
-       return sz;
-}
-
-static ssize_t pblk_sysfs_lines(struct pblk *pblk, char *page)
-{
-       struct nvm_tgt_dev *dev = pblk->dev;
-       struct nvm_geo *geo = &dev->geo;
-       struct pblk_line_meta *lm = &pblk->lm;
-       struct pblk_line_mgmt *l_mg = &pblk->l_mg;
-       struct pblk_line *line;
-       ssize_t sz = 0;
-       int nr_free_lines;
-       int cur_data, cur_log;
-       int free_line_cnt = 0, closed_line_cnt = 0, emeta_line_cnt = 0;
-       int d_line_cnt = 0, l_line_cnt = 0;
-       int gc_full = 0, gc_high = 0, gc_mid = 0, gc_low = 0, gc_empty = 0;
-       int gc_werr = 0;
-
-       int bad = 0, cor = 0;
-       int msecs = 0, cur_sec = 0, vsc = 0, sec_in_line = 0;
-       int map_weight = 0, meta_weight = 0;
-
-       spin_lock(&l_mg->free_lock);
-       cur_data = (l_mg->data_line) ? l_mg->data_line->id : -1;
-       cur_log = (l_mg->log_line) ? l_mg->log_line->id : -1;
-       nr_free_lines = l_mg->nr_free_lines;
-
-       list_for_each_entry(line, &l_mg->free_list, list)
-               free_line_cnt++;
-       spin_unlock(&l_mg->free_lock);
-
-       spin_lock(&l_mg->close_lock);
-       list_for_each_entry(line, &l_mg->emeta_list, list)
-               emeta_line_cnt++;
-       spin_unlock(&l_mg->close_lock);
-
-       spin_lock(&l_mg->gc_lock);
-       list_for_each_entry(line, &l_mg->gc_full_list, list) {
-               if (line->type == PBLK_LINETYPE_DATA)
-                       d_line_cnt++;
-               else if (line->type == PBLK_LINETYPE_LOG)
-                       l_line_cnt++;
-               closed_line_cnt++;
-               gc_full++;
-       }
-
-       list_for_each_entry(line, &l_mg->gc_high_list, list) {
-               if (line->type == PBLK_LINETYPE_DATA)
-                       d_line_cnt++;
-               else if (line->type == PBLK_LINETYPE_LOG)
-                       l_line_cnt++;
-               closed_line_cnt++;
-               gc_high++;
-       }
-
-       list_for_each_entry(line, &l_mg->gc_mid_list, list) {
-               if (line->type == PBLK_LINETYPE_DATA)
-                       d_line_cnt++;
-               else if (line->type == PBLK_LINETYPE_LOG)
-                       l_line_cnt++;
-               closed_line_cnt++;
-               gc_mid++;
-       }
-
-       list_for_each_entry(line, &l_mg->gc_low_list, list) {
-               if (line->type == PBLK_LINETYPE_DATA)
-                       d_line_cnt++;
-               else if (line->type == PBLK_LINETYPE_LOG)
-                       l_line_cnt++;
-               closed_line_cnt++;
-               gc_low++;
-       }
-
-       list_for_each_entry(line, &l_mg->gc_empty_list, list) {
-               if (line->type == PBLK_LINETYPE_DATA)
-                       d_line_cnt++;
-               else if (line->type == PBLK_LINETYPE_LOG)
-                       l_line_cnt++;
-               closed_line_cnt++;
-               gc_empty++;
-       }
-
-       list_for_each_entry(line, &l_mg->gc_werr_list, list) {
-               if (line->type == PBLK_LINETYPE_DATA)
-                       d_line_cnt++;
-               else if (line->type == PBLK_LINETYPE_LOG)
-                       l_line_cnt++;
-               closed_line_cnt++;
-               gc_werr++;
-       }
-
-       list_for_each_entry(line, &l_mg->bad_list, list)
-               bad++;
-       list_for_each_entry(line, &l_mg->corrupt_list, list)
-               cor++;
-       spin_unlock(&l_mg->gc_lock);
-
-       spin_lock(&l_mg->free_lock);
-       if (l_mg->data_line) {
-               cur_sec = l_mg->data_line->cur_sec;
-               msecs = l_mg->data_line->left_msecs;
-               vsc = le32_to_cpu(*l_mg->data_line->vsc);
-               sec_in_line = l_mg->data_line->sec_in_line;
-               meta_weight = bitmap_weight(&l_mg->meta_bitmap,
-                                                       PBLK_DATA_LINES);
-
-               spin_lock(&l_mg->data_line->lock);
-               if (l_mg->data_line->map_bitmap)
-                       map_weight = bitmap_weight(l_mg->data_line->map_bitmap,
-                                                       lm->sec_per_line);
-               else
-                       map_weight = 0;
-               spin_unlock(&l_mg->data_line->lock);
-       }
-       spin_unlock(&l_mg->free_lock);
-
-       if (nr_free_lines != free_line_cnt)
-               pblk_err(pblk, "corrupted free line list:%d/%d\n",
-                                               nr_free_lines, free_line_cnt);
-
-       sz = scnprintf(page, PAGE_SIZE - sz,
-               "line: nluns:%d, nblks:%d, nsecs:%d\n",
-               geo->all_luns, lm->blk_per_line, lm->sec_per_line);
-
-       sz += scnprintf(page + sz, PAGE_SIZE - sz,
-               "lines:d:%d,l:%d-f:%d,m:%d/%d,c:%d,b:%d,co:%d(d:%d,l:%d)t:%d\n",
-                                       cur_data, cur_log,
-                                       nr_free_lines,
-                                       emeta_line_cnt, meta_weight,
-                                       closed_line_cnt,
-                                       bad, cor,
-                                       d_line_cnt, l_line_cnt,
-                                       l_mg->nr_lines);
-
-       sz += scnprintf(page + sz, PAGE_SIZE - sz,
-               "GC: full:%d, high:%d, mid:%d, low:%d, empty:%d, werr: %d, queue:%d\n",
-                       gc_full, gc_high, gc_mid, gc_low, gc_empty, gc_werr,
-                       atomic_read(&pblk->gc.read_inflight_gc));
-
-       sz += scnprintf(page + sz, PAGE_SIZE - sz,
-               "data (%d) cur:%d, left:%d, vsc:%d, s:%d, map:%d/%d (%d)\n",
-                       cur_data, cur_sec, msecs, vsc, sec_in_line,
-                       map_weight, lm->sec_per_line,
-                       atomic_read(&pblk->inflight_io));
-
-       return sz;
-}
-
-static ssize_t pblk_sysfs_lines_info(struct pblk *pblk, char *page)
-{
-       struct nvm_tgt_dev *dev = pblk->dev;
-       struct nvm_geo *geo = &dev->geo;
-       struct pblk_line_meta *lm = &pblk->lm;
-       ssize_t sz = 0;
-
-       sz = scnprintf(page, PAGE_SIZE - sz,
-                               "smeta - len:%d, secs:%d\n",
-                                       lm->smeta_len, lm->smeta_sec);
-       sz += scnprintf(page + sz, PAGE_SIZE - sz,
-                               "emeta - len:%d, sec:%d, bb_start:%d\n",
-                                       lm->emeta_len[0], lm->emeta_sec[0],
-                                       lm->emeta_bb);
-       sz += scnprintf(page + sz, PAGE_SIZE - sz,
-                               "bitmap lengths: sec:%d, blk:%d, lun:%d\n",
-                                       lm->sec_bitmap_len,
-                                       lm->blk_bitmap_len,
-                                       lm->lun_bitmap_len);
-       sz += scnprintf(page + sz, PAGE_SIZE - sz,
-                               "blk_line:%d, sec_line:%d, sec_blk:%d\n",
-                                       lm->blk_per_line,
-                                       lm->sec_per_line,
-                                       geo->clba);
-
-       return sz;
-}
-
-static ssize_t pblk_sysfs_get_sec_per_write(struct pblk *pblk, char *page)
-{
-       return snprintf(page, PAGE_SIZE, "%d\n", pblk->sec_per_write);
-}
-
-static ssize_t pblk_get_write_amp(u64 user, u64 gc, u64 pad,
-                                 char *page)
-{
-       int sz;
-
-       sz = scnprintf(page, PAGE_SIZE,
-                       "user:%lld gc:%lld pad:%lld WA:",
-                       user, gc, pad);
-
-       if (!user) {
-               sz += scnprintf(page + sz, PAGE_SIZE - sz, "NaN\n");
-       } else {
-               u64 wa_int;
-               u32 wa_frac;
-
-               wa_int = (user + gc + pad) * 100000;
-               wa_int = div64_u64(wa_int, user);
-               wa_int = div_u64_rem(wa_int, 100000, &wa_frac);
-
-               sz += scnprintf(page + sz, PAGE_SIZE - sz, "%llu.%05u\n",
-                                                       wa_int, wa_frac);
-       }
-
-       return sz;
-}
-
-static ssize_t pblk_sysfs_get_write_amp_mileage(struct pblk *pblk, char *page)
-{
-       return pblk_get_write_amp(atomic64_read(&pblk->user_wa),
-               atomic64_read(&pblk->gc_wa), atomic64_read(&pblk->pad_wa),
-               page);
-}
-
-static ssize_t pblk_sysfs_get_write_amp_trip(struct pblk *pblk, char *page)
-{
-       return pblk_get_write_amp(
-               atomic64_read(&pblk->user_wa) - pblk->user_rst_wa,
-               atomic64_read(&pblk->gc_wa) - pblk->gc_rst_wa,
-               atomic64_read(&pblk->pad_wa) - pblk->pad_rst_wa, page);
-}
-
-static long long bucket_percentage(unsigned long long bucket,
-                                  unsigned long long total)
-{
-       int p = bucket * 100;
-
-       p = div_u64(p, total);
-
-       return p;
-}
-
-static ssize_t pblk_sysfs_get_padding_dist(struct pblk *pblk, char *page)
-{
-       int sz = 0;
-       unsigned long long total;
-       unsigned long long total_buckets = 0;
-       int buckets = pblk->min_write_pgs - 1;
-       int i;
-
-       total = atomic64_read(&pblk->nr_flush) - pblk->nr_flush_rst;
-       if (!total) {
-               for (i = 0; i < (buckets + 1); i++)
-                       sz += scnprintf(page + sz, PAGE_SIZE - sz,
-                               "%d:0 ", i);
-               sz += scnprintf(page + sz, PAGE_SIZE - sz, "\n");
-
-               return sz;
-       }
-
-       for (i = 0; i < buckets; i++)
-               total_buckets += atomic64_read(&pblk->pad_dist[i]);
-
-       sz += scnprintf(page + sz, PAGE_SIZE - sz, "0:%lld%% ",
-               bucket_percentage(total - total_buckets, total));
-
-       for (i = 0; i < buckets; i++) {
-               unsigned long long p;
-
-               p = bucket_percentage(atomic64_read(&pblk->pad_dist[i]),
-                                         total);
-               sz += scnprintf(page + sz, PAGE_SIZE - sz, "%d:%lld%% ",
-                               i + 1, p);
-       }
-       sz += scnprintf(page + sz, PAGE_SIZE - sz, "\n");
-
-       return sz;
-}
-
-#ifdef CONFIG_NVM_PBLK_DEBUG
-static ssize_t pblk_sysfs_stats_debug(struct pblk *pblk, char *page)
-{
-       return snprintf(page, PAGE_SIZE,
-               "%lu\t%lu\t%ld\t%llu\t%ld\t%lu\t%lu\t%lu\t%lu\t%lu\t%lu\t%lu\t%lu\n",
-                       atomic_long_read(&pblk->inflight_writes),
-                       atomic_long_read(&pblk->inflight_reads),
-                       atomic_long_read(&pblk->req_writes),
-                       (u64)atomic64_read(&pblk->nr_flush),
-                       atomic_long_read(&pblk->padded_writes),
-                       atomic_long_read(&pblk->padded_wb),
-                       atomic_long_read(&pblk->sub_writes),
-                       atomic_long_read(&pblk->sync_writes),
-                       atomic_long_read(&pblk->recov_writes),
-                       atomic_long_read(&pblk->recov_gc_writes),
-                       atomic_long_read(&pblk->recov_gc_reads),
-                       atomic_long_read(&pblk->cache_reads),
-                       atomic_long_read(&pblk->sync_reads));
-}
-#endif
-
-static ssize_t pblk_sysfs_gc_force(struct pblk *pblk, const char *page,
-                                  size_t len)
-{
-       size_t c_len;
-       int force;
-
-       c_len = strcspn(page, "\n");
-       if (c_len >= len)
-               return -EINVAL;
-
-       if (kstrtouint(page, 0, &force))
-               return -EINVAL;
-
-       pblk_gc_sysfs_force(pblk, force);
-
-       return len;
-}
-
-static ssize_t pblk_sysfs_set_sec_per_write(struct pblk *pblk,
-                                            const char *page, size_t len)
-{
-       size_t c_len;
-       int sec_per_write;
-
-       c_len = strcspn(page, "\n");
-       if (c_len >= len)
-               return -EINVAL;
-
-       if (kstrtouint(page, 0, &sec_per_write))
-               return -EINVAL;
-
-       if (!pblk_is_oob_meta_supported(pblk)) {
-               /* For packed metadata case it is
-                * not allowed to change sec_per_write.
-                */
-               return -EINVAL;
-       }
-
-       if (sec_per_write < pblk->min_write_pgs
-                               || sec_per_write > pblk->max_write_pgs
-                               || sec_per_write % pblk->min_write_pgs != 0)
-               return -EINVAL;
-
-       pblk_set_sec_per_write(pblk, sec_per_write);
-
-       return len;
-}
-
-static ssize_t pblk_sysfs_set_write_amp_trip(struct pblk *pblk,
-                       const char *page, size_t len)
-{
-       size_t c_len;
-       int reset_value;
-
-       c_len = strcspn(page, "\n");
-       if (c_len >= len)
-               return -EINVAL;
-
-       if (kstrtouint(page, 0, &reset_value))
-               return -EINVAL;
-
-       if (reset_value !=  0)
-               return -EINVAL;
-
-       pblk->user_rst_wa = atomic64_read(&pblk->user_wa);
-       pblk->pad_rst_wa = atomic64_read(&pblk->pad_wa);
-       pblk->gc_rst_wa = atomic64_read(&pblk->gc_wa);
-
-       return len;
-}
-
-
-static ssize_t pblk_sysfs_set_padding_dist(struct pblk *pblk,
-                       const char *page, size_t len)
-{
-       size_t c_len;
-       int reset_value;
-       int buckets = pblk->min_write_pgs - 1;
-       int i;
-
-       c_len = strcspn(page, "\n");
-       if (c_len >= len)
-               return -EINVAL;
-
-       if (kstrtouint(page, 0, &reset_value))
-               return -EINVAL;
-
-       if (reset_value !=  0)
-               return -EINVAL;
-
-       for (i = 0; i < buckets; i++)
-               atomic64_set(&pblk->pad_dist[i], 0);
-
-       pblk->nr_flush_rst = atomic64_read(&pblk->nr_flush);
-
-       return len;
-}
-
-static struct attribute sys_write_luns = {
-       .name = "write_luns",
-       .mode = 0444,
-};
-
-static struct attribute sys_rate_limiter_attr = {
-       .name = "rate_limiter",
-       .mode = 0444,
-};
-
-static struct attribute sys_gc_state = {
-       .name = "gc_state",
-       .mode = 0444,
-};
-
-static struct attribute sys_errors_attr = {
-       .name = "errors",
-       .mode = 0444,
-};
-
-static struct attribute sys_rb_attr = {
-       .name = "write_buffer",
-       .mode = 0444,
-};
-
-static struct attribute sys_stats_ppaf_attr = {
-       .name = "ppa_format",
-       .mode = 0444,
-};
-
-static struct attribute sys_lines_attr = {
-       .name = "lines",
-       .mode = 0444,
-};
-
-static struct attribute sys_lines_info_attr = {
-       .name = "lines_info",
-       .mode = 0444,
-};
-
-static struct attribute sys_gc_force = {
-       .name = "gc_force",
-       .mode = 0200,
-};
-
-static struct attribute sys_max_sec_per_write = {
-       .name = "max_sec_per_write",
-       .mode = 0644,
-};
-
-static struct attribute sys_write_amp_mileage = {
-       .name = "write_amp_mileage",
-       .mode = 0444,
-};
-
-static struct attribute sys_write_amp_trip = {
-       .name = "write_amp_trip",
-       .mode = 0644,
-};
-
-static struct attribute sys_padding_dist = {
-       .name = "padding_dist",
-       .mode = 0644,
-};
-
-#ifdef CONFIG_NVM_PBLK_DEBUG
-static struct attribute sys_stats_debug_attr = {
-       .name = "stats",
-       .mode = 0444,
-};
-#endif
-
-static struct attribute *pblk_attrs[] = {
-       &sys_write_luns,
-       &sys_rate_limiter_attr,
-       &sys_errors_attr,
-       &sys_gc_state,
-       &sys_gc_force,
-       &sys_max_sec_per_write,
-       &sys_rb_attr,
-       &sys_stats_ppaf_attr,
-       &sys_lines_attr,
-       &sys_lines_info_attr,
-       &sys_write_amp_mileage,
-       &sys_write_amp_trip,
-       &sys_padding_dist,
-#ifdef CONFIG_NVM_PBLK_DEBUG
-       &sys_stats_debug_attr,
-#endif
-       NULL,
-};
-
-static ssize_t pblk_sysfs_show(struct kobject *kobj, struct attribute *attr,
-                              char *buf)
-{
-       struct pblk *pblk = container_of(kobj, struct pblk, kobj);
-
-       if (strcmp(attr->name, "rate_limiter") == 0)
-               return pblk_sysfs_rate_limiter(pblk, buf);
-       else if (strcmp(attr->name, "write_luns") == 0)
-               return pblk_sysfs_luns_show(pblk, buf);
-       else if (strcmp(attr->name, "gc_state") == 0)
-               return pblk_sysfs_gc_state_show(pblk, buf);
-       else if (strcmp(attr->name, "errors") == 0)
-               return pblk_sysfs_stats(pblk, buf);
-       else if (strcmp(attr->name, "write_buffer") == 0)
-               return pblk_sysfs_write_buffer(pblk, buf);
-       else if (strcmp(attr->name, "ppa_format") == 0)
-               return pblk_sysfs_ppaf(pblk, buf);
-       else if (strcmp(attr->name, "lines") == 0)
-               return pblk_sysfs_lines(pblk, buf);
-       else if (strcmp(attr->name, "lines_info") == 0)
-               return pblk_sysfs_lines_info(pblk, buf);
-       else if (strcmp(attr->name, "max_sec_per_write") == 0)
-               return pblk_sysfs_get_sec_per_write(pblk, buf);
-       else if (strcmp(attr->name, "write_amp_mileage") == 0)
-               return pblk_sysfs_get_write_amp_mileage(pblk, buf);
-       else if (strcmp(attr->name, "write_amp_trip") == 0)
-               return pblk_sysfs_get_write_amp_trip(pblk, buf);
-       else if (strcmp(attr->name, "padding_dist") == 0)
-               return pblk_sysfs_get_padding_dist(pblk, buf);
-#ifdef CONFIG_NVM_PBLK_DEBUG
-       else if (strcmp(attr->name, "stats") == 0)
-               return pblk_sysfs_stats_debug(pblk, buf);
-#endif
-       return 0;
-}
-
-static ssize_t pblk_sysfs_store(struct kobject *kobj, struct attribute *attr,
-                               const char *buf, size_t len)
-{
-       struct pblk *pblk = container_of(kobj, struct pblk, kobj);
-
-       if (strcmp(attr->name, "gc_force") == 0)
-               return pblk_sysfs_gc_force(pblk, buf, len);
-       else if (strcmp(attr->name, "max_sec_per_write") == 0)
-               return pblk_sysfs_set_sec_per_write(pblk, buf, len);
-       else if (strcmp(attr->name, "write_amp_trip") == 0)
-               return pblk_sysfs_set_write_amp_trip(pblk, buf, len);
-       else if (strcmp(attr->name, "padding_dist") == 0)
-               return pblk_sysfs_set_padding_dist(pblk, buf, len);
-       return 0;
-}
-
-static const struct sysfs_ops pblk_sysfs_ops = {
-       .show = pblk_sysfs_show,
-       .store = pblk_sysfs_store,
-};
-
-static struct kobj_type pblk_ktype = {
-       .sysfs_ops      = &pblk_sysfs_ops,
-       .default_attrs  = pblk_attrs,
-};
-
-int pblk_sysfs_init(struct gendisk *tdisk)
-{
-       struct pblk *pblk = tdisk->private_data;
-       struct device *parent_dev = disk_to_dev(pblk->disk);
-       int ret;
-
-       ret = kobject_init_and_add(&pblk->kobj, &pblk_ktype,
-                                       kobject_get(&parent_dev->kobj),
-                                       "%s", "pblk");
-       if (ret) {
-               pblk_err(pblk, "could not register\n");
-               return ret;
-       }
-
-       kobject_uevent(&pblk->kobj, KOBJ_ADD);
-       return 0;
-}
-
-void pblk_sysfs_exit(struct gendisk *tdisk)
-{
-       struct pblk *pblk = tdisk->private_data;
-
-       kobject_uevent(&pblk->kobj, KOBJ_REMOVE);
-       kobject_del(&pblk->kobj);
-       kobject_put(&pblk->kobj);
-}
diff --git a/drivers/lightnvm/pblk-trace.h b/drivers/lightnvm/pblk-trace.h
deleted file mode 100644 (file)
index 47b67c6..0000000
+++ /dev/null
@@ -1,145 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM pblk
-
-#if !defined(_TRACE_PBLK_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _TRACE_PBLK_H
-
-#include <linux/tracepoint.h>
-
-struct ppa_addr;
-
-#define show_chunk_flags(state) __print_flags(state, "",       \
-       { NVM_CHK_ST_FREE,              "FREE",         },      \
-       { NVM_CHK_ST_CLOSED,            "CLOSED",       },      \
-       { NVM_CHK_ST_OPEN,              "OPEN",         },      \
-       { NVM_CHK_ST_OFFLINE,           "OFFLINE",      })
-
-#define show_line_state(state) __print_symbolic(state,         \
-       { PBLK_LINESTATE_NEW,           "NEW",          },      \
-       { PBLK_LINESTATE_FREE,          "FREE",         },      \
-       { PBLK_LINESTATE_OPEN,          "OPEN",         },      \
-       { PBLK_LINESTATE_CLOSED,        "CLOSED",       },      \
-       { PBLK_LINESTATE_GC,            "GC",           },      \
-       { PBLK_LINESTATE_BAD,           "BAD",          },      \
-       { PBLK_LINESTATE_CORRUPT,       "CORRUPT"       })
-
-
-#define show_pblk_state(state) __print_symbolic(state,         \
-       { PBLK_STATE_RUNNING,           "RUNNING",      },      \
-       { PBLK_STATE_STOPPING,          "STOPPING",     },      \
-       { PBLK_STATE_RECOVERING,        "RECOVERING",   },      \
-       { PBLK_STATE_STOPPED,           "STOPPED"       })
-
-#define show_chunk_erase_state(state) __print_symbolic(state,  \
-       { PBLK_CHUNK_RESET_START,       "START",        },      \
-       { PBLK_CHUNK_RESET_DONE,        "OK",           },      \
-       { PBLK_CHUNK_RESET_FAILED,      "FAILED"        })
-
-
-TRACE_EVENT(pblk_chunk_reset,
-
-       TP_PROTO(const char *name, struct ppa_addr *ppa, int state),
-
-       TP_ARGS(name, ppa, state),
-
-       TP_STRUCT__entry(
-               __string(name, name)
-               __field(u64, ppa)
-               __field(int, state)
-       ),
-
-       TP_fast_assign(
-               __assign_str(name, name);
-               __entry->ppa = ppa->ppa;
-               __entry->state = state;
-       ),
-
-       TP_printk("dev=%s grp=%llu pu=%llu chk=%llu state=%s", __get_str(name),
-                       (u64)(((struct ppa_addr *)(&__entry->ppa))->m.grp),
-                       (u64)(((struct ppa_addr *)(&__entry->ppa))->m.pu),
-                       (u64)(((struct ppa_addr *)(&__entry->ppa))->m.chk),
-                       show_chunk_erase_state((int)__entry->state))
-
-);
-
-TRACE_EVENT(pblk_chunk_state,
-
-       TP_PROTO(const char *name, struct ppa_addr *ppa, int state),
-
-       TP_ARGS(name, ppa, state),
-
-       TP_STRUCT__entry(
-               __string(name, name)
-               __field(u64, ppa)
-               __field(int, state)
-       ),
-
-       TP_fast_assign(
-               __assign_str(name, name);
-               __entry->ppa = ppa->ppa;
-               __entry->state = state;
-       ),
-
-       TP_printk("dev=%s grp=%llu pu=%llu chk=%llu state=%s", __get_str(name),
-                       (u64)(((struct ppa_addr *)(&__entry->ppa))->m.grp),
-                       (u64)(((struct ppa_addr *)(&__entry->ppa))->m.pu),
-                       (u64)(((struct ppa_addr *)(&__entry->ppa))->m.chk),
-                       show_chunk_flags((int)__entry->state))
-
-);
-
-TRACE_EVENT(pblk_line_state,
-
-       TP_PROTO(const char *name, int line, int state),
-
-       TP_ARGS(name, line, state),
-
-       TP_STRUCT__entry(
-               __string(name, name)
-               __field(int, line)
-               __field(int, state)
-       ),
-
-       TP_fast_assign(
-               __assign_str(name, name);
-               __entry->line = line;
-               __entry->state = state;
-       ),
-
-       TP_printk("dev=%s line=%d state=%s", __get_str(name),
-                       (int)__entry->line,
-                       show_line_state((int)__entry->state))
-
-);
-
-TRACE_EVENT(pblk_state,
-
-       TP_PROTO(const char *name, int state),
-
-       TP_ARGS(name, state),
-
-       TP_STRUCT__entry(
-               __string(name, name)
-               __field(int, state)
-       ),
-
-       TP_fast_assign(
-               __assign_str(name, name);
-               __entry->state = state;
-       ),
-
-       TP_printk("dev=%s state=%s", __get_str(name),
-                       show_pblk_state((int)__entry->state))
-
-);
-
-#endif /* !defined(_TRACE_PBLK_H) || defined(TRACE_HEADER_MULTI_READ) */
-
-/* This part must be outside protection */
-
-#undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH ../../drivers/lightnvm
-#undef TRACE_INCLUDE_FILE
-#define TRACE_INCLUDE_FILE pblk-trace
-#include <trace/define_trace.h>
diff --git a/drivers/lightnvm/pblk-write.c b/drivers/lightnvm/pblk-write.c
deleted file mode 100644 (file)
index b9a2aeb..0000000
+++ /dev/null
@@ -1,665 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2016 CNEX Labs
- * Initial release: Javier Gonzalez <javier@cnexlabs.com>
- *                  Matias Bjorling <matias@cnexlabs.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * pblk-write.c - pblk's write path from write buffer to media
- */
-
-#include "pblk.h"
-#include "pblk-trace.h"
-
-static unsigned long pblk_end_w_bio(struct pblk *pblk, struct nvm_rq *rqd,
-                                   struct pblk_c_ctx *c_ctx)
-{
-       struct bio *original_bio;
-       struct pblk_rb *rwb = &pblk->rwb;
-       unsigned long ret;
-       int i;
-
-       for (i = 0; i < c_ctx->nr_valid; i++) {
-               struct pblk_w_ctx *w_ctx;
-               int pos = c_ctx->sentry + i;
-               int flags;
-
-               w_ctx = pblk_rb_w_ctx(rwb, pos);
-               flags = READ_ONCE(w_ctx->flags);
-
-               if (flags & PBLK_FLUSH_ENTRY) {
-                       flags &= ~PBLK_FLUSH_ENTRY;
-                       /* Release flags on context. Protect from writes */
-                       smp_store_release(&w_ctx->flags, flags);
-
-#ifdef CONFIG_NVM_PBLK_DEBUG
-                       atomic_dec(&rwb->inflight_flush_point);
-#endif
-               }
-
-               while ((original_bio = bio_list_pop(&w_ctx->bios)))
-                       bio_endio(original_bio);
-       }
-
-       if (c_ctx->nr_padded)
-               pblk_bio_free_pages(pblk, rqd->bio, c_ctx->nr_valid,
-                                                       c_ctx->nr_padded);
-
-#ifdef CONFIG_NVM_PBLK_DEBUG
-       atomic_long_add(rqd->nr_ppas, &pblk->sync_writes);
-#endif
-
-       ret = pblk_rb_sync_advance(&pblk->rwb, c_ctx->nr_valid);
-
-       bio_put(rqd->bio);
-       pblk_free_rqd(pblk, rqd, PBLK_WRITE);
-
-       return ret;
-}
-
-static unsigned long pblk_end_queued_w_bio(struct pblk *pblk,
-                                          struct nvm_rq *rqd,
-                                          struct pblk_c_ctx *c_ctx)
-{
-       list_del(&c_ctx->list);
-       return pblk_end_w_bio(pblk, rqd, c_ctx);
-}
-
-static void pblk_complete_write(struct pblk *pblk, struct nvm_rq *rqd,
-                               struct pblk_c_ctx *c_ctx)
-{
-       struct pblk_c_ctx *c, *r;
-       unsigned long flags;
-       unsigned long pos;
-
-#ifdef CONFIG_NVM_PBLK_DEBUG
-       atomic_long_sub(c_ctx->nr_valid, &pblk->inflight_writes);
-#endif
-       pblk_up_rq(pblk, c_ctx->lun_bitmap);
-
-       pos = pblk_rb_sync_init(&pblk->rwb, &flags);
-       if (pos == c_ctx->sentry) {
-               pos = pblk_end_w_bio(pblk, rqd, c_ctx);
-
-retry:
-               list_for_each_entry_safe(c, r, &pblk->compl_list, list) {
-                       rqd = nvm_rq_from_c_ctx(c);
-                       if (c->sentry == pos) {
-                               pos = pblk_end_queued_w_bio(pblk, rqd, c);
-                               goto retry;
-                       }
-               }
-       } else {
-               WARN_ON(nvm_rq_from_c_ctx(c_ctx) != rqd);
-               list_add_tail(&c_ctx->list, &pblk->compl_list);
-       }
-       pblk_rb_sync_end(&pblk->rwb, &flags);
-}
-
-/* Map remaining sectors in chunk, starting from ppa */
-static void pblk_map_remaining(struct pblk *pblk, struct ppa_addr *ppa,
-               int rqd_ppas)
-{
-       struct pblk_line *line;
-       struct ppa_addr map_ppa = *ppa;
-       __le64 addr_empty = cpu_to_le64(ADDR_EMPTY);
-       __le64 *lba_list;
-       u64 paddr;
-       int done = 0;
-       int n = 0;
-
-       line = pblk_ppa_to_line(pblk, *ppa);
-       lba_list = emeta_to_lbas(pblk, line->emeta->buf);
-
-       spin_lock(&line->lock);
-
-       while (!done)  {
-               paddr = pblk_dev_ppa_to_line_addr(pblk, map_ppa);
-
-               if (!test_and_set_bit(paddr, line->map_bitmap))
-                       line->left_msecs--;
-
-               if (n < rqd_ppas && lba_list[paddr] != addr_empty)
-                       line->nr_valid_lbas--;
-
-               lba_list[paddr] = addr_empty;
-
-               if (!test_and_set_bit(paddr, line->invalid_bitmap))
-                       le32_add_cpu(line->vsc, -1);
-
-               done = nvm_next_ppa_in_chk(pblk->dev, &map_ppa);
-
-               n++;
-       }
-
-       line->w_err_gc->has_write_err = 1;
-       spin_unlock(&line->lock);
-}
-
-static void pblk_prepare_resubmit(struct pblk *pblk, unsigned int sentry,
-                                 unsigned int nr_entries)
-{
-       struct pblk_rb *rb = &pblk->rwb;
-       struct pblk_rb_entry *entry;
-       struct pblk_line *line;
-       struct pblk_w_ctx *w_ctx;
-       struct ppa_addr ppa_l2p;
-       int flags;
-       unsigned int i;
-
-       spin_lock(&pblk->trans_lock);
-       for (i = 0; i < nr_entries; i++) {
-               entry = &rb->entries[pblk_rb_ptr_wrap(rb, sentry, i)];
-               w_ctx = &entry->w_ctx;
-
-               /* Check if the lba has been overwritten */
-               if (w_ctx->lba != ADDR_EMPTY) {
-                       ppa_l2p = pblk_trans_map_get(pblk, w_ctx->lba);
-                       if (!pblk_ppa_comp(ppa_l2p, entry->cacheline))
-                               w_ctx->lba = ADDR_EMPTY;
-               }
-
-               /* Mark up the entry as submittable again */
-               flags = READ_ONCE(w_ctx->flags);
-               flags |= PBLK_WRITTEN_DATA;
-               /* Release flags on write context. Protect from writes */
-               smp_store_release(&w_ctx->flags, flags);
-
-               /* Decrease the reference count to the line as we will
-                * re-map these entries
-                */
-               line = pblk_ppa_to_line(pblk, w_ctx->ppa);
-               atomic_dec(&line->sec_to_update);
-               kref_put(&line->ref, pblk_line_put);
-       }
-       spin_unlock(&pblk->trans_lock);
-}
-
-static void pblk_queue_resubmit(struct pblk *pblk, struct pblk_c_ctx *c_ctx)
-{
-       struct pblk_c_ctx *r_ctx;
-
-       r_ctx = kzalloc(sizeof(struct pblk_c_ctx), GFP_KERNEL);
-       if (!r_ctx)
-               return;
-
-       r_ctx->lun_bitmap = NULL;
-       r_ctx->sentry = c_ctx->sentry;
-       r_ctx->nr_valid = c_ctx->nr_valid;
-       r_ctx->nr_padded = c_ctx->nr_padded;
-
-       spin_lock(&pblk->resubmit_lock);
-       list_add_tail(&r_ctx->list, &pblk->resubmit_list);
-       spin_unlock(&pblk->resubmit_lock);
-
-#ifdef CONFIG_NVM_PBLK_DEBUG
-       atomic_long_add(c_ctx->nr_valid, &pblk->recov_writes);
-#endif
-}
-
-static void pblk_submit_rec(struct work_struct *work)
-{
-       struct pblk_rec_ctx *recovery =
-                       container_of(work, struct pblk_rec_ctx, ws_rec);
-       struct pblk *pblk = recovery->pblk;
-       struct nvm_rq *rqd = recovery->rqd;
-       struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
-       struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
-
-       pblk_log_write_err(pblk, rqd);
-
-       pblk_map_remaining(pblk, ppa_list, rqd->nr_ppas);
-       pblk_queue_resubmit(pblk, c_ctx);
-
-       pblk_up_rq(pblk, c_ctx->lun_bitmap);
-       if (c_ctx->nr_padded)
-               pblk_bio_free_pages(pblk, rqd->bio, c_ctx->nr_valid,
-                                                       c_ctx->nr_padded);
-       bio_put(rqd->bio);
-       pblk_free_rqd(pblk, rqd, PBLK_WRITE);
-       mempool_free(recovery, &pblk->rec_pool);
-
-       atomic_dec(&pblk->inflight_io);
-       pblk_write_kick(pblk);
-}
-
-
-static void pblk_end_w_fail(struct pblk *pblk, struct nvm_rq *rqd)
-{
-       struct pblk_rec_ctx *recovery;
-
-       recovery = mempool_alloc(&pblk->rec_pool, GFP_ATOMIC);
-       if (!recovery) {
-               pblk_err(pblk, "could not allocate recovery work\n");
-               return;
-       }
-
-       recovery->pblk = pblk;
-       recovery->rqd = rqd;
-
-       INIT_WORK(&recovery->ws_rec, pblk_submit_rec);
-       queue_work(pblk->close_wq, &recovery->ws_rec);
-}
-
-static void pblk_end_io_write(struct nvm_rq *rqd)
-{
-       struct pblk *pblk = rqd->private;
-       struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
-
-       if (rqd->error) {
-               pblk_end_w_fail(pblk, rqd);
-               return;
-       } else {
-               if (trace_pblk_chunk_state_enabled())
-                       pblk_check_chunk_state_update(pblk, rqd);
-#ifdef CONFIG_NVM_PBLK_DEBUG
-               WARN_ONCE(rqd->bio->bi_status, "pblk: corrupted write error\n");
-#endif
-       }
-
-       pblk_complete_write(pblk, rqd, c_ctx);
-       atomic_dec(&pblk->inflight_io);
-}
-
-static void pblk_end_io_write_meta(struct nvm_rq *rqd)
-{
-       struct pblk *pblk = rqd->private;
-       struct pblk_g_ctx *m_ctx = nvm_rq_to_pdu(rqd);
-       struct pblk_line *line = m_ctx->private;
-       struct pblk_emeta *emeta = line->emeta;
-       struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
-       int sync;
-
-       pblk_up_chunk(pblk, ppa_list[0]);
-
-       if (rqd->error) {
-               pblk_log_write_err(pblk, rqd);
-               pblk_err(pblk, "metadata I/O failed. Line %d\n", line->id);
-               line->w_err_gc->has_write_err = 1;
-       } else {
-               if (trace_pblk_chunk_state_enabled())
-                       pblk_check_chunk_state_update(pblk, rqd);
-       }
-
-       sync = atomic_add_return(rqd->nr_ppas, &emeta->sync);
-       if (sync == emeta->nr_entries)
-               pblk_gen_run_ws(pblk, line, NULL, pblk_line_close_ws,
-                                               GFP_ATOMIC, pblk->close_wq);
-
-       pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
-
-       atomic_dec(&pblk->inflight_io);
-}
-
-static int pblk_alloc_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
-                          unsigned int nr_secs, nvm_end_io_fn(*end_io))
-{
-       /* Setup write request */
-       rqd->opcode = NVM_OP_PWRITE;
-       rqd->nr_ppas = nr_secs;
-       rqd->is_seq = 1;
-       rqd->private = pblk;
-       rqd->end_io = end_io;
-
-       return pblk_alloc_rqd_meta(pblk, rqd);
-}
-
-static int pblk_setup_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
-                          struct ppa_addr *erase_ppa)
-{
-       struct pblk_line_meta *lm = &pblk->lm;
-       struct pblk_line *e_line = pblk_line_get_erase(pblk);
-       struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
-       unsigned int valid = c_ctx->nr_valid;
-       unsigned int padded = c_ctx->nr_padded;
-       unsigned int nr_secs = valid + padded;
-       unsigned long *lun_bitmap;
-       int ret;
-
-       lun_bitmap = kzalloc(lm->lun_bitmap_len, GFP_KERNEL);
-       if (!lun_bitmap)
-               return -ENOMEM;
-       c_ctx->lun_bitmap = lun_bitmap;
-
-       ret = pblk_alloc_w_rq(pblk, rqd, nr_secs, pblk_end_io_write);
-       if (ret) {
-               kfree(lun_bitmap);
-               return ret;
-       }
-
-       if (likely(!e_line || !atomic_read(&e_line->left_eblks)))
-               ret = pblk_map_rq(pblk, rqd, c_ctx->sentry, lun_bitmap,
-                                                       valid, 0);
-       else
-               ret = pblk_map_erase_rq(pblk, rqd, c_ctx->sentry, lun_bitmap,
-                                                       valid, erase_ppa);
-
-       return ret;
-}
-
-static int pblk_calc_secs_to_sync(struct pblk *pblk, unsigned int secs_avail,
-                                 unsigned int secs_to_flush)
-{
-       int secs_to_sync;
-
-       secs_to_sync = pblk_calc_secs(pblk, secs_avail, secs_to_flush, true);
-
-#ifdef CONFIG_NVM_PBLK_DEBUG
-       if ((!secs_to_sync && secs_to_flush)
-                       || (secs_to_sync < 0)
-                       || (secs_to_sync > secs_avail && !secs_to_flush)) {
-               pblk_err(pblk, "bad sector calculation (a:%d,s:%d,f:%d)\n",
-                               secs_avail, secs_to_sync, secs_to_flush);
-       }
-#endif
-
-       return secs_to_sync;
-}
-
-int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line)
-{
-       struct nvm_tgt_dev *dev = pblk->dev;
-       struct nvm_geo *geo = &dev->geo;
-       struct pblk_line_mgmt *l_mg = &pblk->l_mg;
-       struct pblk_line_meta *lm = &pblk->lm;
-       struct pblk_emeta *emeta = meta_line->emeta;
-       struct ppa_addr *ppa_list;
-       struct pblk_g_ctx *m_ctx;
-       struct nvm_rq *rqd;
-       void *data;
-       u64 paddr;
-       int rq_ppas = pblk->min_write_pgs;
-       int id = meta_line->id;
-       int rq_len;
-       int i, j;
-       int ret;
-
-       rqd = pblk_alloc_rqd(pblk, PBLK_WRITE_INT);
-
-       m_ctx = nvm_rq_to_pdu(rqd);
-       m_ctx->private = meta_line;
-
-       rq_len = rq_ppas * geo->csecs;
-       data = ((void *)emeta->buf) + emeta->mem;
-
-       ret = pblk_alloc_w_rq(pblk, rqd, rq_ppas, pblk_end_io_write_meta);
-       if (ret)
-               goto fail_free_rqd;
-
-       ppa_list = nvm_rq_to_ppa_list(rqd);
-       for (i = 0; i < rqd->nr_ppas; ) {
-               spin_lock(&meta_line->lock);
-               paddr = __pblk_alloc_page(pblk, meta_line, rq_ppas);
-               spin_unlock(&meta_line->lock);
-               for (j = 0; j < rq_ppas; j++, i++, paddr++)
-                       ppa_list[i] = addr_to_gen_ppa(pblk, paddr, id);
-       }
-
-       spin_lock(&l_mg->close_lock);
-       emeta->mem += rq_len;
-       if (emeta->mem >= lm->emeta_len[0])
-               list_del(&meta_line->list);
-       spin_unlock(&l_mg->close_lock);
-
-       pblk_down_chunk(pblk, ppa_list[0]);
-
-       ret = pblk_submit_io(pblk, rqd, data);
-       if (ret) {
-               pblk_err(pblk, "emeta I/O submission failed: %d\n", ret);
-               goto fail_rollback;
-       }
-
-       return NVM_IO_OK;
-
-fail_rollback:
-       pblk_up_chunk(pblk, ppa_list[0]);
-       spin_lock(&l_mg->close_lock);
-       pblk_dealloc_page(pblk, meta_line, rq_ppas);
-       list_add(&meta_line->list, &meta_line->list);
-       spin_unlock(&l_mg->close_lock);
-fail_free_rqd:
-       pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
-       return ret;
-}
-
-static inline bool pblk_valid_meta_ppa(struct pblk *pblk,
-                                      struct pblk_line *meta_line,
-                                      struct nvm_rq *data_rqd)
-{
-       struct nvm_tgt_dev *dev = pblk->dev;
-       struct nvm_geo *geo = &dev->geo;
-       struct pblk_c_ctx *data_c_ctx = nvm_rq_to_pdu(data_rqd);
-       struct pblk_line *data_line = pblk_line_get_data(pblk);
-       struct ppa_addr ppa, ppa_opt;
-       u64 paddr;
-       int pos_opt;
-
-       /* Schedule a metadata I/O that is half the distance from the data I/O
-        * with regards to the number of LUNs forming the pblk instance. This
-        * balances LUN conflicts across every I/O.
-        *
-        * When the LUN configuration changes (e.g., due to GC), this distance
-        * can align, which would result on metadata and data I/Os colliding. In
-        * this case, modify the distance to not be optimal, but move the
-        * optimal in the right direction.
-        */
-       paddr = pblk_lookup_page(pblk, meta_line);
-       ppa = addr_to_gen_ppa(pblk, paddr, 0);
-       ppa_opt = addr_to_gen_ppa(pblk, paddr + data_line->meta_distance, 0);
-       pos_opt = pblk_ppa_to_pos(geo, ppa_opt);
-
-       if (test_bit(pos_opt, data_c_ctx->lun_bitmap) ||
-                               test_bit(pos_opt, data_line->blk_bitmap))
-               return true;
-
-       if (unlikely(pblk_ppa_comp(ppa_opt, ppa)))
-               data_line->meta_distance--;
-
-       return false;
-}
-
-static struct pblk_line *pblk_should_submit_meta_io(struct pblk *pblk,
-                                                   struct nvm_rq *data_rqd)
-{
-       struct pblk_line_meta *lm = &pblk->lm;
-       struct pblk_line_mgmt *l_mg = &pblk->l_mg;
-       struct pblk_line *meta_line;
-
-       spin_lock(&l_mg->close_lock);
-       if (list_empty(&l_mg->emeta_list)) {
-               spin_unlock(&l_mg->close_lock);
-               return NULL;
-       }
-       meta_line = list_first_entry(&l_mg->emeta_list, struct pblk_line, list);
-       if (meta_line->emeta->mem >= lm->emeta_len[0]) {
-               spin_unlock(&l_mg->close_lock);
-               return NULL;
-       }
-       spin_unlock(&l_mg->close_lock);
-
-       if (!pblk_valid_meta_ppa(pblk, meta_line, data_rqd))
-               return NULL;
-
-       return meta_line;
-}
-
-static int pblk_submit_io_set(struct pblk *pblk, struct nvm_rq *rqd)
-{
-       struct ppa_addr erase_ppa;
-       struct pblk_line *meta_line;
-       int err;
-
-       pblk_ppa_set_empty(&erase_ppa);
-
-       /* Assign lbas to ppas and populate request structure */
-       err = pblk_setup_w_rq(pblk, rqd, &erase_ppa);
-       if (err) {
-               pblk_err(pblk, "could not setup write request: %d\n", err);
-               return NVM_IO_ERR;
-       }
-
-       meta_line = pblk_should_submit_meta_io(pblk, rqd);
-
-       /* Submit data write for current data line */
-       err = pblk_submit_io(pblk, rqd, NULL);
-       if (err) {
-               pblk_err(pblk, "data I/O submission failed: %d\n", err);
-               return NVM_IO_ERR;
-       }
-
-       if (!pblk_ppa_empty(erase_ppa)) {
-               /* Submit erase for next data line */
-               if (pblk_blk_erase_async(pblk, erase_ppa)) {
-                       struct pblk_line *e_line = pblk_line_get_erase(pblk);
-                       struct nvm_tgt_dev *dev = pblk->dev;
-                       struct nvm_geo *geo = &dev->geo;
-                       int bit;
-
-                       atomic_inc(&e_line->left_eblks);
-                       bit = pblk_ppa_to_pos(geo, erase_ppa);
-                       WARN_ON(!test_and_clear_bit(bit, e_line->erase_bitmap));
-               }
-       }
-
-       if (meta_line) {
-               /* Submit metadata write for previous data line */
-               err = pblk_submit_meta_io(pblk, meta_line);
-               if (err) {
-                       pblk_err(pblk, "metadata I/O submission failed: %d",
-                                       err);
-                       return NVM_IO_ERR;
-               }
-       }
-
-       return NVM_IO_OK;
-}
-
-static void pblk_free_write_rqd(struct pblk *pblk, struct nvm_rq *rqd)
-{
-       struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
-       struct bio *bio = rqd->bio;
-
-       if (c_ctx->nr_padded)
-               pblk_bio_free_pages(pblk, bio, c_ctx->nr_valid,
-                                                       c_ctx->nr_padded);
-}
-
-static int pblk_submit_write(struct pblk *pblk, int *secs_left)
-{
-       struct bio *bio;
-       struct nvm_rq *rqd;
-       unsigned int secs_avail, secs_to_sync, secs_to_com;
-       unsigned int secs_to_flush, packed_meta_pgs;
-       unsigned long pos;
-       unsigned int resubmit;
-
-       *secs_left = 0;
-
-       spin_lock(&pblk->resubmit_lock);
-       resubmit = !list_empty(&pblk->resubmit_list);
-       spin_unlock(&pblk->resubmit_lock);
-
-       /* Resubmit failed writes first */
-       if (resubmit) {
-               struct pblk_c_ctx *r_ctx;
-
-               spin_lock(&pblk->resubmit_lock);
-               r_ctx = list_first_entry(&pblk->resubmit_list,
-                                       struct pblk_c_ctx, list);
-               list_del(&r_ctx->list);
-               spin_unlock(&pblk->resubmit_lock);
-
-               secs_avail = r_ctx->nr_valid;
-               pos = r_ctx->sentry;
-
-               pblk_prepare_resubmit(pblk, pos, secs_avail);
-               secs_to_sync = pblk_calc_secs_to_sync(pblk, secs_avail,
-                               secs_avail);
-
-               kfree(r_ctx);
-       } else {
-               /* If there are no sectors in the cache,
-                * flushes (bios without data) will be cleared on
-                * the cache threads
-                */
-               secs_avail = pblk_rb_read_count(&pblk->rwb);
-               if (!secs_avail)
-                       return 0;
-
-               secs_to_flush = pblk_rb_flush_point_count(&pblk->rwb);
-               if (!secs_to_flush && secs_avail < pblk->min_write_pgs_data)
-                       return 0;
-
-               secs_to_sync = pblk_calc_secs_to_sync(pblk, secs_avail,
-                                       secs_to_flush);
-               if (secs_to_sync > pblk->max_write_pgs) {
-                       pblk_err(pblk, "bad buffer sync calculation\n");
-                       return 0;
-               }
-
-               secs_to_com = (secs_to_sync > secs_avail) ?
-                       secs_avail : secs_to_sync;
-               pos = pblk_rb_read_commit(&pblk->rwb, secs_to_com);
-       }
-
-       packed_meta_pgs = (pblk->min_write_pgs - pblk->min_write_pgs_data);
-       bio = bio_alloc(GFP_KERNEL, secs_to_sync + packed_meta_pgs);
-
-       bio->bi_iter.bi_sector = 0; /* internal bio */
-       bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
-
-       rqd = pblk_alloc_rqd(pblk, PBLK_WRITE);
-       rqd->bio = bio;
-
-       if (pblk_rb_read_to_bio(&pblk->rwb, rqd, pos, secs_to_sync,
-                                                               secs_avail)) {
-               pblk_err(pblk, "corrupted write bio\n");
-               goto fail_put_bio;
-       }
-
-       if (pblk_submit_io_set(pblk, rqd))
-               goto fail_free_bio;
-
-#ifdef CONFIG_NVM_PBLK_DEBUG
-       atomic_long_add(secs_to_sync, &pblk->sub_writes);
-#endif
-
-       *secs_left = 1;
-       return 0;
-
-fail_free_bio:
-       pblk_free_write_rqd(pblk, rqd);
-fail_put_bio:
-       bio_put(bio);
-       pblk_free_rqd(pblk, rqd, PBLK_WRITE);
-
-       return -EINTR;
-}
-
-int pblk_write_ts(void *data)
-{
-       struct pblk *pblk = data;
-       int secs_left;
-       int write_failure = 0;
-
-       while (!kthread_should_stop()) {
-               if (!write_failure) {
-                       write_failure = pblk_submit_write(pblk, &secs_left);
-
-                       if (secs_left)
-                               continue;
-               }
-               set_current_state(TASK_INTERRUPTIBLE);
-               io_schedule();
-       }
-
-       return 0;
-}
diff --git a/drivers/lightnvm/pblk.h b/drivers/lightnvm/pblk.h
deleted file mode 100644 (file)
index 86ffa87..0000000
+++ /dev/null
@@ -1,1358 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (C) 2015 IT University of Copenhagen (rrpc.h)
- * Copyright (C) 2016 CNEX Labs
- * Initial release: Matias Bjorling <matias@cnexlabs.com>
- * Write buffering: Javier Gonzalez <javier@cnexlabs.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * Implementation of a Physical Block-device target for Open-channel SSDs.
- *
- */
-
-#ifndef PBLK_H_
-#define PBLK_H_
-
-#include <linux/blkdev.h>
-#include <linux/blk-mq.h>
-#include <linux/bio.h>
-#include <linux/module.h>
-#include <linux/kthread.h>
-#include <linux/vmalloc.h>
-#include <linux/crc32.h>
-#include <linux/uuid.h>
-
-#include <linux/lightnvm.h>
-
-/* Run only GC if less than 1/X blocks are free */
-#define GC_LIMIT_INVERSE 5
-#define GC_TIME_MSECS 1000
-
-#define PBLK_SECTOR (512)
-#define PBLK_EXPOSED_PAGE_SIZE (4096)
-
-#define PBLK_NR_CLOSE_JOBS (4)
-
-#define PBLK_CACHE_NAME_LEN (DISK_NAME_LEN + 16)
-
-/* Max 512 LUNs per device */
-#define PBLK_MAX_LUNS_BITMAP (4)
-
-#define NR_PHY_IN_LOG (PBLK_EXPOSED_PAGE_SIZE / PBLK_SECTOR)
-
-/* Static pool sizes */
-#define PBLK_GEN_WS_POOL_SIZE (2)
-
-#define PBLK_DEFAULT_OP (11)
-
-enum {
-       PBLK_READ               = READ,
-       PBLK_WRITE              = WRITE,/* Write from write buffer */
-       PBLK_WRITE_INT,                 /* Internal write - no write buffer */
-       PBLK_READ_RECOV,                /* Recovery read - errors allowed */
-       PBLK_ERASE,
-};
-
-enum {
-       /* IO Types */
-       PBLK_IOTYPE_USER        = 1 << 0,
-       PBLK_IOTYPE_GC          = 1 << 1,
-
-       /* Write buffer flags */
-       PBLK_FLUSH_ENTRY        = 1 << 2,
-       PBLK_WRITTEN_DATA       = 1 << 3,
-       PBLK_SUBMITTED_ENTRY    = 1 << 4,
-       PBLK_WRITABLE_ENTRY     = 1 << 5,
-};
-
-enum {
-       PBLK_BLK_ST_OPEN =      0x1,
-       PBLK_BLK_ST_CLOSED =    0x2,
-};
-
-enum {
-       PBLK_CHUNK_RESET_START,
-       PBLK_CHUNK_RESET_DONE,
-       PBLK_CHUNK_RESET_FAILED,
-};
-
-struct pblk_sec_meta {
-       u64 reserved;
-       __le64 lba;
-};
-
-/* The number of GC lists and the rate-limiter states go together. This way the
- * rate-limiter can dictate how much GC is needed based on resource utilization.
- */
-#define PBLK_GC_NR_LISTS 4
-
-enum {
-       PBLK_RL_OFF = 0,
-       PBLK_RL_WERR = 1,
-       PBLK_RL_HIGH = 2,
-       PBLK_RL_MID = 3,
-       PBLK_RL_LOW = 4
-};
-
-#define pblk_dma_ppa_size (sizeof(u64) * NVM_MAX_VLBA)
-
-/* write buffer completion context */
-struct pblk_c_ctx {
-       struct list_head list;          /* Head for out-of-order completion */
-
-       unsigned long *lun_bitmap;      /* Luns used on current request */
-       unsigned int sentry;
-       unsigned int nr_valid;
-       unsigned int nr_padded;
-};
-
-/* read context */
-struct pblk_g_ctx {
-       void *private;
-       unsigned long start_time;
-       u64 lba;
-};
-
-/* Pad context */
-struct pblk_pad_rq {
-       struct pblk *pblk;
-       struct completion wait;
-       struct kref ref;
-};
-
-/* Recovery context */
-struct pblk_rec_ctx {
-       struct pblk *pblk;
-       struct nvm_rq *rqd;
-       struct work_struct ws_rec;
-};
-
-/* Write context */
-struct pblk_w_ctx {
-       struct bio_list bios;           /* Original bios - used for completion
-                                        * in REQ_FUA, REQ_FLUSH case
-                                        */
-       u64 lba;                        /* Logic addr. associated with entry */
-       struct ppa_addr ppa;            /* Physic addr. associated with entry */
-       int flags;                      /* Write context flags */
-};
-
-struct pblk_rb_entry {
-       struct ppa_addr cacheline;      /* Cacheline for this entry */
-       void *data;                     /* Pointer to data on this entry */
-       struct pblk_w_ctx w_ctx;        /* Context for this entry */
-       struct list_head index;         /* List head to enable indexes */
-};
-
-#define EMPTY_ENTRY (~0U)
-
-struct pblk_rb_pages {
-       struct page *pages;
-       int order;
-       struct list_head list;
-};
-
-struct pblk_rb {
-       struct pblk_rb_entry *entries;  /* Ring buffer entries */
-       unsigned int mem;               /* Write offset - points to next
-                                        * writable entry in memory
-                                        */
-       unsigned int subm;              /* Read offset - points to last entry
-                                        * that has been submitted to the media
-                                        * to be persisted
-                                        */
-       unsigned int sync;              /* Synced - backpointer that signals
-                                        * the last submitted entry that has
-                                        * been successfully persisted to media
-                                        */
-       unsigned int flush_point;       /* Sync point - last entry that must be
-                                        * flushed to the media. Used with
-                                        * REQ_FLUSH and REQ_FUA
-                                        */
-       unsigned int l2p_update;        /* l2p update point - next entry for
-                                        * which l2p mapping will be updated to
-                                        * contain a device ppa address (instead
-                                        * of a cacheline
-                                        */
-       unsigned int nr_entries;        /* Number of entries in write buffer -
-                                        * must be a power of two
-                                        */
-       unsigned int seg_size;          /* Size of the data segments being
-                                        * stored on each entry. Typically this
-                                        * will be 4KB
-                                        */
-
-       unsigned int back_thres;        /* Threshold that shall be maintained by
-                                        * the backpointer in order to respect
-                                        * geo->mw_cunits on a per chunk basis
-                                        */
-
-       struct list_head pages;         /* List of data pages */
-
-       spinlock_t w_lock;              /* Write lock */
-       spinlock_t s_lock;              /* Sync lock */
-
-#ifdef CONFIG_NVM_PBLK_DEBUG
-       atomic_t inflight_flush_point;  /* Not served REQ_FLUSH | REQ_FUA */
-#endif
-};
-
-#define PBLK_RECOVERY_SECTORS 16
-
-struct pblk_lun {
-       struct ppa_addr bppa;
-       struct semaphore wr_sem;
-};
-
-struct pblk_gc_rq {
-       struct pblk_line *line;
-       void *data;
-       u64 paddr_list[NVM_MAX_VLBA];
-       u64 lba_list[NVM_MAX_VLBA];
-       int nr_secs;
-       int secs_to_gc;
-       struct list_head list;
-};
-
-struct pblk_gc {
-       /* These states are not protected by a lock since (i) they are in the
-        * fast path, and (ii) they are not critical.
-        */
-       int gc_active;
-       int gc_enabled;
-       int gc_forced;
-
-       struct task_struct *gc_ts;
-       struct task_struct *gc_writer_ts;
-       struct task_struct *gc_reader_ts;
-
-       struct workqueue_struct *gc_line_reader_wq;
-       struct workqueue_struct *gc_reader_wq;
-
-       struct timer_list gc_timer;
-
-       struct semaphore gc_sem;
-       atomic_t read_inflight_gc; /* Number of lines with inflight GC reads */
-       atomic_t pipeline_gc;      /* Number of lines in the GC pipeline -
-                                   * started reads to finished writes
-                                   */
-       int w_entries;
-
-       struct list_head w_list;
-       struct list_head r_list;
-
-       spinlock_t lock;
-       spinlock_t w_lock;
-       spinlock_t r_lock;
-};
-
-struct pblk_rl {
-       unsigned int high;      /* Upper threshold for rate limiter (free run -
-                                * user I/O rate limiter
-                                */
-       unsigned int high_pw;   /* High rounded up as a power of 2 */
-
-#define PBLK_USER_HIGH_THRS 8  /* Begin write limit at 12% available blks */
-#define PBLK_USER_LOW_THRS 10  /* Aggressive GC at 10% available blocks */
-
-       int rb_windows_pw;      /* Number of rate windows in the write buffer
-                                * given as a power-of-2. This guarantees that
-                                * when user I/O is being rate limited, there
-                                * will be reserved enough space for the GC to
-                                * place its payload. A window is of
-                                * pblk->max_write_pgs size, which in NVMe is
-                                * 64, i.e., 256kb.
-                                */
-       int rb_budget;          /* Total number of entries available for I/O */
-       int rb_user_max;        /* Max buffer entries available for user I/O */
-       int rb_gc_max;          /* Max buffer entries available for GC I/O */
-       int rb_gc_rsv;          /* Reserved buffer entries for GC I/O */
-       int rb_state;           /* Rate-limiter current state */
-       int rb_max_io;          /* Maximum size for an I/O giving the config */
-
-       atomic_t rb_user_cnt;   /* User I/O buffer counter */
-       atomic_t rb_gc_cnt;     /* GC I/O buffer counter */
-       atomic_t rb_space;      /* Space limit in case of reaching capacity */
-
-       int rsv_blocks;         /* Reserved blocks for GC */
-
-       int rb_user_active;
-       int rb_gc_active;
-
-       atomic_t werr_lines;    /* Number of write error lines that needs gc */
-
-       struct timer_list u_timer;
-
-       unsigned long total_blocks;
-
-       atomic_t free_blocks;           /* Total number of free blocks (+ OP) */
-       atomic_t free_user_blocks;      /* Number of user free blocks (no OP) */
-};
-
-#define PBLK_LINE_EMPTY (~0U)
-
-enum {
-       /* Line Types */
-       PBLK_LINETYPE_FREE = 0,
-       PBLK_LINETYPE_LOG = 1,
-       PBLK_LINETYPE_DATA = 2,
-
-       /* Line state */
-       PBLK_LINESTATE_NEW = 9,
-       PBLK_LINESTATE_FREE = 10,
-       PBLK_LINESTATE_OPEN = 11,
-       PBLK_LINESTATE_CLOSED = 12,
-       PBLK_LINESTATE_GC = 13,
-       PBLK_LINESTATE_BAD = 14,
-       PBLK_LINESTATE_CORRUPT = 15,
-
-       /* GC group */
-       PBLK_LINEGC_NONE = 20,
-       PBLK_LINEGC_EMPTY = 21,
-       PBLK_LINEGC_LOW = 22,
-       PBLK_LINEGC_MID = 23,
-       PBLK_LINEGC_HIGH = 24,
-       PBLK_LINEGC_FULL = 25,
-       PBLK_LINEGC_WERR = 26
-};
-
-#define PBLK_MAGIC 0x70626c6b /*pblk*/
-
-/* emeta/smeta persistent storage format versions:
- * Changes in major version requires offline migration.
- * Changes in minor version are handled automatically during
- * recovery.
- */
-
-#define SMETA_VERSION_MAJOR (0)
-#define SMETA_VERSION_MINOR (1)
-
-#define EMETA_VERSION_MAJOR (0)
-#define EMETA_VERSION_MINOR (2)
-
-struct line_header {
-       __le32 crc;
-       __le32 identifier;      /* pblk identifier */
-       __u8 uuid[16];          /* instance uuid */
-       __le16 type;            /* line type */
-       __u8 version_major;     /* version major */
-       __u8 version_minor;     /* version minor */
-       __le32 id;              /* line id for current line */
-};
-
-struct line_smeta {
-       struct line_header header;
-
-       __le32 crc;             /* Full structure including struct crc */
-       /* Previous line metadata */
-       __le32 prev_id;         /* Line id for previous line */
-
-       /* Current line metadata */
-       __le64 seq_nr;          /* Sequence number for current line */
-
-       /* Active writers */
-       __le32 window_wr_lun;   /* Number of parallel LUNs to write */
-
-       __le32 rsvd[2];
-
-       __le64 lun_bitmap[];
-};
-
-
-/*
- * Metadata layout in media:
- *     First sector:
- *             1. struct line_emeta
- *             2. bad block bitmap (u64 * window_wr_lun)
- *             3. write amplification counters
- *     Mid sectors (start at lbas_sector):
- *             3. nr_lbas (u64) forming lba list
- *     Last sectors (start at vsc_sector):
- *             4. u32 valid sector count (vsc) for all lines (~0U: free line)
- */
-struct line_emeta {
-       struct line_header header;
-
-       __le32 crc;             /* Full structure including struct crc */
-
-       /* Previous line metadata */
-       __le32 prev_id;         /* Line id for prev line */
-
-       /* Current line metadata */
-       __le64 seq_nr;          /* Sequence number for current line */
-
-       /* Active writers */
-       __le32 window_wr_lun;   /* Number of parallel LUNs to write */
-
-       /* Bookkeeping for recovery */
-       __le32 next_id;         /* Line id for next line */
-       __le64 nr_lbas;         /* Number of lbas mapped in line */
-       __le64 nr_valid_lbas;   /* Number of valid lbas mapped in line */
-       __le64 bb_bitmap[];     /* Updated bad block bitmap for line */
-};
-
-
-/* Write amplification counters stored on media */
-struct wa_counters {
-       __le64 user;            /* Number of user written sectors */
-       __le64 gc;              /* Number of sectors written by GC*/
-       __le64 pad;             /* Number of padded sectors */
-};
-
-struct pblk_emeta {
-       struct line_emeta *buf;         /* emeta buffer in media format */
-       int mem;                        /* Write offset - points to next
-                                        * writable entry in memory
-                                        */
-       atomic_t sync;                  /* Synced - backpointer that signals the
-                                        * last entry that has been successfully
-                                        * persisted to media
-                                        */
-       unsigned int nr_entries;        /* Number of emeta entries */
-};
-
-struct pblk_smeta {
-       struct line_smeta *buf;         /* smeta buffer in persistent format */
-};
-
-struct pblk_w_err_gc {
-       int has_write_err;
-       int has_gc_err;
-       __le64 *lba_list;
-};
-
-struct pblk_line {
-       struct pblk *pblk;
-       unsigned int id;                /* Line number corresponds to the
-                                        * block line
-                                        */
-       unsigned int seq_nr;            /* Unique line sequence number */
-
-       int state;                      /* PBLK_LINESTATE_X */
-       int type;                       /* PBLK_LINETYPE_X */
-       int gc_group;                   /* PBLK_LINEGC_X */
-       struct list_head list;          /* Free, GC lists */
-
-       unsigned long *lun_bitmap;      /* Bitmap for LUNs mapped in line */
-
-       struct nvm_chk_meta *chks;      /* Chunks forming line */
-
-       struct pblk_smeta *smeta;       /* Start metadata */
-       struct pblk_emeta *emeta;       /* End medatada */
-
-       int meta_line;                  /* Metadata line id */
-       int meta_distance;              /* Distance between data and metadata */
-
-       u64 emeta_ssec;                 /* Sector where emeta starts */
-
-       unsigned int sec_in_line;       /* Number of usable secs in line */
-
-       atomic_t blk_in_line;           /* Number of good blocks in line */
-       unsigned long *blk_bitmap;      /* Bitmap for valid/invalid blocks */
-       unsigned long *erase_bitmap;    /* Bitmap for erased blocks */
-
-       unsigned long *map_bitmap;      /* Bitmap for mapped sectors in line */
-       unsigned long *invalid_bitmap;  /* Bitmap for invalid sectors in line */
-
-       atomic_t left_eblks;            /* Blocks left for erasing */
-       atomic_t left_seblks;           /* Blocks left for sync erasing */
-
-       int left_msecs;                 /* Sectors left for mapping */
-       unsigned int cur_sec;           /* Sector map pointer */
-       unsigned int nr_valid_lbas;     /* Number of valid lbas in line */
-
-       __le32 *vsc;                    /* Valid sector count in line */
-
-       struct kref ref;                /* Write buffer L2P references */
-       atomic_t sec_to_update;         /* Outstanding L2P updates to ppa */
-
-       struct pblk_w_err_gc *w_err_gc; /* Write error gc recovery metadata */
-
-       spinlock_t lock;                /* Necessary for invalid_bitmap only */
-};
-
-#define PBLK_DATA_LINES 4
-
-enum {
-       PBLK_EMETA_TYPE_HEADER = 1,     /* struct line_emeta first sector */
-       PBLK_EMETA_TYPE_LLBA = 2,       /* lba list - type: __le64 */
-       PBLK_EMETA_TYPE_VSC = 3,        /* vsc list - type: __le32 */
-};
-
-struct pblk_line_mgmt {
-       int nr_lines;                   /* Total number of full lines */
-       int nr_free_lines;              /* Number of full lines in free list */
-
-       /* Free lists - use free_lock */
-       struct list_head free_list;     /* Full lines ready to use */
-       struct list_head corrupt_list;  /* Full lines corrupted */
-       struct list_head bad_list;      /* Full lines bad */
-
-       /* GC lists - use gc_lock */
-       struct list_head *gc_lists[PBLK_GC_NR_LISTS];
-       struct list_head gc_high_list;  /* Full lines ready to GC, high isc */
-       struct list_head gc_mid_list;   /* Full lines ready to GC, mid isc */
-       struct list_head gc_low_list;   /* Full lines ready to GC, low isc */
-
-       struct list_head gc_werr_list;  /* Write err recovery list */
-
-       struct list_head gc_full_list;  /* Full lines ready to GC, no valid */
-       struct list_head gc_empty_list; /* Full lines close, all valid */
-
-       struct pblk_line *log_line;     /* Current FTL log line */
-       struct pblk_line *data_line;    /* Current data line */
-       struct pblk_line *log_next;     /* Next FTL log line */
-       struct pblk_line *data_next;    /* Next data line */
-
-       struct list_head emeta_list;    /* Lines queued to schedule emeta */
-
-       __le32 *vsc_list;               /* Valid sector counts for all lines */
-
-       /* Pre-allocated metadata for data lines */
-       struct pblk_smeta *sline_meta[PBLK_DATA_LINES];
-       struct pblk_emeta *eline_meta[PBLK_DATA_LINES];
-       unsigned long meta_bitmap;
-
-       /* Cache and mempool for map/invalid bitmaps */
-       struct kmem_cache *bitmap_cache;
-       mempool_t *bitmap_pool;
-
-       /* Helpers for fast bitmap calculations */
-       unsigned long *bb_template;
-       unsigned long *bb_aux;
-
-       unsigned long d_seq_nr;         /* Data line unique sequence number */
-       unsigned long l_seq_nr;         /* Log line unique sequence number */
-
-       spinlock_t free_lock;
-       spinlock_t close_lock;
-       spinlock_t gc_lock;
-};
-
-struct pblk_line_meta {
-       unsigned int smeta_len;         /* Total length for smeta */
-       unsigned int smeta_sec;         /* Sectors needed for smeta */
-
-       unsigned int emeta_len[4];      /* Lengths for emeta:
-                                        *  [0]: Total
-                                        *  [1]: struct line_emeta +
-                                        *       bb_bitmap + struct wa_counters
-                                        *  [2]: L2P portion
-                                        *  [3]: vsc
-                                        */
-       unsigned int emeta_sec[4];      /* Sectors needed for emeta. Same layout
-                                        * as emeta_len
-                                        */
-
-       unsigned int emeta_bb;          /* Boundary for bb that affects emeta */
-
-       unsigned int vsc_list_len;      /* Length for vsc list */
-       unsigned int sec_bitmap_len;    /* Length for sector bitmap in line */
-       unsigned int blk_bitmap_len;    /* Length for block bitmap in line */
-       unsigned int lun_bitmap_len;    /* Length for lun bitmap in line */
-
-       unsigned int blk_per_line;      /* Number of blocks in a full line */
-       unsigned int sec_per_line;      /* Number of sectors in a line */
-       unsigned int dsec_per_line;     /* Number of data sectors in a line */
-       unsigned int min_blk_line;      /* Min. number of good blocks in line */
-
-       unsigned int mid_thrs;          /* Threshold for GC mid list */
-       unsigned int high_thrs;         /* Threshold for GC high list */
-
-       unsigned int meta_distance;     /* Distance between data and metadata */
-};
-
-enum {
-       PBLK_STATE_RUNNING = 0,
-       PBLK_STATE_STOPPING = 1,
-       PBLK_STATE_RECOVERING = 2,
-       PBLK_STATE_STOPPED = 3,
-};
-
-/* Internal format to support not power-of-2 device formats */
-struct pblk_addrf {
-       /* gen to dev */
-       int sec_stripe;
-       int ch_stripe;
-       int lun_stripe;
-
-       /* dev to gen */
-       int sec_lun_stripe;
-       int sec_ws_stripe;
-};
-
-struct pblk {
-       struct nvm_tgt_dev *dev;
-       struct gendisk *disk;
-
-       struct kobject kobj;
-
-       struct pblk_lun *luns;
-
-       struct pblk_line *lines;                /* Line array */
-       struct pblk_line_mgmt l_mg;             /* Line management */
-       struct pblk_line_meta lm;               /* Line metadata */
-
-       struct nvm_addrf addrf;         /* Aligned address format */
-       struct pblk_addrf uaddrf;       /* Unaligned address format */
-       int addrf_len;
-
-       struct pblk_rb rwb;
-
-       int state;                      /* pblk line state */
-
-       int min_write_pgs; /* Minimum amount of pages required by controller */
-       int min_write_pgs_data; /* Minimum amount of payload pages */
-       int max_write_pgs; /* Maximum amount of pages supported by controller */
-       int oob_meta_size; /* Size of OOB sector metadata */
-
-       sector_t capacity; /* Device capacity when bad blocks are subtracted */
-
-       int op;      /* Percentage of device used for over-provisioning */
-       int op_blks; /* Number of blocks used for over-provisioning */
-
-       /* pblk provisioning values. Used by rate limiter */
-       struct pblk_rl rl;
-
-       int sec_per_write;
-
-       guid_t instance_uuid;
-
-       /* Persistent write amplification counters, 4kb sector I/Os */
-       atomic64_t user_wa;             /* Sectors written by user */
-       atomic64_t gc_wa;               /* Sectors written by GC */
-       atomic64_t pad_wa;              /* Padded sectors written */
-
-       /* Reset values for delta write amplification measurements */
-       u64 user_rst_wa;
-       u64 gc_rst_wa;
-       u64 pad_rst_wa;
-
-       /* Counters used for calculating padding distribution */
-       atomic64_t *pad_dist;           /* Padding distribution buckets */
-       u64 nr_flush_rst;               /* Flushes reset value for pad dist.*/
-       atomic64_t nr_flush;            /* Number of flush/fua I/O */
-
-#ifdef CONFIG_NVM_PBLK_DEBUG
-       /* Non-persistent debug counters, 4kb sector I/Os */
-       atomic_long_t inflight_writes;  /* Inflight writes (user and gc) */
-       atomic_long_t padded_writes;    /* Sectors padded due to flush/fua */
-       atomic_long_t padded_wb;        /* Sectors padded in write buffer */
-       atomic_long_t req_writes;       /* Sectors stored on write buffer */
-       atomic_long_t sub_writes;       /* Sectors submitted from buffer */
-       atomic_long_t sync_writes;      /* Sectors synced to media */
-       atomic_long_t inflight_reads;   /* Inflight sector read requests */
-       atomic_long_t cache_reads;      /* Read requests that hit the cache */
-       atomic_long_t sync_reads;       /* Completed sector read requests */
-       atomic_long_t recov_writes;     /* Sectors submitted from recovery */
-       atomic_long_t recov_gc_writes;  /* Sectors submitted from write GC */
-       atomic_long_t recov_gc_reads;   /* Sectors submitted from read GC */
-#endif
-
-       spinlock_t lock;
-
-       atomic_long_t read_failed;
-       atomic_long_t read_empty;
-       atomic_long_t read_high_ecc;
-       atomic_long_t read_failed_gc;
-       atomic_long_t write_failed;
-       atomic_long_t erase_failed;
-
-       atomic_t inflight_io;           /* General inflight I/O counter */
-
-       struct task_struct *writer_ts;
-
-       /* Simple translation map of logical addresses to physical addresses.
-        * The logical addresses is known by the host system, while the physical
-        * addresses are used when writing to the disk block device.
-        */
-       unsigned char *trans_map;
-       spinlock_t trans_lock;
-
-       struct list_head compl_list;
-
-       spinlock_t resubmit_lock;        /* Resubmit list lock */
-       struct list_head resubmit_list; /* Resubmit list for failed writes*/
-
-       mempool_t page_bio_pool;
-       mempool_t gen_ws_pool;
-       mempool_t rec_pool;
-       mempool_t r_rq_pool;
-       mempool_t w_rq_pool;
-       mempool_t e_rq_pool;
-
-       struct workqueue_struct *close_wq;
-       struct workqueue_struct *bb_wq;
-       struct workqueue_struct *r_end_wq;
-
-       struct timer_list wtimer;
-
-       struct pblk_gc gc;
-};
-
-struct pblk_line_ws {
-       struct pblk *pblk;
-       struct pblk_line *line;
-       void *priv;
-       struct work_struct ws;
-};
-
-#define pblk_g_rq_size (sizeof(struct nvm_rq) + sizeof(struct pblk_g_ctx))
-#define pblk_w_rq_size (sizeof(struct nvm_rq) + sizeof(struct pblk_c_ctx))
-
-#define pblk_err(pblk, fmt, ...)                       \
-       pr_err("pblk %s: " fmt, pblk->disk->disk_name, ##__VA_ARGS__)
-#define pblk_info(pblk, fmt, ...)                      \
-       pr_info("pblk %s: " fmt, pblk->disk->disk_name, ##__VA_ARGS__)
-#define pblk_warn(pblk, fmt, ...)                      \
-       pr_warn("pblk %s: " fmt, pblk->disk->disk_name, ##__VA_ARGS__)
-#define pblk_debug(pblk, fmt, ...)                     \
-       pr_debug("pblk %s: " fmt, pblk->disk->disk_name, ##__VA_ARGS__)
-
-/*
- * pblk ring buffer operations
- */
-int pblk_rb_init(struct pblk_rb *rb, unsigned int size, unsigned int threshold,
-                unsigned int seg_sz);
-int pblk_rb_may_write_user(struct pblk_rb *rb, struct bio *bio,
-                          unsigned int nr_entries, unsigned int *pos);
-int pblk_rb_may_write_gc(struct pblk_rb *rb, unsigned int nr_entries,
-                        unsigned int *pos);
-void pblk_rb_write_entry_user(struct pblk_rb *rb, void *data,
-                             struct pblk_w_ctx w_ctx, unsigned int pos);
-void pblk_rb_write_entry_gc(struct pblk_rb *rb, void *data,
-                           struct pblk_w_ctx w_ctx, struct pblk_line *line,
-                           u64 paddr, unsigned int pos);
-struct pblk_w_ctx *pblk_rb_w_ctx(struct pblk_rb *rb, unsigned int pos);
-void pblk_rb_flush(struct pblk_rb *rb);
-
-void pblk_rb_sync_l2p(struct pblk_rb *rb);
-unsigned int pblk_rb_read_to_bio(struct pblk_rb *rb, struct nvm_rq *rqd,
-                                unsigned int pos, unsigned int nr_entries,
-                                unsigned int count);
-int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba,
-                       struct ppa_addr ppa);
-unsigned int pblk_rb_read_commit(struct pblk_rb *rb, unsigned int entries);
-
-unsigned int pblk_rb_sync_init(struct pblk_rb *rb, unsigned long *flags);
-unsigned int pblk_rb_sync_advance(struct pblk_rb *rb, unsigned int nr_entries);
-unsigned int pblk_rb_ptr_wrap(struct pblk_rb *rb, unsigned int p,
-                             unsigned int nr_entries);
-void pblk_rb_sync_end(struct pblk_rb *rb, unsigned long *flags);
-unsigned int pblk_rb_flush_point_count(struct pblk_rb *rb);
-
-unsigned int pblk_rb_read_count(struct pblk_rb *rb);
-unsigned int pblk_rb_sync_count(struct pblk_rb *rb);
-unsigned int pblk_rb_wrap_pos(struct pblk_rb *rb, unsigned int pos);
-
-int pblk_rb_tear_down_check(struct pblk_rb *rb);
-int pblk_rb_pos_oob(struct pblk_rb *rb, u64 pos);
-void pblk_rb_free(struct pblk_rb *rb);
-ssize_t pblk_rb_sysfs(struct pblk_rb *rb, char *buf);
-
-/*
- * pblk core
- */
-struct nvm_rq *pblk_alloc_rqd(struct pblk *pblk, int type);
-void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int type);
-int pblk_alloc_rqd_meta(struct pblk *pblk, struct nvm_rq *rqd);
-void pblk_free_rqd_meta(struct pblk *pblk, struct nvm_rq *rqd);
-void pblk_set_sec_per_write(struct pblk *pblk, int sec_per_write);
-int pblk_setup_w_rec_rq(struct pblk *pblk, struct nvm_rq *rqd,
-                       struct pblk_c_ctx *c_ctx);
-void pblk_discard(struct pblk *pblk, struct bio *bio);
-struct nvm_chk_meta *pblk_get_chunk_meta(struct pblk *pblk);
-struct nvm_chk_meta *pblk_chunk_get_off(struct pblk *pblk,
-                                             struct nvm_chk_meta *lp,
-                                             struct ppa_addr ppa);
-void pblk_log_write_err(struct pblk *pblk, struct nvm_rq *rqd);
-void pblk_log_read_err(struct pblk *pblk, struct nvm_rq *rqd);
-int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd, void *buf);
-int pblk_submit_io_sync(struct pblk *pblk, struct nvm_rq *rqd, void *buf);
-int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line);
-void pblk_check_chunk_state_update(struct pblk *pblk, struct nvm_rq *rqd);
-struct pblk_line *pblk_line_get(struct pblk *pblk);
-struct pblk_line *pblk_line_get_first_data(struct pblk *pblk);
-struct pblk_line *pblk_line_replace_data(struct pblk *pblk);
-void pblk_ppa_to_line_put(struct pblk *pblk, struct ppa_addr ppa);
-void pblk_rq_to_line_put(struct pblk *pblk, struct nvm_rq *rqd);
-int pblk_line_recov_alloc(struct pblk *pblk, struct pblk_line *line);
-void pblk_line_recov_close(struct pblk *pblk, struct pblk_line *line);
-struct pblk_line *pblk_line_get_data(struct pblk *pblk);
-struct pblk_line *pblk_line_get_erase(struct pblk *pblk);
-int pblk_line_erase(struct pblk *pblk, struct pblk_line *line);
-int pblk_line_is_full(struct pblk_line *line);
-void pblk_line_free(struct pblk_line *line);
-void pblk_line_close_meta(struct pblk *pblk, struct pblk_line *line);
-void pblk_line_close(struct pblk *pblk, struct pblk_line *line);
-void pblk_line_close_ws(struct work_struct *work);
-void pblk_pipeline_stop(struct pblk *pblk);
-void __pblk_pipeline_stop(struct pblk *pblk);
-void __pblk_pipeline_flush(struct pblk *pblk);
-void pblk_gen_run_ws(struct pblk *pblk, struct pblk_line *line, void *priv,
-                    void (*work)(struct work_struct *), gfp_t gfp_mask,
-                    struct workqueue_struct *wq);
-u64 pblk_line_smeta_start(struct pblk *pblk, struct pblk_line *line);
-int pblk_line_smeta_read(struct pblk *pblk, struct pblk_line *line);
-int pblk_line_emeta_read(struct pblk *pblk, struct pblk_line *line,
-                        void *emeta_buf);
-int pblk_blk_erase_async(struct pblk *pblk, struct ppa_addr erase_ppa);
-void pblk_line_put(struct kref *ref);
-void pblk_line_put_wq(struct kref *ref);
-struct list_head *pblk_line_gc_list(struct pblk *pblk, struct pblk_line *line);
-u64 pblk_lookup_page(struct pblk *pblk, struct pblk_line *line);
-void pblk_dealloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs);
-u64 pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs);
-u64 __pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs);
-int pblk_calc_secs(struct pblk *pblk, unsigned long secs_avail,
-                  unsigned long secs_to_flush, bool skip_meta);
-void pblk_down_rq(struct pblk *pblk, struct ppa_addr ppa,
-                 unsigned long *lun_bitmap);
-void pblk_down_chunk(struct pblk *pblk, struct ppa_addr ppa);
-void pblk_up_chunk(struct pblk *pblk, struct ppa_addr ppa);
-void pblk_up_rq(struct pblk *pblk, unsigned long *lun_bitmap);
-int pblk_bio_add_pages(struct pblk *pblk, struct bio *bio, gfp_t flags,
-                      int nr_pages);
-void pblk_bio_free_pages(struct pblk *pblk, struct bio *bio, int off,
-                        int nr_pages);
-void pblk_map_invalidate(struct pblk *pblk, struct ppa_addr ppa);
-void __pblk_map_invalidate(struct pblk *pblk, struct pblk_line *line,
-                          u64 paddr);
-void pblk_update_map(struct pblk *pblk, sector_t lba, struct ppa_addr ppa);
-void pblk_update_map_cache(struct pblk *pblk, sector_t lba,
-                          struct ppa_addr ppa);
-void pblk_update_map_dev(struct pblk *pblk, sector_t lba,
-                        struct ppa_addr ppa, struct ppa_addr entry_line);
-int pblk_update_map_gc(struct pblk *pblk, sector_t lba, struct ppa_addr ppa,
-                      struct pblk_line *gc_line, u64 paddr);
-void pblk_lookup_l2p_rand(struct pblk *pblk, struct ppa_addr *ppas,
-                         u64 *lba_list, int nr_secs);
-int pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
-                        sector_t blba, int nr_secs, bool *from_cache);
-void *pblk_get_meta_for_writes(struct pblk *pblk, struct nvm_rq *rqd);
-void pblk_get_packed_meta(struct pblk *pblk, struct nvm_rq *rqd);
-
-/*
- * pblk user I/O write path
- */
-void pblk_write_to_cache(struct pblk *pblk, struct bio *bio,
-                       unsigned long flags);
-int pblk_write_gc_to_cache(struct pblk *pblk, struct pblk_gc_rq *gc_rq);
-
-/*
- * pblk map
- */
-int pblk_map_erase_rq(struct pblk *pblk, struct nvm_rq *rqd,
-                      unsigned int sentry, unsigned long *lun_bitmap,
-                      unsigned int valid_secs, struct ppa_addr *erase_ppa);
-int pblk_map_rq(struct pblk *pblk, struct nvm_rq *rqd, unsigned int sentry,
-                unsigned long *lun_bitmap, unsigned int valid_secs,
-                unsigned int off);
-
-/*
- * pblk write thread
- */
-int pblk_write_ts(void *data);
-void pblk_write_timer_fn(struct timer_list *t);
-void pblk_write_should_kick(struct pblk *pblk);
-void pblk_write_kick(struct pblk *pblk);
-
-/*
- * pblk read path
- */
-extern struct bio_set pblk_bio_set;
-void pblk_submit_read(struct pblk *pblk, struct bio *bio);
-int pblk_submit_read_gc(struct pblk *pblk, struct pblk_gc_rq *gc_rq);
-/*
- * pblk recovery
- */
-struct pblk_line *pblk_recov_l2p(struct pblk *pblk);
-int pblk_recov_pad(struct pblk *pblk);
-int pblk_recov_check_emeta(struct pblk *pblk, struct line_emeta *emeta);
-
-/*
- * pblk gc
- */
-#define PBLK_GC_MAX_READERS 8  /* Max number of outstanding GC reader jobs */
-#define PBLK_GC_RQ_QD 128      /* Queue depth for inflight GC requests */
-#define PBLK_GC_L_QD 4         /* Queue depth for inflight GC lines */
-
-int pblk_gc_init(struct pblk *pblk);
-void pblk_gc_exit(struct pblk *pblk, bool graceful);
-void pblk_gc_should_start(struct pblk *pblk);
-void pblk_gc_should_stop(struct pblk *pblk);
-void pblk_gc_should_kick(struct pblk *pblk);
-void pblk_gc_free_full_lines(struct pblk *pblk);
-void pblk_gc_sysfs_state_show(struct pblk *pblk, int *gc_enabled,
-                             int *gc_active);
-int pblk_gc_sysfs_force(struct pblk *pblk, int force);
-void pblk_put_line_back(struct pblk *pblk, struct pblk_line *line);
-
-/*
- * pblk rate limiter
- */
-void pblk_rl_init(struct pblk_rl *rl, int budget, int threshold);
-void pblk_rl_free(struct pblk_rl *rl);
-void pblk_rl_update_rates(struct pblk_rl *rl);
-int pblk_rl_high_thrs(struct pblk_rl *rl);
-unsigned long pblk_rl_nr_free_blks(struct pblk_rl *rl);
-unsigned long pblk_rl_nr_user_free_blks(struct pblk_rl *rl);
-int pblk_rl_user_may_insert(struct pblk_rl *rl, int nr_entries);
-void pblk_rl_inserted(struct pblk_rl *rl, int nr_entries);
-void pblk_rl_user_in(struct pblk_rl *rl, int nr_entries);
-int pblk_rl_gc_may_insert(struct pblk_rl *rl, int nr_entries);
-void pblk_rl_gc_in(struct pblk_rl *rl, int nr_entries);
-void pblk_rl_out(struct pblk_rl *rl, int nr_user, int nr_gc);
-int pblk_rl_max_io(struct pblk_rl *rl);
-void pblk_rl_free_lines_inc(struct pblk_rl *rl, struct pblk_line *line);
-void pblk_rl_free_lines_dec(struct pblk_rl *rl, struct pblk_line *line,
-                           bool used);
-int pblk_rl_is_limit(struct pblk_rl *rl);
-
-void pblk_rl_werr_line_in(struct pblk_rl *rl);
-void pblk_rl_werr_line_out(struct pblk_rl *rl);
-
-/*
- * pblk sysfs
- */
-int pblk_sysfs_init(struct gendisk *tdisk);
-void pblk_sysfs_exit(struct gendisk *tdisk);
-
-static inline struct nvm_rq *nvm_rq_from_c_ctx(void *c_ctx)
-{
-       return c_ctx - sizeof(struct nvm_rq);
-}
-
-static inline void *emeta_to_bb(struct line_emeta *emeta)
-{
-       return emeta->bb_bitmap;
-}
-
-static inline void *emeta_to_wa(struct pblk_line_meta *lm,
-                               struct line_emeta *emeta)
-{
-       return emeta->bb_bitmap + lm->blk_bitmap_len;
-}
-
-static inline void *emeta_to_lbas(struct pblk *pblk, struct line_emeta *emeta)
-{
-       return ((void *)emeta + pblk->lm.emeta_len[1]);
-}
-
-static inline void *emeta_to_vsc(struct pblk *pblk, struct line_emeta *emeta)
-{
-       return (emeta_to_lbas(pblk, emeta) + pblk->lm.emeta_len[2]);
-}
-
-static inline int pblk_line_vsc(struct pblk_line *line)
-{
-       return le32_to_cpu(*line->vsc);
-}
-
-static inline int pblk_ppa_to_line_id(struct ppa_addr p)
-{
-       return p.a.blk;
-}
-
-static inline struct pblk_line *pblk_ppa_to_line(struct pblk *pblk,
-                                                struct ppa_addr p)
-{
-       return &pblk->lines[pblk_ppa_to_line_id(p)];
-}
-
-static inline int pblk_ppa_to_pos(struct nvm_geo *geo, struct ppa_addr p)
-{
-       return p.a.lun * geo->num_ch + p.a.ch;
-}
-
-static inline struct ppa_addr addr_to_gen_ppa(struct pblk *pblk, u64 paddr,
-                                             u64 line_id)
-{
-       struct nvm_tgt_dev *dev = pblk->dev;
-       struct nvm_geo *geo = &dev->geo;
-       struct ppa_addr ppa;
-
-       if (geo->version == NVM_OCSSD_SPEC_12) {
-               struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&pblk->addrf;
-
-               ppa.ppa = 0;
-               ppa.g.blk = line_id;
-               ppa.g.pg = (paddr & ppaf->pg_mask) >> ppaf->pg_offset;
-               ppa.g.lun = (paddr & ppaf->lun_mask) >> ppaf->lun_offset;
-               ppa.g.ch = (paddr & ppaf->ch_mask) >> ppaf->ch_offset;
-               ppa.g.pl = (paddr & ppaf->pln_mask) >> ppaf->pln_offset;
-               ppa.g.sec = (paddr & ppaf->sec_mask) >> ppaf->sec_offset;
-       } else {
-               struct pblk_addrf *uaddrf = &pblk->uaddrf;
-               int secs, chnls, luns;
-
-               ppa.ppa = 0;
-
-               ppa.m.chk = line_id;
-
-               paddr = div_u64_rem(paddr, uaddrf->sec_stripe, &secs);
-               ppa.m.sec = secs;
-
-               paddr = div_u64_rem(paddr, uaddrf->ch_stripe, &chnls);
-               ppa.m.grp = chnls;
-
-               paddr = div_u64_rem(paddr, uaddrf->lun_stripe, &luns);
-               ppa.m.pu = luns;
-
-               ppa.m.sec += uaddrf->sec_stripe * paddr;
-       }
-
-       return ppa;
-}
-
-static inline struct nvm_chk_meta *pblk_dev_ppa_to_chunk(struct pblk *pblk,
-                                                       struct ppa_addr p)
-{
-       struct nvm_tgt_dev *dev = pblk->dev;
-       struct nvm_geo *geo = &dev->geo;
-       struct pblk_line *line = pblk_ppa_to_line(pblk, p);
-       int pos = pblk_ppa_to_pos(geo, p);
-
-       return &line->chks[pos];
-}
-
-static inline u64 pblk_dev_ppa_to_chunk_addr(struct pblk *pblk,
-                                                       struct ppa_addr p)
-{
-       struct nvm_tgt_dev *dev = pblk->dev;
-
-       return dev_to_chunk_addr(dev->parent, &pblk->addrf, p);
-}
-
-static inline u64 pblk_dev_ppa_to_line_addr(struct pblk *pblk,
-                                                       struct ppa_addr p)
-{
-       struct nvm_tgt_dev *dev = pblk->dev;
-       struct nvm_geo *geo = &dev->geo;
-       u64 paddr;
-
-       if (geo->version == NVM_OCSSD_SPEC_12) {
-               struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&pblk->addrf;
-
-               paddr = (u64)p.g.ch << ppaf->ch_offset;
-               paddr |= (u64)p.g.lun << ppaf->lun_offset;
-               paddr |= (u64)p.g.pg << ppaf->pg_offset;
-               paddr |= (u64)p.g.pl << ppaf->pln_offset;
-               paddr |= (u64)p.g.sec << ppaf->sec_offset;
-       } else {
-               struct pblk_addrf *uaddrf = &pblk->uaddrf;
-               u64 secs = p.m.sec;
-               int sec_stripe;
-
-               paddr = (u64)p.m.grp * uaddrf->sec_stripe;
-               paddr += (u64)p.m.pu * uaddrf->sec_lun_stripe;
-
-               secs = div_u64_rem(secs, uaddrf->sec_stripe, &sec_stripe);
-               paddr += secs * uaddrf->sec_ws_stripe;
-               paddr += sec_stripe;
-       }
-
-       return paddr;
-}
-
-static inline struct ppa_addr pblk_ppa32_to_ppa64(struct pblk *pblk, u32 ppa32)
-{
-       struct nvm_tgt_dev *dev = pblk->dev;
-
-       return nvm_ppa32_to_ppa64(dev->parent, &pblk->addrf, ppa32);
-}
-
-static inline u32 pblk_ppa64_to_ppa32(struct pblk *pblk, struct ppa_addr ppa64)
-{
-       struct nvm_tgt_dev *dev = pblk->dev;
-
-       return nvm_ppa64_to_ppa32(dev->parent, &pblk->addrf, ppa64);
-}
-
-static inline struct ppa_addr pblk_trans_map_get(struct pblk *pblk,
-                                                               sector_t lba)
-{
-       struct ppa_addr ppa;
-
-       if (pblk->addrf_len < 32) {
-               u32 *map = (u32 *)pblk->trans_map;
-
-               ppa = pblk_ppa32_to_ppa64(pblk, map[lba]);
-       } else {
-               struct ppa_addr *map = (struct ppa_addr *)pblk->trans_map;
-
-               ppa = map[lba];
-       }
-
-       return ppa;
-}
-
-static inline void pblk_trans_map_set(struct pblk *pblk, sector_t lba,
-                                               struct ppa_addr ppa)
-{
-       if (pblk->addrf_len < 32) {
-               u32 *map = (u32 *)pblk->trans_map;
-
-               map[lba] = pblk_ppa64_to_ppa32(pblk, ppa);
-       } else {
-               u64 *map = (u64 *)pblk->trans_map;
-
-               map[lba] = ppa.ppa;
-       }
-}
-
-static inline int pblk_ppa_empty(struct ppa_addr ppa_addr)
-{
-       return (ppa_addr.ppa == ADDR_EMPTY);
-}
-
-static inline void pblk_ppa_set_empty(struct ppa_addr *ppa_addr)
-{
-       ppa_addr->ppa = ADDR_EMPTY;
-}
-
-static inline bool pblk_ppa_comp(struct ppa_addr lppa, struct ppa_addr rppa)
-{
-       return (lppa.ppa == rppa.ppa);
-}
-
-static inline int pblk_addr_in_cache(struct ppa_addr ppa)
-{
-       return (ppa.ppa != ADDR_EMPTY && ppa.c.is_cached);
-}
-
-static inline int pblk_addr_to_cacheline(struct ppa_addr ppa)
-{
-       return ppa.c.line;
-}
-
-static inline struct ppa_addr pblk_cacheline_to_addr(int addr)
-{
-       struct ppa_addr p;
-
-       p.c.line = addr;
-       p.c.is_cached = 1;
-
-       return p;
-}
-
-static inline u32 pblk_calc_meta_header_crc(struct pblk *pblk,
-                                           struct line_header *header)
-{
-       u32 crc = ~(u32)0;
-
-       crc = crc32_le(crc, (unsigned char *)header + sizeof(crc),
-                               sizeof(struct line_header) - sizeof(crc));
-
-       return crc;
-}
-
-static inline u32 pblk_calc_smeta_crc(struct pblk *pblk,
-                                     struct line_smeta *smeta)
-{
-       struct pblk_line_meta *lm = &pblk->lm;
-       u32 crc = ~(u32)0;
-
-       crc = crc32_le(crc, (unsigned char *)smeta +
-                               sizeof(struct line_header) + sizeof(crc),
-                               lm->smeta_len -
-                               sizeof(struct line_header) - sizeof(crc));
-
-       return crc;
-}
-
-static inline u32 pblk_calc_emeta_crc(struct pblk *pblk,
-                                     struct line_emeta *emeta)
-{
-       struct pblk_line_meta *lm = &pblk->lm;
-       u32 crc = ~(u32)0;
-
-       crc = crc32_le(crc, (unsigned char *)emeta +
-                               sizeof(struct line_header) + sizeof(crc),
-                               lm->emeta_len[0] -
-                               sizeof(struct line_header) - sizeof(crc));
-
-       return crc;
-}
-
-static inline int pblk_io_aligned(struct pblk *pblk, int nr_secs)
-{
-       return !(nr_secs % pblk->min_write_pgs);
-}
-
-#ifdef CONFIG_NVM_PBLK_DEBUG
-static inline void print_ppa(struct pblk *pblk, struct ppa_addr *p,
-                            char *msg, int error)
-{
-       struct nvm_geo *geo = &pblk->dev->geo;
-
-       if (p->c.is_cached) {
-               pblk_err(pblk, "ppa: (%s: %x) cache line: %llu\n",
-                               msg, error, (u64)p->c.line);
-       } else if (geo->version == NVM_OCSSD_SPEC_12) {
-               pblk_err(pblk, "ppa: (%s: %x):ch:%d,lun:%d,blk:%d,pg:%d,pl:%d,sec:%d\n",
-                       msg, error,
-                       p->g.ch, p->g.lun, p->g.blk,
-                       p->g.pg, p->g.pl, p->g.sec);
-       } else {
-               pblk_err(pblk, "ppa: (%s: %x):ch:%d,lun:%d,chk:%d,sec:%d\n",
-                       msg, error,
-                       p->m.grp, p->m.pu, p->m.chk, p->m.sec);
-       }
-}
-
-static inline void pblk_print_failed_rqd(struct pblk *pblk, struct nvm_rq *rqd,
-                                        int error)
-{
-       int bit = -1;
-
-       if (rqd->nr_ppas ==  1) {
-               print_ppa(pblk, &rqd->ppa_addr, "rqd", error);
-               return;
-       }
-
-       while ((bit = find_next_bit((void *)&rqd->ppa_status, rqd->nr_ppas,
-                                               bit + 1)) < rqd->nr_ppas) {
-               print_ppa(pblk, &rqd->ppa_list[bit], "rqd", error);
-       }
-
-       pblk_err(pblk, "error:%d, ppa_status:%llx\n", error, rqd->ppa_status);
-}
-
-static inline int pblk_boundary_ppa_checks(struct nvm_tgt_dev *tgt_dev,
-                                      struct ppa_addr *ppas, int nr_ppas)
-{
-       struct nvm_geo *geo = &tgt_dev->geo;
-       struct ppa_addr *ppa;
-       int i;
-
-       for (i = 0; i < nr_ppas; i++) {
-               ppa = &ppas[i];
-
-               if (geo->version == NVM_OCSSD_SPEC_12) {
-                       if (!ppa->c.is_cached &&
-                                       ppa->g.ch < geo->num_ch &&
-                                       ppa->g.lun < geo->num_lun &&
-                                       ppa->g.pl < geo->num_pln &&
-                                       ppa->g.blk < geo->num_chk &&
-                                       ppa->g.pg < geo->num_pg &&
-                                       ppa->g.sec < geo->ws_min)
-                               continue;
-               } else {
-                       if (!ppa->c.is_cached &&
-                                       ppa->m.grp < geo->num_ch &&
-                                       ppa->m.pu < geo->num_lun &&
-                                       ppa->m.chk < geo->num_chk &&
-                                       ppa->m.sec < geo->clba)
-                               continue;
-               }
-
-               print_ppa(tgt_dev->q->queuedata, ppa, "boundary", i);
-
-               return 1;
-       }
-       return 0;
-}
-
-static inline int pblk_check_io(struct pblk *pblk, struct nvm_rq *rqd)
-{
-       struct nvm_tgt_dev *dev = pblk->dev;
-       struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
-
-       if (pblk_boundary_ppa_checks(dev, ppa_list, rqd->nr_ppas)) {
-               WARN_ON(1);
-               return -EINVAL;
-       }
-
-       if (rqd->opcode == NVM_OP_PWRITE) {
-               struct pblk_line *line;
-               int i;
-
-               for (i = 0; i < rqd->nr_ppas; i++) {
-                       line = pblk_ppa_to_line(pblk, ppa_list[i]);
-
-                       spin_lock(&line->lock);
-                       if (line->state != PBLK_LINESTATE_OPEN) {
-                               pblk_err(pblk, "bad ppa: line:%d,state:%d\n",
-                                                       line->id, line->state);
-                               WARN_ON(1);
-                               spin_unlock(&line->lock);
-                               return -EINVAL;
-                       }
-                       spin_unlock(&line->lock);
-               }
-       }
-
-       return 0;
-}
-#endif
-
-static inline int pblk_boundary_paddr_checks(struct pblk *pblk, u64 paddr)
-{
-       struct pblk_line_meta *lm = &pblk->lm;
-
-       if (paddr > lm->sec_per_line)
-               return 1;
-
-       return 0;
-}
-
-static inline unsigned int pblk_get_bi_idx(struct bio *bio)
-{
-       return bio->bi_iter.bi_idx;
-}
-
-static inline sector_t pblk_get_lba(struct bio *bio)
-{
-       return bio->bi_iter.bi_sector / NR_PHY_IN_LOG;
-}
-
-static inline unsigned int pblk_get_secs(struct bio *bio)
-{
-       return  bio->bi_iter.bi_size / PBLK_EXPOSED_PAGE_SIZE;
-}
-
-static inline char *pblk_disk_name(struct pblk *pblk)
-{
-       struct gendisk *disk = pblk->disk;
-
-       return disk->disk_name;
-}
-
-static inline unsigned int pblk_get_min_chks(struct pblk *pblk)
-{
-       struct pblk_line_meta *lm = &pblk->lm;
-       /* In a worst-case scenario every line will have OP invalid sectors.
-        * We will then need a minimum of 1/OP lines to free up a single line
-        */
-
-       return DIV_ROUND_UP(100, pblk->op) * lm->blk_per_line;
-}
-
-static inline struct pblk_sec_meta *pblk_get_meta(struct pblk *pblk,
-                                                        void *meta, int index)
-{
-       return meta +
-              max_t(int, sizeof(struct pblk_sec_meta), pblk->oob_meta_size)
-              * index;
-}
-
-static inline int pblk_dma_meta_size(struct pblk *pblk)
-{
-       return max_t(int, sizeof(struct pblk_sec_meta), pblk->oob_meta_size)
-              * NVM_MAX_VLBA;
-}
-
-static inline int pblk_is_oob_meta_supported(struct pblk *pblk)
-{
-       return pblk->oob_meta_size >= sizeof(struct pblk_sec_meta);
-}
-#endif /* PBLK_H_ */
index 0602e82..f45fb37 100644 (file)
@@ -15,6 +15,7 @@ if MD
 
 config BLK_DEV_MD
        tristate "RAID support"
+       select BLOCK_HOLDER_DEPRECATED if SYSFS
        help
          This driver lets you combine several hard disk partitions into one
          logical block device. This can be used to simply append one
@@ -201,6 +202,7 @@ config BLK_DEV_DM_BUILTIN
 
 config BLK_DEV_DM
        tristate "Device mapper support"
+       select BLOCK_HOLDER_DEPRECATED if SYSFS
        select BLK_DEV_DM_BUILTIN
        depends on DAX || DAX=n
        help
@@ -340,7 +342,7 @@ config DM_WRITECACHE
 
 config DM_EBS
        tristate "Emulated block size target (EXPERIMENTAL)"
-       depends on BLK_DEV_DM
+       depends on BLK_DEV_DM && !HIGHMEM
        select DM_BUFIO
        help
          dm-ebs emulates smaller logical block size on backing devices
index d1ca4d0..cf3e809 100644 (file)
@@ -2,6 +2,7 @@
 
 config BCACHE
        tristate "Block device as cache"
+       select BLOCK_HOLDER_DEPRECATED if SYSFS
        select CRC64
        help
        Allows a block device to be used as cache for other devices; uses
index 183a58c..0595559 100644 (file)
@@ -378,7 +378,7 @@ static void do_btree_node_write(struct btree *b)
                struct bvec_iter_all iter_all;
 
                bio_for_each_segment_all(bv, b->bio, iter_all) {
-                       memcpy(page_address(bv->bv_page), addr, PAGE_SIZE);
+                       memcpy(bvec_virt(bv), addr, PAGE_SIZE);
                        addr += PAGE_SIZE;
                }
 
index 185246a..f2874c7 100644 (file)
@@ -885,11 +885,6 @@ static void bcache_device_free(struct bcache_device *d)
                bcache_device_detach(d);
 
        if (disk) {
-               bool disk_added = (disk->flags & GENHD_FL_UP) != 0;
-
-               if (disk_added)
-                       del_gendisk(disk);
-
                blk_cleanup_disk(disk);
                ida_simple_remove(&bcache_device_idx,
                                  first_minor_to_idx(disk->first_minor));
@@ -931,20 +926,20 @@ static int bcache_device_init(struct bcache_device *d, unsigned int block_size,
        n = BITS_TO_LONGS(d->nr_stripes) * sizeof(unsigned long);
        d->full_dirty_stripes = kvzalloc(n, GFP_KERNEL);
        if (!d->full_dirty_stripes)
-               return -ENOMEM;
+               goto out_free_stripe_sectors_dirty;
 
        idx = ida_simple_get(&bcache_device_idx, 0,
                                BCACHE_DEVICE_IDX_MAX, GFP_KERNEL);
        if (idx < 0)
-               return idx;
+               goto out_free_full_dirty_stripes;
 
        if (bioset_init(&d->bio_split, 4, offsetof(struct bbio, bio),
                        BIOSET_NEED_BVECS|BIOSET_NEED_RESCUER))
-               goto err;
+               goto out_ida_remove;
 
        d->disk = blk_alloc_disk(NUMA_NO_NODE);
        if (!d->disk)
-               goto err;
+               goto out_bioset_exit;
 
        set_capacity(d->disk, sectors);
        snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", idx);
@@ -987,8 +982,14 @@ static int bcache_device_init(struct bcache_device *d, unsigned int block_size,
 
        return 0;
 
-err:
+out_bioset_exit:
+       bioset_exit(&d->bio_split);
+out_ida_remove:
        ida_simple_remove(&bcache_device_idx, idx);
+out_free_full_dirty_stripes:
+       kvfree(d->full_dirty_stripes);
+out_free_stripe_sectors_dirty:
+       kvfree(d->stripe_sectors_dirty);
        return -ENOMEM;
 
 }
@@ -1365,8 +1366,10 @@ static void cached_dev_free(struct closure *cl)
 
        mutex_lock(&bch_register_lock);
 
-       if (atomic_read(&dc->running))
+       if (atomic_read(&dc->running)) {
                bd_unlink_disk_holder(dc->bdev, dc->disk.disk);
+               del_gendisk(dc->disk.disk);
+       }
        bcache_device_free(&dc->disk);
        list_del(&dc->list);
 
@@ -1512,6 +1515,7 @@ static void flash_dev_free(struct closure *cl)
        mutex_lock(&bch_register_lock);
        atomic_long_sub(bcache_dev_sectors_dirty(d),
                        &d->c->flash_dev_dirty_sectors);
+       del_gendisk(d->disk);
        bcache_device_free(d);
        mutex_unlock(&bch_register_lock);
        kobject_put(&d->kobj);
index bca4a7c..b64460a 100644 (file)
@@ -15,8 +15,6 @@
 
 #include "closure.h"
 
-#define PAGE_SECTORS           (PAGE_SIZE / 512)
-
 struct closure;
 
 #ifdef CONFIG_BCACHE_DEBUG
index 71475a2..0c509da 100644 (file)
@@ -74,7 +74,7 @@ static int __ebs_rw_bvec(struct ebs_c *ec, int rw, struct bio_vec *bv, struct bv
        if (unlikely(!bv->bv_page || !bv_len))
                return -EIO;
 
-       pa = page_address(bv->bv_page) + bv->bv_offset;
+       pa = bvec_virt(bv);
 
        /* Handle overlapping page <-> blocks */
        while (bv_len) {
index 20f2510..a9ea361 100644 (file)
@@ -1819,7 +1819,7 @@ again:
                                unsigned this_len;
 
                                BUG_ON(PageHighMem(biv.bv_page));
-                               tag = lowmem_page_address(biv.bv_page) + biv.bv_offset;
+                               tag = bvec_virt(&biv);
                                this_len = min(biv.bv_len, data_to_process);
                                r = dm_integrity_rw_tag(ic, tag, &dio->metadata_block, &dio->metadata_offset,
                                                        this_len, dio->op == REQ_OP_READ ? TAG_READ : TAG_WRITE);
@@ -2006,7 +2006,7 @@ retry_kmap:
                                        unsigned tag_now = min(biv.bv_len, tag_todo);
                                        char *tag_addr;
                                        BUG_ON(PageHighMem(biv.bv_page));
-                                       tag_addr = lowmem_page_address(biv.bv_page) + biv.bv_offset;
+                                       tag_addr = bvec_virt(&biv);
                                        if (likely(dio->op == REQ_OP_WRITE))
                                                memcpy(tag_ptr, tag_addr, tag_now);
                                        else
index 2209cbc..2575074 100644 (file)
@@ -1436,9 +1436,6 @@ static int table_load(struct file *filp, struct dm_ioctl *param, size_t param_si
        }
 
        if (dm_get_md_type(md) == DM_TYPE_NONE) {
-               /* Initial table load: acquire type of table. */
-               dm_set_md_type(md, dm_table_get_type(t));
-
                /* setup md->queue to reflect md's type (may block) */
                r = dm_setup_md_queue(md, t);
                if (r) {
@@ -2187,7 +2184,6 @@ int __init dm_early_create(struct dm_ioctl *dmi,
        if (r)
                goto err_destroy_table;
 
-       md->type = dm_table_get_type(t);
        /* setup md->queue to reflect md's type (may block) */
        r = dm_setup_md_queue(md, t);
        if (r) {
index 0dbd48c..5b95eea 100644 (file)
@@ -559,7 +559,6 @@ int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
        err = blk_mq_init_allocated_queue(md->tag_set, md->queue);
        if (err)
                goto out_tag_set;
-       elevator_init_mq(md->queue);
        return 0;
 
 out_tag_set:
index 0543cdf..b03eabc 100644 (file)
@@ -2076,7 +2076,7 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
        }
 
        dm_update_keyslot_manager(q, t);
-       blk_queue_update_readahead(q);
+       disk_update_readahead(t->md->disk);
 
        return 0;
 }
index e21e29e..3d2cf81 100644 (file)
@@ -1214,14 +1214,13 @@ static void memcpy_flushcache_optimized(void *dest, void *source, size_t size)
 static void bio_copy_block(struct dm_writecache *wc, struct bio *bio, void *data)
 {
        void *buf;
-       unsigned long flags;
        unsigned size;
        int rw = bio_data_dir(bio);
        unsigned remaining_size = wc->block_size;
 
        do {
                struct bio_vec bv = bio_iter_iovec(bio, bio->bi_iter);
-               buf = bvec_kmap_irq(&bv, &flags);
+               buf = bvec_kmap_local(&bv);
                size = bv.bv_len;
                if (unlikely(size > remaining_size))
                        size = remaining_size;
@@ -1239,7 +1238,7 @@ static void bio_copy_block(struct dm_writecache *wc, struct bio *bio, void *data
                        memcpy_flushcache_optimized(data, buf, size);
                }
 
-               bvec_kunmap_irq(buf, &flags);
+               kunmap_local(buf);
 
                data = (char *)data + size;
                remaining_size -= size;
index 2c5f9e5..7981b72 100644 (file)
@@ -1693,14 +1693,13 @@ static void cleanup_mapped_device(struct mapped_device *md)
                spin_lock(&_minor_lock);
                md->disk->private_data = NULL;
                spin_unlock(&_minor_lock);
-               del_gendisk(md->disk);
-       }
-
-       if (md->queue)
+               if (dm_get_md_type(md) != DM_TYPE_NONE) {
+                       dm_sysfs_exit(md);
+                       del_gendisk(md->disk);
+               }
                dm_queue_destroy_keyslot_manager(md->queue);
-
-       if (md->disk)
                blk_cleanup_disk(md->disk);
+       }
 
        cleanup_srcu_struct(&md->io_barrier);
 
@@ -1792,7 +1791,6 @@ static struct mapped_device *alloc_dev(int minor)
                        goto bad;
        }
 
-       add_disk_no_queue_reg(md->disk);
        format_dev_t(md->name, MKDEV(_major, minor));
 
        md->wq = alloc_workqueue("kdmflush", WQ_MEM_RECLAIM, 0);
@@ -1993,19 +1991,12 @@ static struct dm_table *__unbind(struct mapped_device *md)
  */
 int dm_create(int minor, struct mapped_device **result)
 {
-       int r;
        struct mapped_device *md;
 
        md = alloc_dev(minor);
        if (!md)
                return -ENXIO;
 
-       r = dm_sysfs_init(md);
-       if (r) {
-               free_dev(md);
-               return r;
-       }
-
        *result = md;
        return 0;
 }
@@ -2056,9 +2047,9 @@ EXPORT_SYMBOL_GPL(dm_get_queue_limits);
  */
 int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
 {
-       int r;
+       enum dm_queue_mode type = dm_table_get_type(t);
        struct queue_limits limits;
-       enum dm_queue_mode type = dm_get_md_type(md);
+       int r;
 
        switch (type) {
        case DM_TYPE_REQUEST_BASED:
@@ -2086,8 +2077,14 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
        if (r)
                return r;
 
-       blk_register_queue(md->disk);
+       add_disk(md->disk);
 
+       r = dm_sysfs_init(md);
+       if (r) {
+               del_gendisk(md->disk);
+               return r;
+       }
+       md->type = type;
        return 0;
 }
 
@@ -2193,7 +2190,6 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
                DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)",
                       dm_device_name(md), atomic_read(&md->holders));
 
-       dm_sysfs_exit(md);
        dm_table_destroy(__unbind(md));
        free_dev(md);
 }
index 832547c..4c96c36 100644 (file)
@@ -764,9 +764,7 @@ struct md_rdev *md_find_rdev_rcu(struct mddev *mddev, dev_t dev);
 
 static inline bool is_mddev_broken(struct md_rdev *rdev, const char *md_type)
 {
-       int flags = rdev->bdev->bd_disk->flags;
-
-       if (!(flags & GENHD_FL_UP)) {
+       if (!disk_live(rdev->bdev->bd_disk)) {
                if (!test_and_set_bit(MD_BROKEN, &rdev->mddev->flags))
                        pr_warn("md: %s: %s array has a missing/failed member\n",
                                mdname(rdev->mddev), md_type);
index 51f2547..19598bd 100644 (file)
@@ -474,8 +474,6 @@ static void raid1_end_write_request(struct bio *bio)
                /*
                 * When the device is faulty, it is not necessary to
                 * handle write error.
-                * For failfast, this is the only remaining device,
-                * We need to retry the write without FailFast.
                 */
                if (!test_bit(Faulty, &rdev->flags))
                        set_bit(R1BIO_WriteError, &r1_bio->state);
@@ -1331,6 +1329,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
        struct raid1_plug_cb *plug = NULL;
        int first_clone;
        int max_sectors;
+       bool write_behind = false;
 
        if (mddev_is_clustered(mddev) &&
             md_cluster_ops->area_resyncing(mddev, WRITE,
@@ -1383,6 +1382,15 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
        max_sectors = r1_bio->sectors;
        for (i = 0;  i < disks; i++) {
                struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
+
+               /*
+                * The write-behind io is only attempted on drives marked as
+                * write-mostly, which means we could allocate write behind
+                * bio later.
+                */
+               if (rdev && test_bit(WriteMostly, &rdev->flags))
+                       write_behind = true;
+
                if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
                        atomic_inc(&rdev->nr_pending);
                        blocked_rdev = rdev;
@@ -1456,6 +1464,15 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
                goto retry_write;
        }
 
+       /*
+        * When using a bitmap, we may call alloc_behind_master_bio below.
+        * alloc_behind_master_bio allocates a copy of the data payload a page
+        * at a time and thus needs a new bio that can fit the whole payload
+        * this bio in page sized chunks.
+        */
+       if (write_behind && bitmap)
+               max_sectors = min_t(int, max_sectors,
+                                   BIO_MAX_VECS * (PAGE_SIZE >> 9));
        if (max_sectors < bio_sectors(bio)) {
                struct bio *split = bio_split(bio, max_sectors,
                                              GFP_NOIO, &conf->bio_split);
index 16977e8..aa26365 100644 (file)
@@ -471,12 +471,12 @@ static void raid10_end_write_request(struct bio *bio)
                        /*
                         * When the device is faulty, it is not necessary to
                         * handle write error.
-                        * For failfast, this is the only remaining device,
-                        * We need to retry the write without FailFast.
                         */
                        if (!test_bit(Faulty, &rdev->flags))
                                set_bit(R10BIO_WriteError, &r10_bio->state);
                        else {
+                               /* Fail the request */
+                               set_bit(R10BIO_Degraded, &r10_bio->state);
                                r10_bio->devs[slot].bio = NULL;
                                to_put = bio;
                                dec_rdev = 1;
@@ -1712,6 +1712,11 @@ retry_discard:
        } else
                r10_bio->master_bio = (struct bio *)first_r10bio;
 
+       /*
+        * first select target devices under rcu_lock and
+        * inc refcount on their rdev.  Record them by setting
+        * bios[x] to bio
+        */
        rcu_read_lock();
        for (disk = 0; disk < geo->raid_disks; disk++) {
                struct md_rdev *rdev = rcu_dereference(conf->mirrors[disk].rdev);
@@ -1743,9 +1748,6 @@ retry_discard:
        for (disk = 0; disk < geo->raid_disks; disk++) {
                sector_t dev_start, dev_end;
                struct bio *mbio, *rbio = NULL;
-               struct md_rdev *rdev = rcu_dereference(conf->mirrors[disk].rdev);
-               struct md_rdev *rrdev = rcu_dereference(
-                       conf->mirrors[disk].replacement);
 
                /*
                 * Now start to calculate the start and end address for each disk.
@@ -1775,9 +1777,12 @@ retry_discard:
 
                /*
                 * It only handles discard bio which size is >= stripe size, so
-                * dev_end > dev_start all the time
+                * dev_end > dev_start all the time.
+                * It doesn't need to use rcu lock to get rdev here. We already
+                * add rdev->nr_pending in the first loop.
                 */
                if (r10_bio->devs[disk].bio) {
+                       struct md_rdev *rdev = conf->mirrors[disk].rdev;
                        mbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set);
                        mbio->bi_end_io = raid10_end_discard_request;
                        mbio->bi_private = r10_bio;
@@ -1790,6 +1795,7 @@ retry_discard:
                        bio_endio(mbio);
                }
                if (r10_bio->devs[disk].repl_bio) {
+                       struct md_rdev *rrdev = conf->mirrors[disk].replacement;
                        rbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set);
                        rbio->bi_end_io = raid10_end_discard_request;
                        rbio->bi_private = r10_bio;
index b8436e4..02ed53b 100644 (file)
@@ -2437,7 +2437,7 @@ static int resize_chunks(struct r5conf *conf, int new_disks, int new_sectors)
            conf->scribble_sectors >= new_sectors)
                return 0;
        mddev_suspend(conf->mddev);
-       get_online_cpus();
+       cpus_read_lock();
 
        for_each_present_cpu(cpu) {
                struct raid5_percpu *percpu;
@@ -2449,7 +2449,7 @@ static int resize_chunks(struct r5conf *conf, int new_disks, int new_sectors)
                        break;
        }
 
-       put_online_cpus();
+       cpus_read_unlock();
        mddev_resume(conf->mddev);
        if (!err) {
                conf->scribble_disks = new_disks;
index 02281d1..508ac29 100644 (file)
@@ -1573,6 +1573,7 @@ int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb,
                  struct media_request *req)
 {
        struct vb2_buffer *vb;
+       enum vb2_buffer_state orig_state;
        int ret;
 
        if (q->error) {
@@ -1673,6 +1674,7 @@ int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb,
         * Add to the queued buffers list, a buffer will stay on it until
         * dequeued in dqbuf.
         */
+       orig_state = vb->state;
        list_add_tail(&vb->queued_entry, &q->queued_list);
        q->queued_count++;
        q->waiting_for_buffers = false;
@@ -1703,8 +1705,17 @@ int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb,
        if (q->streaming && !q->start_streaming_called &&
            q->queued_count >= q->min_buffers_needed) {
                ret = vb2_start_streaming(q);
-               if (ret)
+               if (ret) {
+                       /*
+                        * Since vb2_core_qbuf will return with an error,
+                        * we should return it to state DEQUEUED since
+                        * the error indicates that the buffer wasn't queued.
+                        */
+                       list_del(&vb->queued_entry);
+                       q->queued_count--;
+                       vb->state = orig_state;
                        return ret;
+               }
        }
 
        dprintk(q, 2, "qbuf of buffer %d succeeded\n", vb->index);
index 59a36f9..30d29b9 100644 (file)
@@ -226,7 +226,7 @@ static int cio2_bridge_connect_sensor(const struct cio2_sensor_config *cfg,
 err_free_swnodes:
        software_node_unregister_nodes(sensor->swnodes);
 err_put_adev:
-       acpi_dev_put(sensor->adev);
+       acpi_dev_put(adev);
        return ret;
 }
 
index 99b5121..dda2f27 100644 (file)
@@ -8,6 +8,7 @@ config VIDEO_ATMEL_ISC
        select VIDEOBUF2_DMA_CONTIG
        select REGMAP_MMIO
        select V4L2_FWNODE
+       select VIDEO_ATMEL_ISC_BASE
        help
           This module makes the ATMEL Image Sensor Controller available
           as a v4l2 device.
@@ -19,10 +20,17 @@ config VIDEO_ATMEL_XISC
        select VIDEOBUF2_DMA_CONTIG
        select REGMAP_MMIO
        select V4L2_FWNODE
+       select VIDEO_ATMEL_ISC_BASE
        help
           This module makes the ATMEL eXtended Image Sensor Controller
           available as a v4l2 device.
 
+config VIDEO_ATMEL_ISC_BASE
+       tristate
+       default n
+       help
+         ATMEL ISC and XISC common code base.
+
 config VIDEO_ATMEL_ISI
        tristate "ATMEL Image Sensor Interface (ISI) support"
        depends on VIDEO_V4L2 && OF
index c5c0155..46d264a 100644 (file)
@@ -1,7 +1,8 @@
 # SPDX-License-Identifier: GPL-2.0-only
-atmel-isc-objs = atmel-sama5d2-isc.o atmel-isc-base.o
-atmel-xisc-objs = atmel-sama7g5-isc.o atmel-isc-base.o
+atmel-isc-objs = atmel-sama5d2-isc.o
+atmel-xisc-objs = atmel-sama7g5-isc.o
 
 obj-$(CONFIG_VIDEO_ATMEL_ISI) += atmel-isi.o
+obj-$(CONFIG_VIDEO_ATMEL_ISC_BASE) += atmel-isc-base.o
 obj-$(CONFIG_VIDEO_ATMEL_ISC) += atmel-isc.o
 obj-$(CONFIG_VIDEO_ATMEL_XISC) += atmel-xisc.o
index 19daa49..136ab7c 100644 (file)
@@ -378,6 +378,7 @@ int isc_clk_init(struct isc_device *isc)
 
        return 0;
 }
+EXPORT_SYMBOL_GPL(isc_clk_init);
 
 void isc_clk_cleanup(struct isc_device *isc)
 {
@@ -392,6 +393,7 @@ void isc_clk_cleanup(struct isc_device *isc)
                        clk_unregister(isc_clk->clk);
        }
 }
+EXPORT_SYMBOL_GPL(isc_clk_cleanup);
 
 static int isc_queue_setup(struct vb2_queue *vq,
                            unsigned int *nbuffers, unsigned int *nplanes,
@@ -1578,6 +1580,7 @@ irqreturn_t isc_interrupt(int irq, void *dev_id)
 
        return ret;
 }
+EXPORT_SYMBOL_GPL(isc_interrupt);
 
 static void isc_hist_count(struct isc_device *isc, u32 *min, u32 *max)
 {
@@ -2212,6 +2215,7 @@ const struct v4l2_async_notifier_operations isc_async_ops = {
        .unbind = isc_async_unbind,
        .complete = isc_async_complete,
 };
+EXPORT_SYMBOL_GPL(isc_async_ops);
 
 void isc_subdev_cleanup(struct isc_device *isc)
 {
@@ -2224,6 +2228,7 @@ void isc_subdev_cleanup(struct isc_device *isc)
 
        INIT_LIST_HEAD(&isc->subdev_entities);
 }
+EXPORT_SYMBOL_GPL(isc_subdev_cleanup);
 
 int isc_pipeline_init(struct isc_device *isc)
 {
@@ -2264,6 +2269,7 @@ int isc_pipeline_init(struct isc_device *isc)
 
        return 0;
 }
+EXPORT_SYMBOL_GPL(isc_pipeline_init);
 
 /* regmap configuration */
 #define ATMEL_ISC_REG_MAX    0xd5c
@@ -2273,4 +2279,9 @@ const struct regmap_config isc_regmap_config = {
        .val_bits       = 32,
        .max_register   = ATMEL_ISC_REG_MAX,
 };
+EXPORT_SYMBOL_GPL(isc_regmap_config);
 
+MODULE_AUTHOR("Songjun Wu");
+MODULE_AUTHOR("Eugen Hristev");
+MODULE_DESCRIPTION("Atmel ISC common code base");
+MODULE_LICENSE("GPL v2");
index 8370573..795a012 100644 (file)
@@ -37,7 +37,16 @@ static int rtl28xxu_ctrl_msg(struct dvb_usb_device *d, struct rtl28xxu_req *req)
        } else {
                /* read */
                requesttype = (USB_TYPE_VENDOR | USB_DIR_IN);
-               pipe = usb_rcvctrlpipe(d->udev, 0);
+
+               /*
+                * Zero-length transfers must use usb_sndctrlpipe() and
+                * rtl28xxu_identify_state() uses a zero-length i2c read
+                * command to determine the chip type.
+                */
+               if (req->size)
+                       pipe = usb_rcvctrlpipe(d->udev, 0);
+               else
+                       pipe = usb_sndctrlpipe(d->udev, 0);
        }
 
        ret = usb_control_msg(d->udev, pipe, 0, requesttype, req->value,
@@ -612,9 +621,8 @@ static int rtl28xxu_read_config(struct dvb_usb_device *d)
 static int rtl28xxu_identify_state(struct dvb_usb_device *d, const char **name)
 {
        struct rtl28xxu_dev *dev = d_to_priv(d);
-       u8 buf[1];
        int ret;
-       struct rtl28xxu_req req_demod_i2c = {0x0020, CMD_I2C_DA_RD, 1, buf};
+       struct rtl28xxu_req req_demod_i2c = {0x0020, CMD_I2C_DA_RD, 0, NULL};
 
        dev_dbg(&d->intf->dev, "\n");
 
index 3bde7fd..287da20 100644 (file)
@@ -2364,7 +2364,7 @@ static bool read_mailbox_0(void)
 
                for (n = 0; n < NUM_PRCMU_WAKEUPS; n++) {
                        if (ev & prcmu_irq_bit[n])
-                               generic_handle_irq(irq_find_mapping(db8500_irq_domain, n));
+                               generic_handle_domain_irq(db8500_irq_domain, n);
                }
                r = true;
                break;
index 5f6f0a8..37e5e02 100644 (file)
@@ -35,10 +35,10 @@ static void mx25_tsadc_irq_handler(struct irq_desc *desc)
        regmap_read(tsadc->regs, MX25_TSC_TGSR, &status);
 
        if (status & MX25_TGSR_GCQ_INT)
-               generic_handle_irq(irq_find_mapping(tsadc->domain, 1));
+               generic_handle_domain_irq(tsadc->domain, 1);
 
        if (status & MX25_TGSR_TCQ_INT)
-               generic_handle_irq(irq_find_mapping(tsadc->domain, 0));
+               generic_handle_domain_irq(tsadc->domain, 0);
 
        chained_irq_exit(chip, desc);
 }
index 99b9c11..5865683 100644 (file)
@@ -105,19 +105,15 @@ static void ioc3_irq_handler(struct irq_desc *desc)
        struct ioc3_priv_data *ipd = domain->host_data;
        struct ioc3 __iomem *regs = ipd->regs;
        u32 pending, mask;
-       unsigned int irq;
 
        pending = readl(&regs->sio_ir);
        mask = readl(&regs->sio_ies);
        pending &= mask; /* Mask off not enabled interrupts */
 
-       if (pending) {
-               irq = irq_find_mapping(domain, __ffs(pending));
-               if (irq)
-                       generic_handle_irq(irq);
-       } else  {
+       if (pending)
+               generic_handle_domain_irq(domain, __ffs(pending));
+       else
                spurious_interrupt();
-       }
 }
 
 /*
index acd172d..ec18a04 100644 (file)
@@ -122,7 +122,7 @@ bail:
 
 static int pm8xxx_irq_block_handler(struct pm_irq_chip *chip, int block)
 {
-       int pmirq, irq, i, ret = 0;
+       int pmirq, i, ret = 0;
        unsigned int bits;
 
        ret = pm8xxx_read_block_irq(chip, block, &bits);
@@ -139,8 +139,7 @@ static int pm8xxx_irq_block_handler(struct pm_irq_chip *chip, int block)
        for (i = 0; i < 8; i++) {
                if (bits & (1 << i)) {
                        pmirq = block * 8 + i;
-                       irq = irq_find_mapping(chip->irqdomain, pmirq);
-                       generic_handle_irq(irq);
+                       generic_handle_domain_irq(chip->irqdomain, pmirq);
                }
        }
        return 0;
@@ -199,7 +198,7 @@ static void pm8xxx_irq_handler(struct irq_desc *desc)
 static void pm8821_irq_block_handler(struct pm_irq_chip *chip,
                                     int master, int block)
 {
-       int pmirq, irq, i, ret;
+       int pmirq, i, ret;
        unsigned int bits;
 
        ret = regmap_read(chip->regmap,
@@ -216,8 +215,7 @@ static void pm8821_irq_block_handler(struct pm_irq_chip *chip,
        for (i = 0; i < 8; i++) {
                if (bits & BIT(i)) {
                        pmirq = block * 8 + i;
-                       irq = irq_find_mapping(chip->irqdomain, pmirq);
-                       generic_handle_irq(irq);
+                       generic_handle_domain_irq(chip->irqdomain, pmirq);
                }
        }
 }
index ce8aed5..6a15fdf 100644 (file)
@@ -128,8 +128,6 @@ struct mmc_blk_data {
         * track of the current selected device partition.
         */
        unsigned int    part_curr;
-       struct device_attribute force_ro;
-       struct device_attribute power_ro_lock;
        int     area_type;
 
        /* debugfs files (only in main mmc_blk_data) */
@@ -281,6 +279,9 @@ out_put:
        return count;
 }
 
+static DEVICE_ATTR(ro_lock_until_next_power_on, 0,
+               power_ro_lock_show, power_ro_lock_store);
+
 static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
                             char *buf)
 {
@@ -313,6 +314,44 @@ out:
        return ret;
 }
 
+static DEVICE_ATTR(force_ro, 0644, force_ro_show, force_ro_store);
+
+static struct attribute *mmc_disk_attrs[] = {
+       &dev_attr_force_ro.attr,
+       &dev_attr_ro_lock_until_next_power_on.attr,
+       NULL,
+};
+
+static umode_t mmc_disk_attrs_is_visible(struct kobject *kobj,
+               struct attribute *a, int n)
+{
+       struct device *dev = container_of(kobj, struct device, kobj);
+       struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
+       umode_t mode = a->mode;
+
+       if (a == &dev_attr_ro_lock_until_next_power_on.attr &&
+           (md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
+           md->queue.card->ext_csd.boot_ro_lockable) {
+               mode = S_IRUGO;
+               if (!(md->queue.card->ext_csd.boot_ro_lock &
+                               EXT_CSD_BOOT_WP_B_PWR_WP_DIS))
+                       mode |= S_IWUSR;
+       }
+
+       mmc_blk_put(md);
+       return mode;
+}
+
+static const struct attribute_group mmc_disk_attr_group = {
+       .is_visible     = mmc_disk_attrs_is_visible,
+       .attrs          = mmc_disk_attrs,
+};
+
+static const struct attribute_group *mmc_disk_attr_groups[] = {
+       &mmc_disk_attr_group,
+       NULL,
+};
+
 static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
 {
        struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
@@ -792,6 +831,26 @@ static int mmc_blk_compat_ioctl(struct block_device *bdev, fmode_t mode,
 }
 #endif
 
+static int mmc_blk_alternative_gpt_sector(struct gendisk *disk,
+                                         sector_t *sector)
+{
+       struct mmc_blk_data *md;
+       int ret;
+
+       md = mmc_blk_get(disk);
+       if (!md)
+               return -EINVAL;
+
+       if (md->queue.card)
+               ret = mmc_card_alternative_gpt_sector(md->queue.card, sector);
+       else
+               ret = -ENODEV;
+
+       mmc_blk_put(md);
+
+       return ret;
+}
+
 static const struct block_device_operations mmc_bdops = {
        .open                   = mmc_blk_open,
        .release                = mmc_blk_release,
@@ -801,6 +860,7 @@ static const struct block_device_operations mmc_bdops = {
 #ifdef CONFIG_COMPAT
        .compat_ioctl           = mmc_blk_compat_ioctl,
 #endif
+       .alternative_gpt_sector = mmc_blk_alternative_gpt_sector,
 };
 
 static int mmc_blk_part_switch_pre(struct mmc_card *card,
@@ -2289,7 +2349,8 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
                                              sector_t size,
                                              bool default_ro,
                                              const char *subname,
-                                             int area_type)
+                                             int area_type,
+                                             unsigned int part_type)
 {
        struct mmc_blk_data *md;
        int devidx, ret;
@@ -2336,6 +2397,7 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
        kref_init(&md->kref);
 
        md->queue.blkdata = md;
+       md->part_type = part_type;
 
        md->disk->major = MMC_BLOCK_MAJOR;
        md->disk->minors = perdev_minors;
@@ -2388,6 +2450,10 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
                md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
                cap_str, md->read_only ? "(ro)" : "");
 
+       /* used in ->open, must be set before add_disk: */
+       if (area_type == MMC_BLK_DATA_AREA_MAIN)
+               dev_set_drvdata(&card->dev, md);
+       device_add_disk(md->parent, md->disk, mmc_disk_attr_groups);
        return md;
 
  err_kfree:
@@ -2417,7 +2483,7 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
        }
 
        return mmc_blk_alloc_req(card, &card->dev, size, false, NULL,
-                                       MMC_BLK_DATA_AREA_MAIN);
+                                       MMC_BLK_DATA_AREA_MAIN, 0);
 }
 
 static int mmc_blk_alloc_part(struct mmc_card *card,
@@ -2431,10 +2497,9 @@ static int mmc_blk_alloc_part(struct mmc_card *card,
        struct mmc_blk_data *part_md;
 
        part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro,
-                                   subname, area_type);
+                                   subname, area_type, part_type);
        if (IS_ERR(part_md))
                return PTR_ERR(part_md);
-       part_md->part_type = part_type;
        list_add(&part_md->part, &md->part);
 
        return 0;
@@ -2635,27 +2700,13 @@ static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
 
 static void mmc_blk_remove_req(struct mmc_blk_data *md)
 {
-       struct mmc_card *card;
-
-       if (md) {
-               /*
-                * Flush remaining requests and free queues. It
-                * is freeing the queue that stops new requests
-                * from being accepted.
-                */
-               card = md->queue.card;
-               if (md->disk->flags & GENHD_FL_UP) {
-                       device_remove_file(disk_to_dev(md->disk), &md->force_ro);
-                       if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
-                                       card->ext_csd.boot_ro_lockable)
-                               device_remove_file(disk_to_dev(md->disk),
-                                       &md->power_ro_lock);
-
-                       del_gendisk(md->disk);
-               }
-               mmc_cleanup_queue(&md->queue);
-               mmc_blk_put(md);
-       }
+       /*
+        * Flush remaining requests and free queues. It is freeing the queue
+        * that stops new requests from being accepted.
+        */
+       del_gendisk(md->disk);
+       mmc_cleanup_queue(&md->queue);
+       mmc_blk_put(md);
 }
 
 static void mmc_blk_remove_parts(struct mmc_card *card,
@@ -2679,51 +2730,6 @@ static void mmc_blk_remove_parts(struct mmc_card *card,
        }
 }
 
-static int mmc_add_disk(struct mmc_blk_data *md)
-{
-       int ret;
-       struct mmc_card *card = md->queue.card;
-
-       device_add_disk(md->parent, md->disk, NULL);
-       md->force_ro.show = force_ro_show;
-       md->force_ro.store = force_ro_store;
-       sysfs_attr_init(&md->force_ro.attr);
-       md->force_ro.attr.name = "force_ro";
-       md->force_ro.attr.mode = S_IRUGO | S_IWUSR;
-       ret = device_create_file(disk_to_dev(md->disk), &md->force_ro);
-       if (ret)
-               goto force_ro_fail;
-
-       if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
-            card->ext_csd.boot_ro_lockable) {
-               umode_t mode;
-
-               if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_DIS)
-                       mode = S_IRUGO;
-               else
-                       mode = S_IRUGO | S_IWUSR;
-
-               md->power_ro_lock.show = power_ro_lock_show;
-               md->power_ro_lock.store = power_ro_lock_store;
-               sysfs_attr_init(&md->power_ro_lock.attr);
-               md->power_ro_lock.attr.mode = mode;
-               md->power_ro_lock.attr.name =
-                                       "ro_lock_until_next_power_on";
-               ret = device_create_file(disk_to_dev(md->disk),
-                               &md->power_ro_lock);
-               if (ret)
-                       goto power_ro_lock_fail;
-       }
-       return ret;
-
-power_ro_lock_fail:
-       device_remove_file(disk_to_dev(md->disk), &md->force_ro);
-force_ro_fail:
-       del_gendisk(md->disk);
-
-       return ret;
-}
-
 #ifdef CONFIG_DEBUG_FS
 
 static int mmc_dbg_card_status_get(void *data, u64 *val)
@@ -2889,7 +2895,7 @@ static void mmc_blk_remove_debugfs(struct mmc_card *card,
 
 static int mmc_blk_probe(struct mmc_card *card)
 {
-       struct mmc_blk_data *md, *part_md;
+       struct mmc_blk_data *md;
        int ret = 0;
 
        /*
@@ -2917,18 +2923,6 @@ static int mmc_blk_probe(struct mmc_card *card)
        if (ret)
                goto out;
 
-       dev_set_drvdata(&card->dev, md);
-
-       ret = mmc_add_disk(md);
-       if (ret)
-               goto out;
-
-       list_for_each_entry(part_md, &md->part, part) {
-               ret = mmc_add_disk(part_md);
-               if (ret)
-                       goto out;
-       }
-
        /* Add two debugfs entries */
        mmc_blk_add_debugfs(card, md);
 
index 95fedcf..605f5e8 100644 (file)
@@ -2149,6 +2149,41 @@ int mmc_detect_card_removed(struct mmc_host *host)
 }
 EXPORT_SYMBOL(mmc_detect_card_removed);
 
+int mmc_card_alternative_gpt_sector(struct mmc_card *card, sector_t *gpt_sector)
+{
+       unsigned int boot_sectors_num;
+
+       if ((!(card->host->caps2 & MMC_CAP2_ALT_GPT_TEGRA)))
+               return -EOPNOTSUPP;
+
+       /* filter out unrelated cards */
+       if (card->ext_csd.rev < 3 ||
+           !mmc_card_mmc(card) ||
+           !mmc_card_is_blockaddr(card) ||
+            mmc_card_is_removable(card->host))
+               return -ENOENT;
+
+       /*
+        * eMMC storage has two special boot partitions in addition to the
+        * main one.  NVIDIA's bootloader linearizes eMMC boot0->boot1->main
+        * accesses, this means that the partition table addresses are shifted
+        * by the size of boot partitions.  In accordance with the eMMC
+        * specification, the boot partition size is calculated as follows:
+        *
+        *      boot partition size = 128K byte x BOOT_SIZE_MULT
+        *
+        * Calculate number of sectors occupied by the both boot partitions.
+        */
+       boot_sectors_num = card->ext_csd.raw_boot_mult * SZ_128K /
+                          SZ_512 * MMC_NUM_BOOT_PARTITION;
+
+       /* Defined by NVIDIA and used by Android devices. */
+       *gpt_sector = card->ext_csd.sectors - boot_sectors_num - 1;
+
+       return 0;
+}
+EXPORT_SYMBOL(mmc_card_alternative_gpt_sector);
+
 void mmc_rescan(struct work_struct *work)
 {
        struct mmc_host *host =
index 0c4de20..7931a4f 100644 (file)
@@ -119,6 +119,8 @@ void mmc_release_host(struct mmc_host *host);
 void mmc_get_card(struct mmc_card *card, struct mmc_ctx *ctx);
 void mmc_put_card(struct mmc_card *card, struct mmc_ctx *ctx);
 
+int mmc_card_alternative_gpt_sector(struct mmc_card *card, sector_t *sector);
+
 /**
  *     mmc_claim_host - exclusively claim a host
  *     @host: mmc host to claim
index 838726b..29e58ff 100644 (file)
@@ -418,6 +418,8 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
                ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT];
        card->ext_csd.raw_hc_erase_grp_size =
                ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
+       card->ext_csd.raw_boot_mult =
+               ext_csd[EXT_CSD_BOOT_MULT];
        if (card->ext_csd.rev >= 3) {
                u8 sa_shift = ext_csd[EXT_CSD_S_A_TIMEOUT];
                card->ext_csd.part_config = ext_csd[EXT_CSD_PART_CONFIG];
index d333130..c3229d8 100644 (file)
@@ -2018,8 +2018,8 @@ static void dw_mci_tasklet_func(struct tasklet_struct *t)
                                        continue;
                                }
 
-                               dw_mci_stop_dma(host);
                                send_stop_abort(host, data);
+                               dw_mci_stop_dma(host);
                                state = STATE_SENDING_STOP;
                                break;
                        }
@@ -2043,10 +2043,10 @@ static void dw_mci_tasklet_func(struct tasklet_struct *t)
                         */
                        if (test_and_clear_bit(EVENT_DATA_ERROR,
                                               &host->pending_events)) {
-                               dw_mci_stop_dma(host);
                                if (!(host->data_status & (SDMMC_INT_DRTO |
                                                           SDMMC_INT_EBE)))
                                        send_stop_abort(host, data);
+                               dw_mci_stop_dma(host);
                                state = STATE_DATA_ERROR;
                                break;
                        }
@@ -2079,10 +2079,10 @@ static void dw_mci_tasklet_func(struct tasklet_struct *t)
                         */
                        if (test_and_clear_bit(EVENT_DATA_ERROR,
                                               &host->pending_events)) {
-                               dw_mci_stop_dma(host);
                                if (!(host->data_status & (SDMMC_INT_DRTO |
                                                           SDMMC_INT_EBE)))
                                        send_stop_abort(host, data);
+                               dw_mci_stop_dma(host);
                                state = STATE_DATA_ERROR;
                                break;
                        }
index 51db30a..fdaa11f 100644 (file)
@@ -479,8 +479,9 @@ static int sdmmc_post_sig_volt_switch(struct mmci_host *host,
        u32 status;
        int ret = 0;
 
-       if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180) {
-               spin_lock_irqsave(&host->lock, flags);
+       spin_lock_irqsave(&host->lock, flags);
+       if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180 &&
+           host->pwr_reg & MCI_STM32_VSWITCHEN) {
                mmci_write_pwrreg(host, host->pwr_reg | MCI_STM32_VSWITCH);
                spin_unlock_irqrestore(&host->lock, flags);
 
@@ -492,9 +493,11 @@ static int sdmmc_post_sig_volt_switch(struct mmci_host *host,
 
                writel_relaxed(MCI_STM32_VSWENDC | MCI_STM32_CKSTOPC,
                               host->base + MMCICLEAR);
+               spin_lock_irqsave(&host->lock, flags);
                mmci_write_pwrreg(host, host->pwr_reg &
                                  ~(MCI_STM32_VSWITCHEN | MCI_STM32_VSWITCH));
        }
+       spin_unlock_irqrestore(&host->lock, flags);
 
        return ret;
 }
index cce390f..032bf85 100644 (file)
@@ -173,6 +173,23 @@ static unsigned int sdhci_iproc_get_max_clock(struct sdhci_host *host)
                return pltfm_host->clock;
 }
 
+/*
+ * There is a known bug on BCM2711's SDHCI core integration where the
+ * controller will hang when the difference between the core clock and the bus
+ * clock is too great. Specifically this can be reproduced under the following
+ * conditions:
+ *
+ *  - No SD card plugged in, polling thread is running, probing cards at
+ *    100 kHz.
+ *  - BCM2711's core clock configured at 500MHz or more
+ *
+ * So we set 200kHz as the minimum clock frequency available for that SoC.
+ */
+static unsigned int sdhci_iproc_bcm2711_get_min_clock(struct sdhci_host *host)
+{
+       return 200000;
+}
+
 static const struct sdhci_ops sdhci_iproc_ops = {
        .set_clock = sdhci_set_clock,
        .get_max_clock = sdhci_iproc_get_max_clock,
@@ -271,6 +288,7 @@ static const struct sdhci_ops sdhci_iproc_bcm2711_ops = {
        .set_clock = sdhci_set_clock,
        .set_power = sdhci_set_power_and_bus_voltage,
        .get_max_clock = sdhci_iproc_get_max_clock,
+       .get_min_clock = sdhci_iproc_bcm2711_get_min_clock,
        .set_bus_width = sdhci_set_bus_width,
        .reset = sdhci_reset,
        .set_uhs_signaling = sdhci_set_uhs_signaling,
index e44b7a6..290a14c 100644 (file)
@@ -2089,6 +2089,23 @@ static void sdhci_msm_cqe_disable(struct mmc_host *mmc, bool recovery)
        sdhci_cqe_disable(mmc, recovery);
 }
 
+static void sdhci_msm_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
+{
+       u32 count, start = 15;
+
+       __sdhci_set_timeout(host, cmd);
+       count = sdhci_readb(host, SDHCI_TIMEOUT_CONTROL);
+       /*
+        * Update software timeout value if its value is less than hardware data
+        * timeout value. Qcom SoC hardware data timeout value was calculated
+        * using 4 * MCLK * 2^(count + 13). where MCLK = 1 / host->clock.
+        */
+       if (cmd && cmd->data && host->clock > 400000 &&
+           host->clock <= 50000000 &&
+           ((1 << (count + start)) > (10 * host->clock)))
+               host->data_timeout = 22LL * NSEC_PER_SEC;
+}
+
 static const struct cqhci_host_ops sdhci_msm_cqhci_ops = {
        .enable         = sdhci_msm_cqe_enable,
        .disable        = sdhci_msm_cqe_disable,
@@ -2438,6 +2455,7 @@ static const struct sdhci_ops sdhci_msm_ops = {
        .irq    = sdhci_msm_cqe_irq,
        .dump_vendor_regs = sdhci_msm_dump_vendor_regs,
        .set_power = sdhci_set_power_noreg,
+       .set_timeout = sdhci_msm_set_timeout,
 };
 
 static const struct sdhci_pltfm_data sdhci_msm_pdata = {
index 387ce9c..a500187 100644 (file)
  */
 #define NVQUIRK_HAS_TMCLK                              BIT(10)
 
+#define NVQUIRK_HAS_ANDROID_GPT_SECTOR                 BIT(11)
+
 /* SDMMC CQE Base Address for Tegra Host Ver 4.1 and Higher */
 #define SDHCI_TEGRA_CQE_BASE_ADDR                      0xF000
 
@@ -1361,6 +1363,7 @@ static const struct sdhci_tegra_soc_data soc_data_tegra20 = {
        .pdata = &sdhci_tegra20_pdata,
        .dma_mask = DMA_BIT_MASK(32),
        .nvquirks = NVQUIRK_FORCE_SDHCI_SPEC_200 |
+                   NVQUIRK_HAS_ANDROID_GPT_SECTOR |
                    NVQUIRK_ENABLE_BLOCK_GAP_DET,
 };
 
@@ -1390,6 +1393,7 @@ static const struct sdhci_tegra_soc_data soc_data_tegra30 = {
        .nvquirks = NVQUIRK_ENABLE_SDHCI_SPEC_300 |
                    NVQUIRK_ENABLE_SDR50 |
                    NVQUIRK_ENABLE_SDR104 |
+                   NVQUIRK_HAS_ANDROID_GPT_SECTOR |
                    NVQUIRK_HAS_PADCALIB,
 };
 
@@ -1422,6 +1426,7 @@ static const struct sdhci_pltfm_data sdhci_tegra114_pdata = {
 static const struct sdhci_tegra_soc_data soc_data_tegra114 = {
        .pdata = &sdhci_tegra114_pdata,
        .dma_mask = DMA_BIT_MASK(32),
+       .nvquirks = NVQUIRK_HAS_ANDROID_GPT_SECTOR,
 };
 
 static const struct sdhci_pltfm_data sdhci_tegra124_pdata = {
@@ -1438,6 +1443,7 @@ static const struct sdhci_pltfm_data sdhci_tegra124_pdata = {
 static const struct sdhci_tegra_soc_data soc_data_tegra124 = {
        .pdata = &sdhci_tegra124_pdata,
        .dma_mask = DMA_BIT_MASK(34),
+       .nvquirks = NVQUIRK_HAS_ANDROID_GPT_SECTOR,
 };
 
 static const struct sdhci_ops tegra210_sdhci_ops = {
@@ -1616,6 +1622,9 @@ static int sdhci_tegra_probe(struct platform_device *pdev)
        tegra_host->pad_control_available = false;
        tegra_host->soc_data = soc_data;
 
+       if (soc_data->nvquirks & NVQUIRK_HAS_ANDROID_GPT_SECTOR)
+               host->mmc->caps2 |= MMC_CAP2_ALT_GPT_TEGRA;
+
        if (soc_data->nvquirks & NVQUIRK_NEEDS_PAD_CONTROL) {
                rc = tegra_sdhci_init_pinctrl_info(&pdev->dev, tegra_host);
                if (rc == 0)
index 3097e93..a761134 100644 (file)
@@ -119,7 +119,7 @@ static int cfi_use_status_reg(struct cfi_private *cfi)
        struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
        u8 poll_mask = CFI_POLL_STATUS_REG | CFI_POLL_DQ;
 
-       return extp->MinorVersion >= '5' &&
+       return extp && extp->MinorVersion >= '5' &&
                (extp->SoftwareFeatures & poll_mask) == CFI_POLL_STATUS_REG;
 }
 
index efc2003..99400d0 100644 (file)
@@ -229,7 +229,7 @@ static int mchp48l640_write(struct mtd_info *mtd, loff_t to, size_t len,
                woff += ws;
        }
 
-       return ret;
+       return 0;
 }
 
 static int mchp48l640_read_page(struct mtd_info *mtd, loff_t from, size_t len,
@@ -255,6 +255,7 @@ static int mchp48l640_read_page(struct mtd_info *mtd, loff_t from, size_t len,
        if (!ret)
                *retlen += len;
 
+       kfree(cmd);
        return ret;
 
 fail:
@@ -286,7 +287,7 @@ static int mchp48l640_read(struct mtd_info *mtd, loff_t from, size_t len,
                woff += ws;
        }
 
-       return ret;
+       return 0;
 };
 
 static const struct mchp48_caps mchp48l640_caps = {
index 6ce4bc5..44bea3f 100644 (file)
@@ -419,6 +419,7 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
        if (tr->discard) {
                blk_queue_flag_set(QUEUE_FLAG_DISCARD, new->rq);
                blk_queue_max_discard_sectors(new->rq, UINT_MAX);
+               new->rq->limits.discard_granularity = tr->blksize;
        }
 
        gd->queue = new->rq;
@@ -525,14 +526,10 @@ int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
        if (!blktrans_notifier.list.next)
                register_mtd_user(&blktrans_notifier);
 
-
-       mutex_lock(&mtd_table_mutex);
-
        ret = register_blkdev(tr->major, tr->name);
        if (ret < 0) {
                printk(KERN_WARNING "Unable to register %s block device on major %d: %d\n",
                       tr->name, tr->major, ret);
-               mutex_unlock(&mtd_table_mutex);
                return ret;
        }
 
@@ -542,12 +539,12 @@ int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
        tr->blkshift = ffs(tr->blksize) - 1;
 
        INIT_LIST_HEAD(&tr->devs);
-       list_add(&tr->list, &blktrans_majors);
 
+       mutex_lock(&mtd_table_mutex);
+       list_add(&tr->list, &blktrans_majors);
        mtd_for_each_device(mtd)
                if (mtd->type != MTD_ABSENT)
                        tr->add_mtd(tr, mtd);
-
        mutex_unlock(&mtd_table_mutex);
        return 0;
 }
@@ -564,8 +561,8 @@ int deregister_mtd_blktrans(struct mtd_blktrans_ops *tr)
        list_for_each_entry_safe(dev, next, &tr->devs, list)
                tr->remove_dev(dev);
 
-       unregister_blkdev(tr->major, tr->name);
        mutex_unlock(&mtd_table_mutex);
+       unregister_blkdev(tr->major, tr->name);
 
        BUG_ON(!list_empty(&tr->devs));
        return 0;
index b5ccd30..c8fd7f7 100644 (file)
@@ -806,7 +806,9 @@ static ssize_t mtd_otp_size(struct mtd_info *mtd, bool is_user)
 
 err:
        kfree(info);
-       return ret;
+
+       /* ENODATA means there is no OTP region. */
+       return ret == -ENODATA ? 0 : ret;
 }
 
 static struct nvmem_device *mtd_otp_nvmem_register(struct mtd_info *mtd,
index 57a5831..3d6c6e8 100644 (file)
@@ -5228,12 +5228,18 @@ static bool of_get_nand_on_flash_bbt(struct device_node *np)
 static int of_get_nand_secure_regions(struct nand_chip *chip)
 {
        struct device_node *dn = nand_get_flash_node(chip);
+       struct property *prop;
        int nr_elem, i, j;
 
-       nr_elem = of_property_count_elems_of_size(dn, "secure-regions", sizeof(u64));
-       if (!nr_elem)
+       /* Only proceed if the "secure-regions" property is present in DT */
+       prop = of_find_property(dn, "secure-regions", NULL);
+       if (!prop)
                return 0;
 
+       nr_elem = of_property_count_elems_of_size(dn, "secure-regions", sizeof(u64));
+       if (nr_elem <= 0)
+               return nr_elem;
+
        chip->nr_secure_regions = nr_elem / 2;
        chip->secure_regions = kcalloc(chip->nr_secure_regions, sizeof(*chip->secure_regions),
                                       GFP_KERNEL);
index a7ee0af..54e321a 100644 (file)
@@ -71,12 +71,18 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
                family = AF_INET6;
 
        if (bareudp->ethertype == htons(ETH_P_IP)) {
-               struct iphdr *iphdr;
+               __u8 ipversion;
 
-               iphdr = (struct iphdr *)(skb->data + BAREUDP_BASE_HLEN);
-               if (iphdr->version == 4) {
-                       proto = bareudp->ethertype;
-               } else if (bareudp->multi_proto_mode && (iphdr->version == 6)) {
+               if (skb_copy_bits(skb, BAREUDP_BASE_HLEN, &ipversion,
+                                 sizeof(ipversion))) {
+                       bareudp->dev->stats.rx_dropped++;
+                       goto drop;
+               }
+               ipversion >>= 4;
+
+               if (ipversion == 4) {
+                       proto = htons(ETH_P_IP);
+               } else if (ipversion == 6 && bareudp->multi_proto_mode) {
                        proto = htons(ETH_P_IPV6);
                } else {
                        bareudp->dev->stats.rx_dropped++;
index bba2a44..43bca31 100644 (file)
@@ -1164,10 +1164,10 @@ static int m_can_set_bittiming(struct net_device *dev)
                                    FIELD_PREP(TDCR_TDCO_MASK, tdco));
                }
 
-               reg_btp = FIELD_PREP(NBTP_NBRP_MASK, brp) |
-                         FIELD_PREP(NBTP_NSJW_MASK, sjw) |
-                         FIELD_PREP(NBTP_NTSEG1_MASK, tseg1) |
-                         FIELD_PREP(NBTP_NTSEG2_MASK, tseg2);
+               reg_btp |= FIELD_PREP(DBTP_DBRP_MASK, brp) |
+                       FIELD_PREP(DBTP_DSJW_MASK, sjw) |
+                       FIELD_PREP(DBTP_DTSEG1_MASK, tseg1) |
+                       FIELD_PREP(DBTP_DTSEG2_MASK, tseg2);
 
                m_can_write(cdev, M_CAN_DBTP, reg_btp);
        }
index dd17b8c..89d9c98 100644 (file)
@@ -218,7 +218,7 @@ static int hi3110_spi_trans(struct spi_device *spi, int len)
        return ret;
 }
 
-static u8 hi3110_cmd(struct spi_device *spi, u8 command)
+static int hi3110_cmd(struct spi_device *spi, u8 command)
 {
        struct hi3110_priv *priv = spi_get_drvdata(spi);
 
index 47c3f40..9ae4807 100644 (file)
@@ -2300,6 +2300,7 @@ static irqreturn_t mcp251xfd_irq(int irq, void *dev_id)
                   err, priv->regs_status.intf);
        mcp251xfd_dump(priv);
        mcp251xfd_chip_interrupts_disable(priv);
+       mcp251xfd_timestamp_stop(priv);
 
        return handled;
 }
index 0a37af4..2b5302e 100644 (file)
@@ -255,6 +255,8 @@ struct ems_usb {
        unsigned int free_slots; /* remember number of available slots */
 
        struct ems_cpc_msg active_params; /* active controller parameters */
+       void *rxbuf[MAX_RX_URBS];
+       dma_addr_t rxbuf_dma[MAX_RX_URBS];
 };
 
 static void ems_usb_read_interrupt_callback(struct urb *urb)
@@ -587,6 +589,7 @@ static int ems_usb_start(struct ems_usb *dev)
        for (i = 0; i < MAX_RX_URBS; i++) {
                struct urb *urb = NULL;
                u8 *buf = NULL;
+               dma_addr_t buf_dma;
 
                /* create a URB, and a buffer for it */
                urb = usb_alloc_urb(0, GFP_KERNEL);
@@ -596,7 +599,7 @@ static int ems_usb_start(struct ems_usb *dev)
                }
 
                buf = usb_alloc_coherent(dev->udev, RX_BUFFER_SIZE, GFP_KERNEL,
-                                        &urb->transfer_dma);
+                                        &buf_dma);
                if (!buf) {
                        netdev_err(netdev, "No memory left for USB buffer\n");
                        usb_free_urb(urb);
@@ -604,6 +607,8 @@ static int ems_usb_start(struct ems_usb *dev)
                        break;
                }
 
+               urb->transfer_dma = buf_dma;
+
                usb_fill_bulk_urb(urb, dev->udev, usb_rcvbulkpipe(dev->udev, 2),
                                  buf, RX_BUFFER_SIZE,
                                  ems_usb_read_bulk_callback, dev);
@@ -619,6 +624,9 @@ static int ems_usb_start(struct ems_usb *dev)
                        break;
                }
 
+               dev->rxbuf[i] = buf;
+               dev->rxbuf_dma[i] = buf_dma;
+
                /* Drop reference, USB core will take care of freeing it */
                usb_free_urb(urb);
        }
@@ -684,6 +692,10 @@ static void unlink_all_urbs(struct ems_usb *dev)
 
        usb_kill_anchored_urbs(&dev->rx_submitted);
 
+       for (i = 0; i < MAX_RX_URBS; ++i)
+               usb_free_coherent(dev->udev, RX_BUFFER_SIZE,
+                                 dev->rxbuf[i], dev->rxbuf_dma[i]);
+
        usb_kill_anchored_urbs(&dev->tx_submitted);
        atomic_set(&dev->active_tx_urbs, 0);
 
index 65b58f8..95ae740 100644 (file)
@@ -195,6 +195,8 @@ struct esd_usb2 {
        int net_count;
        u32 version;
        int rxinitdone;
+       void *rxbuf[MAX_RX_URBS];
+       dma_addr_t rxbuf_dma[MAX_RX_URBS];
 };
 
 struct esd_usb2_net_priv {
@@ -222,8 +224,8 @@ static void esd_usb2_rx_event(struct esd_usb2_net_priv *priv,
        if (id == ESD_EV_CAN_ERROR_EXT) {
                u8 state = msg->msg.rx.data[0];
                u8 ecc = msg->msg.rx.data[1];
-               u8 txerr = msg->msg.rx.data[2];
-               u8 rxerr = msg->msg.rx.data[3];
+               u8 rxerr = msg->msg.rx.data[2];
+               u8 txerr = msg->msg.rx.data[3];
 
                skb = alloc_can_err_skb(priv->netdev, &cf);
                if (skb == NULL) {
@@ -545,6 +547,7 @@ static int esd_usb2_setup_rx_urbs(struct esd_usb2 *dev)
        for (i = 0; i < MAX_RX_URBS; i++) {
                struct urb *urb = NULL;
                u8 *buf = NULL;
+               dma_addr_t buf_dma;
 
                /* create a URB, and a buffer for it */
                urb = usb_alloc_urb(0, GFP_KERNEL);
@@ -554,7 +557,7 @@ static int esd_usb2_setup_rx_urbs(struct esd_usb2 *dev)
                }
 
                buf = usb_alloc_coherent(dev->udev, RX_BUFFER_SIZE, GFP_KERNEL,
-                                        &urb->transfer_dma);
+                                        &buf_dma);
                if (!buf) {
                        dev_warn(dev->udev->dev.parent,
                                 "No memory left for USB buffer\n");
@@ -562,6 +565,8 @@ static int esd_usb2_setup_rx_urbs(struct esd_usb2 *dev)
                        goto freeurb;
                }
 
+               urb->transfer_dma = buf_dma;
+
                usb_fill_bulk_urb(urb, dev->udev,
                                  usb_rcvbulkpipe(dev->udev, 1),
                                  buf, RX_BUFFER_SIZE,
@@ -574,8 +579,12 @@ static int esd_usb2_setup_rx_urbs(struct esd_usb2 *dev)
                        usb_unanchor_urb(urb);
                        usb_free_coherent(dev->udev, RX_BUFFER_SIZE, buf,
                                          urb->transfer_dma);
+                       goto freeurb;
                }
 
+               dev->rxbuf[i] = buf;
+               dev->rxbuf_dma[i] = buf_dma;
+
 freeurb:
                /* Drop reference, USB core will take care of freeing it */
                usb_free_urb(urb);
@@ -663,6 +672,11 @@ static void unlink_all_urbs(struct esd_usb2 *dev)
        int i, j;
 
        usb_kill_anchored_urbs(&dev->rx_submitted);
+
+       for (i = 0; i < MAX_RX_URBS; ++i)
+               usb_free_coherent(dev->udev, RX_BUFFER_SIZE,
+                                 dev->rxbuf[i], dev->rxbuf_dma[i]);
+
        for (i = 0; i < dev->net_count; i++) {
                priv = dev->nets[i];
                if (priv) {
index a45865b..a1a154c 100644 (file)
@@ -653,6 +653,8 @@ static int mcba_usb_start(struct mcba_priv *priv)
                        break;
                }
 
+               urb->transfer_dma = buf_dma;
+
                usb_fill_bulk_urb(urb, priv->udev,
                                  usb_rcvbulkpipe(priv->udev, MCBA_USB_EP_IN),
                                  buf, MCBA_USB_RX_BUFF_SIZE,
index 1d6f772..899a3d2 100644 (file)
 #define PCAN_USB_BERR_MASK     (PCAN_USB_ERR_RXERR | PCAN_USB_ERR_TXERR)
 
 /* identify bus event packets with rx/tx error counters */
-#define PCAN_USB_ERR_CNT               0x80
+#define PCAN_USB_ERR_CNT_DEC           0x00    /* counters are decreasing */
+#define PCAN_USB_ERR_CNT_INC           0x80    /* counters are increasing */
 
 /* private to PCAN-USB adapter */
 struct pcan_usb {
@@ -608,11 +609,12 @@ static int pcan_usb_handle_bus_evt(struct pcan_usb_msg_context *mc, u8 ir)
 
        /* acccording to the content of the packet */
        switch (ir) {
-       case PCAN_USB_ERR_CNT:
+       case PCAN_USB_ERR_CNT_DEC:
+       case PCAN_USB_ERR_CNT_INC:
 
                /* save rx/tx error counters from in the device context */
-               pdev->bec.rxerr = mc->ptr[0];
-               pdev->bec.txerr = mc->ptr[1];
+               pdev->bec.rxerr = mc->ptr[1];
+               pdev->bec.txerr = mc->ptr[2];
                break;
 
        default:
index b6e7ef0..d1b83bd 100644 (file)
@@ -137,7 +137,8 @@ struct usb_8dev_priv {
        u8 *cmd_msg_buffer;
 
        struct mutex usb_8dev_cmd_lock;
-
+       void *rxbuf[MAX_RX_URBS];
+       dma_addr_t rxbuf_dma[MAX_RX_URBS];
 };
 
 /* tx frame */
@@ -733,6 +734,7 @@ static int usb_8dev_start(struct usb_8dev_priv *priv)
        for (i = 0; i < MAX_RX_URBS; i++) {
                struct urb *urb = NULL;
                u8 *buf;
+               dma_addr_t buf_dma;
 
                /* create a URB, and a buffer for it */
                urb = usb_alloc_urb(0, GFP_KERNEL);
@@ -742,7 +744,7 @@ static int usb_8dev_start(struct usb_8dev_priv *priv)
                }
 
                buf = usb_alloc_coherent(priv->udev, RX_BUFFER_SIZE, GFP_KERNEL,
-                                        &urb->transfer_dma);
+                                        &buf_dma);
                if (!buf) {
                        netdev_err(netdev, "No memory left for USB buffer\n");
                        usb_free_urb(urb);
@@ -750,6 +752,8 @@ static int usb_8dev_start(struct usb_8dev_priv *priv)
                        break;
                }
 
+               urb->transfer_dma = buf_dma;
+
                usb_fill_bulk_urb(urb, priv->udev,
                                  usb_rcvbulkpipe(priv->udev,
                                                  USB_8DEV_ENDP_DATA_RX),
@@ -767,6 +771,9 @@ static int usb_8dev_start(struct usb_8dev_priv *priv)
                        break;
                }
 
+               priv->rxbuf[i] = buf;
+               priv->rxbuf_dma[i] = buf_dma;
+
                /* Drop reference, USB core will take care of freeing it */
                usb_free_urb(urb);
        }
@@ -836,6 +843,10 @@ static void unlink_all_urbs(struct usb_8dev_priv *priv)
 
        usb_kill_anchored_urbs(&priv->rx_submitted);
 
+       for (i = 0; i < MAX_RX_URBS; ++i)
+               usb_free_coherent(priv->udev, RX_BUFFER_SIZE,
+                                 priv->rxbuf[i], priv->rxbuf_dma[i]);
+
        usb_kill_anchored_urbs(&priv->tx_submitted);
        atomic_set(&priv->active_tx_urbs, 0);
 
index 9fdcc4b..7062db6 100644 (file)
@@ -912,6 +912,7 @@ static int hellcreek_fdb_dump(struct dsa_switch *ds, int port,
 {
        struct hellcreek *hellcreek = ds->priv;
        u16 entries;
+       int ret = 0;
        size_t i;
 
        mutex_lock(&hellcreek->reg_lock);
@@ -943,12 +944,14 @@ static int hellcreek_fdb_dump(struct dsa_switch *ds, int port,
                if (!(entry.portmask & BIT(port)))
                        continue;
 
-               cb(entry.mac, 0, entry.is_static, data);
+               ret = cb(entry.mac, 0, entry.is_static, data);
+               if (ret)
+                       break;
        }
 
        mutex_unlock(&hellcreek->reg_lock);
 
-       return 0;
+       return ret;
 }
 
 static int hellcreek_vlan_filtering(struct dsa_switch *ds, int port,
@@ -1469,9 +1472,6 @@ static void hellcreek_setup_gcl(struct hellcreek *hellcreek, int port,
                u16 data;
                u8 gates;
 
-               cur++;
-               next++;
-
                if (i == schedule->num_entries)
                        gates = initial->gate_mask ^
                                cur->gate_mask;
@@ -1500,6 +1500,9 @@ static void hellcreek_setup_gcl(struct hellcreek *hellcreek, int port,
                        (initial->gate_mask <<
                         TR_GCLCMD_INIT_GATE_STATES_SHIFT);
                hellcreek_write(hellcreek, data, TR_GCLCMD);
+
+               cur++;
+               next++;
        }
 }
 
@@ -1547,7 +1550,7 @@ static bool hellcreek_schedule_startable(struct hellcreek *hellcreek, int port)
        /* Calculate difference to admin base time */
        base_time_ns = ktime_to_ns(hellcreek_port->current_schedule->base_time);
 
-       return base_time_ns - current_ns < (s64)8 * NSEC_PER_SEC;
+       return base_time_ns - current_ns < (s64)4 * NSEC_PER_SEC;
 }
 
 static void hellcreek_start_schedule(struct hellcreek *hellcreek, int port)
index 3443740..d7ce281 100644 (file)
@@ -557,12 +557,12 @@ static int lan9303_alr_make_entry_raw(struct lan9303 *chip, u32 dat0, u32 dat1)
        return 0;
 }
 
-typedef void alr_loop_cb_t(struct lan9303 *chip, u32 dat0, u32 dat1,
-                          int portmap, void *ctx);
+typedef int alr_loop_cb_t(struct lan9303 *chip, u32 dat0, u32 dat1,
+                         int portmap, void *ctx);
 
-static void lan9303_alr_loop(struct lan9303 *chip, alr_loop_cb_t *cb, void *ctx)
+static int lan9303_alr_loop(struct lan9303 *chip, alr_loop_cb_t *cb, void *ctx)
 {
-       int i;
+       int ret = 0, i;
 
        mutex_lock(&chip->alr_mutex);
        lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_CMD,
@@ -582,13 +582,17 @@ static void lan9303_alr_loop(struct lan9303 *chip, alr_loop_cb_t *cb, void *ctx)
                                                LAN9303_ALR_DAT1_PORT_BITOFFS;
                portmap = alrport_2_portmap[alrport];
 
-               cb(chip, dat0, dat1, portmap, ctx);
+               ret = cb(chip, dat0, dat1, portmap, ctx);
+               if (ret)
+                       break;
 
                lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_CMD,
                                         LAN9303_ALR_CMD_GET_NEXT);
                lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_CMD, 0);
        }
        mutex_unlock(&chip->alr_mutex);
+
+       return ret;
 }
 
 static void alr_reg_to_mac(u32 dat0, u32 dat1, u8 mac[6])
@@ -606,18 +610,20 @@ struct del_port_learned_ctx {
 };
 
 /* Clear learned (non-static) entry on given port */
-static void alr_loop_cb_del_port_learned(struct lan9303 *chip, u32 dat0,
-                                        u32 dat1, int portmap, void *ctx)
+static int alr_loop_cb_del_port_learned(struct lan9303 *chip, u32 dat0,
+                                       u32 dat1, int portmap, void *ctx)
 {
        struct del_port_learned_ctx *del_ctx = ctx;
        int port = del_ctx->port;
 
        if (((BIT(port) & portmap) == 0) || (dat1 & LAN9303_ALR_DAT1_STATIC))
-               return;
+               return 0;
 
        /* learned entries has only one port, we can just delete */
        dat1 &= ~LAN9303_ALR_DAT1_VALID; /* delete entry */
        lan9303_alr_make_entry_raw(chip, dat0, dat1);
+
+       return 0;
 }
 
 struct port_fdb_dump_ctx {
@@ -626,19 +632,19 @@ struct port_fdb_dump_ctx {
        dsa_fdb_dump_cb_t *cb;
 };
 
-static void alr_loop_cb_fdb_port_dump(struct lan9303 *chip, u32 dat0,
-                                     u32 dat1, int portmap, void *ctx)
+static int alr_loop_cb_fdb_port_dump(struct lan9303 *chip, u32 dat0,
+                                    u32 dat1, int portmap, void *ctx)
 {
        struct port_fdb_dump_ctx *dump_ctx = ctx;
        u8 mac[ETH_ALEN];
        bool is_static;
 
        if ((BIT(dump_ctx->port) & portmap) == 0)
-               return;
+               return 0;
 
        alr_reg_to_mac(dat0, dat1, mac);
        is_static = !!(dat1 & LAN9303_ALR_DAT1_STATIC);
-       dump_ctx->cb(mac, 0, is_static, dump_ctx->data);
+       return dump_ctx->cb(mac, 0, is_static, dump_ctx->data);
 }
 
 /* Set a static ALR entry. Delete entry if port_map is zero */
@@ -1210,9 +1216,7 @@ static int lan9303_port_fdb_dump(struct dsa_switch *ds, int port,
        };
 
        dev_dbg(chip->dev, "%s(%d)\n", __func__, port);
-       lan9303_alr_loop(chip, alr_loop_cb_fdb_port_dump, &dump_ctx);
-
-       return 0;
+       return lan9303_alr_loop(chip, alr_loop_cb_fdb_port_dump, &dump_ctx);
 }
 
 static int lan9303_port_mdb_prepare(struct dsa_switch *ds, int port,
index 314ae78..e78026e 100644 (file)
@@ -1404,11 +1404,17 @@ static int gswip_port_fdb_dump(struct dsa_switch *ds, int port,
                addr[1] = mac_bridge.key[2] & 0xff;
                addr[0] = (mac_bridge.key[2] >> 8) & 0xff;
                if (mac_bridge.val[1] & GSWIP_TABLE_MAC_BRIDGE_STATIC) {
-                       if (mac_bridge.val[0] & BIT(port))
-                               cb(addr, 0, true, data);
+                       if (mac_bridge.val[0] & BIT(port)) {
+                               err = cb(addr, 0, true, data);
+                               if (err)
+                                       return err;
+                       }
                } else {
-                       if (((mac_bridge.val[0] & GENMASK(7, 4)) >> 4) == port)
-                               cb(addr, 0, false, data);
+                       if (((mac_bridge.val[0] & GENMASK(7, 4)) >> 4) == port) {
+                               err = cb(addr, 0, false, data);
+                               if (err)
+                                       return err;
+                       }
                }
        }
        return 0;
index 560f684..c5142f8 100644 (file)
@@ -687,8 +687,8 @@ static void ksz8_r_vlan_entries(struct ksz_device *dev, u16 addr)
        shifts = ksz8->shifts;
 
        ksz8_r_table(dev, TABLE_VLAN, addr, &data);
-       addr *= dev->phy_port_cnt;
-       for (i = 0; i < dev->phy_port_cnt; i++) {
+       addr *= 4;
+       for (i = 0; i < 4; i++) {
                dev->vlan_cache[addr + i].table[0] = (u16)data;
                data >>= shifts[VLAN_TABLE];
        }
@@ -702,7 +702,7 @@ static void ksz8_r_vlan_table(struct ksz_device *dev, u16 vid, u16 *vlan)
        u64 buf;
 
        data = (u16 *)&buf;
-       addr = vid / dev->phy_port_cnt;
+       addr = vid / 4;
        index = vid & 3;
        ksz8_r_table(dev, TABLE_VLAN, addr, &buf);
        *vlan = data[index];
@@ -716,7 +716,7 @@ static void ksz8_w_vlan_table(struct ksz_device *dev, u16 vid, u16 vlan)
        u64 buf;
 
        data = (u16 *)&buf;
-       addr = vid / dev->phy_port_cnt;
+       addr = vid / 4;
        index = vid & 3;
        ksz8_r_table(dev, TABLE_VLAN, addr, &buf);
        data[index] = vlan;
@@ -1119,24 +1119,67 @@ static int ksz8_port_vlan_filtering(struct dsa_switch *ds, int port, bool flag,
        if (ksz_is_ksz88x3(dev))
                return -ENOTSUPP;
 
+       /* Discard packets with VID not enabled on the switch */
        ksz_cfg(dev, S_MIRROR_CTRL, SW_VLAN_ENABLE, flag);
 
+       /* Discard packets with VID not enabled on the ingress port */
+       for (port = 0; port < dev->phy_port_cnt; ++port)
+               ksz_port_cfg(dev, port, REG_PORT_CTRL_2, PORT_INGRESS_FILTER,
+                            flag);
+
        return 0;
 }
 
+static void ksz8_port_enable_pvid(struct ksz_device *dev, int port, bool state)
+{
+       if (ksz_is_ksz88x3(dev)) {
+               ksz_cfg(dev, REG_SW_INSERT_SRC_PVID,
+                       0x03 << (4 - 2 * port), state);
+       } else {
+               ksz_pwrite8(dev, port, REG_PORT_CTRL_12, state ? 0x0f : 0x00);
+       }
+}
+
 static int ksz8_port_vlan_add(struct dsa_switch *ds, int port,
                              const struct switchdev_obj_port_vlan *vlan,
                              struct netlink_ext_ack *extack)
 {
        bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
        struct ksz_device *dev = ds->priv;
+       struct ksz_port *p = &dev->ports[port];
        u16 data, new_pvid = 0;
        u8 fid, member, valid;
 
        if (ksz_is_ksz88x3(dev))
                return -ENOTSUPP;
 
-       ksz_port_cfg(dev, port, P_TAG_CTRL, PORT_REMOVE_TAG, untagged);
+       /* If a VLAN is added with untagged flag different from the
+        * port's Remove Tag flag, we need to change the latter.
+        * Ignore VID 0, which is always untagged.
+        * Ignore CPU port, which will always be tagged.
+        */
+       if (untagged != p->remove_tag && vlan->vid != 0 &&
+           port != dev->cpu_port) {
+               unsigned int vid;
+
+               /* Reject attempts to add a VLAN that requires the
+                * Remove Tag flag to be changed, unless there are no
+                * other VLANs currently configured.
+                */
+               for (vid = 1; vid < dev->num_vlans; ++vid) {
+                       /* Skip the VID we are going to add or reconfigure */
+                       if (vid == vlan->vid)
+                               continue;
+
+                       ksz8_from_vlan(dev, dev->vlan_cache[vid].table[0],
+                                      &fid, &member, &valid);
+                       if (valid && (member & BIT(port)))
+                               return -EINVAL;
+               }
+
+               ksz_port_cfg(dev, port, P_TAG_CTRL, PORT_REMOVE_TAG, untagged);
+               p->remove_tag = untagged;
+       }
 
        ksz8_r_vlan_table(dev, vlan->vid, &data);
        ksz8_from_vlan(dev, data, &fid, &member, &valid);
@@ -1160,9 +1203,11 @@ static int ksz8_port_vlan_add(struct dsa_switch *ds, int port,
                u16 vid;
 
                ksz_pread16(dev, port, REG_PORT_CTRL_VID, &vid);
-               vid &= 0xfff;
+               vid &= ~VLAN_VID_MASK;
                vid |= new_pvid;
                ksz_pwrite16(dev, port, REG_PORT_CTRL_VID, vid);
+
+               ksz8_port_enable_pvid(dev, port, true);
        }
 
        return 0;
@@ -1171,9 +1216,8 @@ static int ksz8_port_vlan_add(struct dsa_switch *ds, int port,
 static int ksz8_port_vlan_del(struct dsa_switch *ds, int port,
                              const struct switchdev_obj_port_vlan *vlan)
 {
-       bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
        struct ksz_device *dev = ds->priv;
-       u16 data, pvid, new_pvid = 0;
+       u16 data, pvid;
        u8 fid, member, valid;
 
        if (ksz_is_ksz88x3(dev))
@@ -1182,8 +1226,6 @@ static int ksz8_port_vlan_del(struct dsa_switch *ds, int port,
        ksz_pread16(dev, port, REG_PORT_CTRL_VID, &pvid);
        pvid = pvid & 0xFFF;
 
-       ksz_port_cfg(dev, port, P_TAG_CTRL, PORT_REMOVE_TAG, untagged);
-
        ksz8_r_vlan_table(dev, vlan->vid, &data);
        ksz8_from_vlan(dev, data, &fid, &member, &valid);
 
@@ -1195,14 +1237,11 @@ static int ksz8_port_vlan_del(struct dsa_switch *ds, int port,
                valid = 0;
        }
 
-       if (pvid == vlan->vid)
-               new_pvid = 1;
-
        ksz8_to_vlan(dev, fid, member, valid, &data);
        ksz8_w_vlan_table(dev, vlan->vid, data);
 
-       if (new_pvid != pvid)
-               ksz_pwrite16(dev, port, REG_PORT_CTRL_VID, pvid);
+       if (pvid == vlan->vid)
+               ksz8_port_enable_pvid(dev, port, false);
 
        return 0;
 }
@@ -1435,6 +1474,9 @@ static int ksz8_setup(struct dsa_switch *ds)
 
        ksz_cfg(dev, S_MIRROR_CTRL, SW_MIRROR_RX_TX, false);
 
+       if (!ksz_is_ksz88x3(dev))
+               ksz_cfg(dev, REG_SW_CTRL_19, SW_INS_TAG_ENABLE, true);
+
        /* set broadcast storm protection 10% rate */
        regmap_update_bits(dev->regmap[1], S_REPLACE_VID_CTRL,
                           BROADCAST_STORM_RATE,
@@ -1717,6 +1759,16 @@ static int ksz8_switch_init(struct ksz_device *dev)
        /* set the real number of ports */
        dev->ds->num_ports = dev->port_cnt;
 
+       /* We rely on software untagging on the CPU port, so that we
+        * can support both tagged and untagged VLANs
+        */
+       dev->ds->untag_bridge_pvid = true;
+
+       /* VLAN filtering is partly controlled by the global VLAN
+        * Enable flag
+        */
+       dev->ds->vlan_filtering_is_global = true;
+
        return 0;
 }
 
index a323556..6b40bc2 100644 (file)
 #define REG_PORT_4_OUT_RATE_3          0xEE
 #define REG_PORT_5_OUT_RATE_3          0xFE
 
+/* 88x3 specific */
+
+#define REG_SW_INSERT_SRC_PVID         0xC2
+
 /* PME */
 
 #define SW_PME_OUTPUT_ENABLE           BIT(1)
index 2e6bfd3..1597c63 100644 (file)
@@ -27,6 +27,7 @@ struct ksz_port_mib {
 struct ksz_port {
        u16 member;
        u16 vid_member;
+       bool remove_tag;                /* Remove Tag flag set, for ksz8795 only */
        int stp_state;
        struct phy_device phydev;
 
@@ -205,12 +206,8 @@ static inline int ksz_read64(struct ksz_device *dev, u32 reg, u64 *val)
        int ret;
 
        ret = regmap_bulk_read(dev->regmap[2], reg, value, 2);
-       if (!ret) {
-               /* Ick! ToDo: Add 64bit R/W to regmap on 32bit systems */
-               value[0] = swab32(value[0]);
-               value[1] = swab32(value[1]);
-               *val = swab64((u64)*value);
-       }
+       if (!ret)
+               *val = (u64)value[0] << 32 | value[1];
 
        return ret;
 }
index 69f21b7..632f0fc 100644 (file)
@@ -47,6 +47,7 @@ static const struct mt7530_mib_desc mt7530_mib[] = {
        MIB_DESC(2, 0x48, "TxBytes"),
        MIB_DESC(1, 0x60, "RxDrop"),
        MIB_DESC(1, 0x64, "RxFiltering"),
+       MIB_DESC(1, 0x68, "RxUnicast"),
        MIB_DESC(1, 0x6c, "RxMulticast"),
        MIB_DESC(1, 0x70, "RxBroadcast"),
        MIB_DESC(1, 0x74, "RxAlignErr"),
index beb4157..272b053 100644 (file)
@@ -2155,7 +2155,7 @@ static int mv88e6xxx_port_vlan_leave(struct mv88e6xxx_chip *chip,
        int i, err;
 
        if (!vid)
-               return -EOPNOTSUPP;
+               return 0;
 
        err = mv88e6xxx_vtu_get(chip, vid, &vlan);
        if (err)
index b1d46dd..6ea0036 100644 (file)
@@ -1277,15 +1277,16 @@ static int mv88e6393x_serdes_port_errata(struct mv88e6xxx_chip *chip, int lane)
        int err;
 
        /* mv88e6393x family errata 4.6:
-        * Cannot clear PwrDn bit on SERDES on port 0 if device is configured
-        * CPU_MGD mode or P0_mode is configured for [x]MII.
-        * Workaround: Set Port0 SERDES register 4.F002 bit 5=0 and bit 15=1.
+        * Cannot clear PwrDn bit on SERDES if device is configured CPU_MGD
+        * mode or P0_mode is configured for [x]MII.
+        * Workaround: Set SERDES register 4.F002 bit 5=0 and bit 15=1.
         *
         * It seems that after this workaround the SERDES is automatically
         * powered up (the bit is cleared), so power it down.
         */
-       if (lane == MV88E6393X_PORT0_LANE) {
-               err = mv88e6390_serdes_read(chip, MV88E6393X_PORT0_LANE,
+       if (lane == MV88E6393X_PORT0_LANE || lane == MV88E6393X_PORT9_LANE ||
+           lane == MV88E6393X_PORT10_LANE) {
+               err = mv88e6390_serdes_read(chip, lane,
                                            MDIO_MMD_PHYXS,
                                            MV88E6393X_SERDES_POC, &reg);
                if (err)
index ca2ad77..563d8a2 100644 (file)
         AR9331_SW_PORT_STATUS_RX_FLOW_EN | AR9331_SW_PORT_STATUS_TX_FLOW_EN | \
         AR9331_SW_PORT_STATUS_SPEED_M)
 
+#define AR9331_SW_REG_PORT_CTRL(_port)                 (0x104 + (_port) * 0x100)
+#define AR9331_SW_PORT_CTRL_HEAD_EN                    BIT(11)
+#define AR9331_SW_PORT_CTRL_PORT_STATE                 GENMASK(2, 0)
+#define AR9331_SW_PORT_CTRL_PORT_STATE_DISABLED                0
+#define AR9331_SW_PORT_CTRL_PORT_STATE_BLOCKING                1
+#define AR9331_SW_PORT_CTRL_PORT_STATE_LISTENING       2
+#define AR9331_SW_PORT_CTRL_PORT_STATE_LEARNING                3
+#define AR9331_SW_PORT_CTRL_PORT_STATE_FORWARD         4
+
+#define AR9331_SW_REG_PORT_VLAN(_port)                 (0x108 + (_port) * 0x100)
+#define AR9331_SW_PORT_VLAN_8021Q_MODE                 GENMASK(31, 30)
+#define AR9331_SW_8021Q_MODE_SECURE                    3
+#define AR9331_SW_8021Q_MODE_CHECK                     2
+#define AR9331_SW_8021Q_MODE_FALLBACK                  1
+#define AR9331_SW_8021Q_MODE_NONE                      0
+#define AR9331_SW_PORT_VLAN_PORT_VID_MEMBER            GENMASK(25, 16)
+
 /* MIB registers */
 #define AR9331_MIB_COUNTER(x)                  (0x20000 + ((x) * 0x100))
 
@@ -371,12 +388,60 @@ static int ar9331_sw_mbus_init(struct ar9331_sw_priv *priv)
        return 0;
 }
 
-static int ar9331_sw_setup(struct dsa_switch *ds)
+static int ar9331_sw_setup_port(struct dsa_switch *ds, int port)
 {
        struct ar9331_sw_priv *priv = (struct ar9331_sw_priv *)ds->priv;
        struct regmap *regmap = priv->regmap;
+       u32 port_mask, port_ctrl, val;
        int ret;
 
+       /* Generate default port settings */
+       port_ctrl = FIELD_PREP(AR9331_SW_PORT_CTRL_PORT_STATE,
+                              AR9331_SW_PORT_CTRL_PORT_STATE_FORWARD);
+
+       if (dsa_is_cpu_port(ds, port)) {
+               /* CPU port should be allowed to communicate with all user
+                * ports.
+                */
+               port_mask = dsa_user_ports(ds);
+               /* Enable Atheros header on CPU port. This will allow us
+                * communicate with each port separately
+                */
+               port_ctrl |= AR9331_SW_PORT_CTRL_HEAD_EN;
+       } else if (dsa_is_user_port(ds, port)) {
+               /* User ports should communicate only with the CPU port.
+                */
+               port_mask = BIT(dsa_upstream_port(ds, port));
+       } else {
+               /* Other ports do not need to communicate at all */
+               port_mask = 0;
+       }
+
+       val = FIELD_PREP(AR9331_SW_PORT_VLAN_8021Q_MODE,
+                        AR9331_SW_8021Q_MODE_NONE) |
+               FIELD_PREP(AR9331_SW_PORT_VLAN_PORT_VID_MEMBER, port_mask);
+
+       ret = regmap_write(regmap, AR9331_SW_REG_PORT_VLAN(port), val);
+       if (ret)
+               goto error;
+
+       ret = regmap_write(regmap, AR9331_SW_REG_PORT_CTRL(port), port_ctrl);
+       if (ret)
+               goto error;
+
+       return 0;
+error:
+       dev_err(priv->dev, "%s: error: %i\n", __func__, ret);
+
+       return ret;
+}
+
+static int ar9331_sw_setup(struct dsa_switch *ds)
+{
+       struct ar9331_sw_priv *priv = (struct ar9331_sw_priv *)ds->priv;
+       struct regmap *regmap = priv->regmap;
+       int ret, i;
+
        ret = ar9331_sw_reset(priv);
        if (ret)
                return ret;
@@ -402,6 +467,12 @@ static int ar9331_sw_setup(struct dsa_switch *ds)
        if (ret)
                goto error;
 
+       for (i = 0; i < ds->num_ports; i++) {
+               ret = ar9331_sw_setup_port(ds, i);
+               if (ret)
+                       goto error;
+       }
+
        ds->configure_vlan_while_not_filtering = false;
 
        return 0;
@@ -837,16 +908,24 @@ static int ar9331_mdio_write(void *ctx, u32 reg, u32 val)
                return 0;
        }
 
-       ret = __ar9331_mdio_write(sbus, AR9331_SW_MDIO_PHY_MODE_REG, reg, val);
+       /* In case of this switch we work with 32bit registers on top of 16bit
+        * bus. Some registers (for example access to forwarding database) have
+        * trigger bit on the first 16bit half of request, the result and
+        * configuration of request in the second half.
+        * To make it work properly, we should do the second part of transfer
+        * before the first one is done.
+        */
+       ret = __ar9331_mdio_write(sbus, AR9331_SW_MDIO_PHY_MODE_REG, reg + 2,
+                                 val >> 16);
        if (ret < 0)
                goto error;
 
-       ret = __ar9331_mdio_write(sbus, AR9331_SW_MDIO_PHY_MODE_REG, reg + 2,
-                                 val >> 16);
+       ret = __ar9331_mdio_write(sbus, AR9331_SW_MDIO_PHY_MODE_REG, reg, val);
        if (ret < 0)
                goto error;
 
        return 0;
+
 error:
        dev_err_ratelimited(&sbus->dev, "Bus error. Failed to write register.\n");
        return ret;
index 56fead6..1477091 100644 (file)
@@ -304,6 +304,15 @@ sja1105pqrs_common_l2_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
                        hostcmd = SJA1105_HOSTCMD_INVALIDATE;
        }
        sja1105_packing(p, &hostcmd, 25, 23, size, op);
+}
+
+static void
+sja1105pqrs_l2_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+                                 enum packing_op op)
+{
+       int entry_size = SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY;
+
+       sja1105pqrs_common_l2_lookup_cmd_packing(buf, cmd, op, entry_size);
 
        /* Hack - The hardware takes the 'index' field within
         * struct sja1105_l2_lookup_entry as the index on which this command
@@ -313,26 +322,18 @@ sja1105pqrs_common_l2_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
         * such that our API doesn't need to ask for a full-blown entry
         * structure when e.g. a delete is requested.
         */
-       sja1105_packing(buf, &cmd->index, 15, 6,
-                       SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY, op);
-}
-
-static void
-sja1105pqrs_l2_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
-                                 enum packing_op op)
-{
-       int size = SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY;
-
-       return sja1105pqrs_common_l2_lookup_cmd_packing(buf, cmd, op, size);
+       sja1105_packing(buf, &cmd->index, 15, 6, entry_size, op);
 }
 
 static void
 sja1110_l2_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
                              enum packing_op op)
 {
-       int size = SJA1110_SIZE_L2_LOOKUP_ENTRY;
+       int entry_size = SJA1110_SIZE_L2_LOOKUP_ENTRY;
+
+       sja1105pqrs_common_l2_lookup_cmd_packing(buf, cmd, op, entry_size);
 
-       return sja1105pqrs_common_l2_lookup_cmd_packing(buf, cmd, op, size);
+       sja1105_packing(buf, &cmd->index, 10, 1, entry_size, op);
 }
 
 /* The switch is so retarded that it makes our command/entry abstraction
index e2dc997..49eb0ac 100644 (file)
@@ -1318,10 +1318,11 @@ static int sja1105et_is_fdb_entry_in_bin(struct sja1105_private *priv, int bin,
 int sja1105et_fdb_add(struct dsa_switch *ds, int port,
                      const unsigned char *addr, u16 vid)
 {
-       struct sja1105_l2_lookup_entry l2_lookup = {0};
+       struct sja1105_l2_lookup_entry l2_lookup = {0}, tmp;
        struct sja1105_private *priv = ds->priv;
        struct device *dev = ds->dev;
        int last_unused = -1;
+       int start, end, i;
        int bin, way, rc;
 
        bin = sja1105et_fdb_hash(priv, addr, vid);
@@ -1333,7 +1334,7 @@ int sja1105et_fdb_add(struct dsa_switch *ds, int port,
                 * mask? If yes, we need to do nothing. If not, we need
                 * to rewrite the entry by adding this port to it.
                 */
-               if (l2_lookup.destports & BIT(port))
+               if ((l2_lookup.destports & BIT(port)) && l2_lookup.lockeds)
                        return 0;
                l2_lookup.destports |= BIT(port);
        } else {
@@ -1364,6 +1365,7 @@ int sja1105et_fdb_add(struct dsa_switch *ds, int port,
                                                     index, NULL, false);
                }
        }
+       l2_lookup.lockeds = true;
        l2_lookup.index = sja1105et_fdb_index(bin, way);
 
        rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
@@ -1372,6 +1374,29 @@ int sja1105et_fdb_add(struct dsa_switch *ds, int port,
        if (rc < 0)
                return rc;
 
+       /* Invalidate a dynamically learned entry if that exists */
+       start = sja1105et_fdb_index(bin, 0);
+       end = sja1105et_fdb_index(bin, way);
+
+       for (i = start; i < end; i++) {
+               rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
+                                                i, &tmp);
+               if (rc == -ENOENT)
+                       continue;
+               if (rc)
+                       return rc;
+
+               if (tmp.macaddr != ether_addr_to_u64(addr) || tmp.vlanid != vid)
+                       continue;
+
+               rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
+                                                 i, NULL, false);
+               if (rc)
+                       return rc;
+
+               break;
+       }
+
        return sja1105_static_fdb_change(priv, port, &l2_lookup, true);
 }
 
@@ -1413,32 +1438,30 @@ int sja1105et_fdb_del(struct dsa_switch *ds, int port,
 int sja1105pqrs_fdb_add(struct dsa_switch *ds, int port,
                        const unsigned char *addr, u16 vid)
 {
-       struct sja1105_l2_lookup_entry l2_lookup = {0};
+       struct sja1105_l2_lookup_entry l2_lookup = {0}, tmp;
        struct sja1105_private *priv = ds->priv;
        int rc, i;
 
        /* Search for an existing entry in the FDB table */
        l2_lookup.macaddr = ether_addr_to_u64(addr);
        l2_lookup.vlanid = vid;
-       l2_lookup.iotag = SJA1105_S_TAG;
        l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0);
-       if (priv->vlan_state != SJA1105_VLAN_UNAWARE) {
-               l2_lookup.mask_vlanid = VLAN_VID_MASK;
-               l2_lookup.mask_iotag = BIT(0);
-       } else {
-               l2_lookup.mask_vlanid = 0;
-               l2_lookup.mask_iotag = 0;
-       }
+       l2_lookup.mask_vlanid = VLAN_VID_MASK;
        l2_lookup.destports = BIT(port);
 
+       tmp = l2_lookup;
+
        rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
-                                        SJA1105_SEARCH, &l2_lookup);
-       if (rc == 0) {
-               /* Found and this port is already in the entry's
+                                        SJA1105_SEARCH, &tmp);
+       if (rc == 0 && tmp.index != SJA1105_MAX_L2_LOOKUP_COUNT - 1) {
+               /* Found a static entry and this port is already in the entry's
                 * port mask => job done
                 */
-               if (l2_lookup.destports & BIT(port))
+               if ((tmp.destports & BIT(port)) && tmp.lockeds)
                        return 0;
+
+               l2_lookup = tmp;
+
                /* l2_lookup.index is populated by the switch in case it
                 * found something.
                 */
@@ -1460,16 +1483,46 @@ int sja1105pqrs_fdb_add(struct dsa_switch *ds, int port,
                dev_err(ds->dev, "FDB is full, cannot add entry.\n");
                return -EINVAL;
        }
-       l2_lookup.lockeds = true;
        l2_lookup.index = i;
 
 skip_finding_an_index:
+       l2_lookup.lockeds = true;
+
        rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
                                          l2_lookup.index, &l2_lookup,
                                          true);
        if (rc < 0)
                return rc;
 
+       /* The switch learns dynamic entries and looks up the FDB left to
+        * right. It is possible that our addition was concurrent with the
+        * dynamic learning of the same address, so now that the static entry
+        * has been installed, we are certain that address learning for this
+        * particular address has been turned off, so the dynamic entry either
+        * is in the FDB at an index smaller than the static one, or isn't (it
+        * can also be at a larger index, but in that case it is inactive
+        * because the static FDB entry will match first, and the dynamic one
+        * will eventually age out). Search for a dynamically learned address
+        * prior to our static one and invalidate it.
+        */
+       tmp = l2_lookup;
+
+       rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
+                                        SJA1105_SEARCH, &tmp);
+       if (rc < 0) {
+               dev_err(ds->dev,
+                       "port %d failed to read back entry for %pM vid %d: %pe\n",
+                       port, addr, vid, ERR_PTR(rc));
+               return rc;
+       }
+
+       if (tmp.index < l2_lookup.index) {
+               rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
+                                                 tmp.index, NULL, false);
+               if (rc < 0)
+                       return rc;
+       }
+
        return sja1105_static_fdb_change(priv, port, &l2_lookup, true);
 }
 
@@ -1483,15 +1536,8 @@ int sja1105pqrs_fdb_del(struct dsa_switch *ds, int port,
 
        l2_lookup.macaddr = ether_addr_to_u64(addr);
        l2_lookup.vlanid = vid;
-       l2_lookup.iotag = SJA1105_S_TAG;
        l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0);
-       if (priv->vlan_state != SJA1105_VLAN_UNAWARE) {
-               l2_lookup.mask_vlanid = VLAN_VID_MASK;
-               l2_lookup.mask_iotag = BIT(0);
-       } else {
-               l2_lookup.mask_vlanid = 0;
-               l2_lookup.mask_iotag = 0;
-       }
+       l2_lookup.mask_vlanid = VLAN_VID_MASK;
        l2_lookup.destports = BIT(port);
 
        rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
@@ -1589,7 +1635,9 @@ static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
                /* We need to hide the dsa_8021q VLANs from the user. */
                if (priv->vlan_state == SJA1105_VLAN_UNAWARE)
                        l2_lookup.vlanid = 0;
-               cb(macaddr, l2_lookup.vlanid, l2_lookup.lockeds, data);
+               rc = cb(macaddr, l2_lookup.vlanid, l2_lookup.lockeds, data);
+               if (rc)
+                       return rc;
        }
        return 0;
 }
@@ -3139,6 +3187,7 @@ static void sja1105_teardown(struct dsa_switch *ds)
        }
 
        sja1105_devlink_teardown(ds);
+       sja1105_mdiobus_unregister(ds);
        sja1105_flower_teardown(ds);
        sja1105_tas_teardown(ds);
        sja1105_ptp_clock_unregister(ds);
index 19aea8f..705d390 100644 (file)
@@ -284,8 +284,7 @@ static int sja1105_mdiobus_base_tx_register(struct sja1105_private *priv,
        struct mii_bus *bus;
        int rc = 0;
 
-       np = of_find_compatible_node(mdio_node, NULL,
-                                    "nxp,sja1110-base-tx-mdio");
+       np = of_get_compatible_child(mdio_node, "nxp,sja1110-base-tx-mdio");
        if (!np)
                return 0;
 
@@ -339,8 +338,7 @@ static int sja1105_mdiobus_base_t1_register(struct sja1105_private *priv,
        struct mii_bus *bus;
        int rc = 0;
 
-       np = of_find_compatible_node(mdio_node, NULL,
-                                    "nxp,sja1110-base-t1-mdio");
+       np = of_get_compatible_child(mdio_node, "nxp,sja1110-base-t1-mdio");
        if (!np)
                return 0;
 
index 860c18f..80399c8 100644 (file)
@@ -677,11 +677,13 @@ static int xge_probe(struct platform_device *pdev)
        ret = register_netdev(ndev);
        if (ret) {
                netdev_err(ndev, "Failed to register netdev\n");
-               goto err;
+               goto err_mdio_remove;
        }
 
        return 0;
 
+err_mdio_remove:
+       xge_mdio_remove(ndev);
 err:
        free_netdev(ndev);
 
index 1a6ec1a..b5d954c 100644 (file)
@@ -2669,7 +2669,8 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
        }
 
        /* Allocated memory for FW statistics  */
-       if (bnx2x_alloc_fw_stats_mem(bp))
+       rc = bnx2x_alloc_fw_stats_mem(bp);
+       if (rc)
                LOAD_ERROR_EXIT(bp, load_error0);
 
        /* request pf to initialize status blocks */
index 4db162c..8a97640 100644 (file)
@@ -72,7 +72,8 @@
 #include "bnxt_debugfs.h"
 
 #define BNXT_TX_TIMEOUT                (5 * HZ)
-#define BNXT_DEF_MSG_ENABLE    (NETIF_MSG_DRV | NETIF_MSG_HW)
+#define BNXT_DEF_MSG_ENABLE    (NETIF_MSG_DRV | NETIF_MSG_HW | \
+                                NETIF_MSG_TX_ERR)
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
@@ -365,6 +366,33 @@ static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
        return md_dst->u.port_info.port_id;
 }
 
+static void bnxt_txr_db_kick(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
+                            u16 prod)
+{
+       bnxt_db_write(bp, &txr->tx_db, prod);
+       txr->kick_pending = 0;
+}
+
+static bool bnxt_txr_netif_try_stop_queue(struct bnxt *bp,
+                                         struct bnxt_tx_ring_info *txr,
+                                         struct netdev_queue *txq)
+{
+       netif_tx_stop_queue(txq);
+
+       /* netif_tx_stop_queue() must be done before checking
+        * tx index in bnxt_tx_avail() below, because in
+        * bnxt_tx_int(), we update tx index before checking for
+        * netif_tx_queue_stopped().
+        */
+       smp_mb();
+       if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh) {
+               netif_tx_wake_queue(txq);
+               return false;
+       }
+
+       return true;
+}
+
 static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct bnxt *bp = netdev_priv(dev);
@@ -384,6 +412,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
        i = skb_get_queue_mapping(skb);
        if (unlikely(i >= bp->tx_nr_rings)) {
                dev_kfree_skb_any(skb);
+               atomic_long_inc(&dev->tx_dropped);
                return NETDEV_TX_OK;
        }
 
@@ -393,8 +422,12 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
        free_size = bnxt_tx_avail(bp, txr);
        if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
-               netif_tx_stop_queue(txq);
-               return NETDEV_TX_BUSY;
+               /* We must have raced with NAPI cleanup */
+               if (net_ratelimit() && txr->kick_pending)
+                       netif_warn(bp, tx_err, dev,
+                                  "bnxt: ring busy w/ flush pending!\n");
+               if (bnxt_txr_netif_try_stop_queue(bp, txr, txq))
+                       return NETDEV_TX_BUSY;
        }
 
        length = skb->len;
@@ -426,7 +459,10 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
                if (ptp && ptp->tx_tstamp_en && !skb_is_gso(skb) &&
                    atomic_dec_if_positive(&ptp->tx_avail) >= 0) {
-                       if (!bnxt_ptp_parse(skb, &ptp->tx_seqid)) {
+                       if (!bnxt_ptp_parse(skb, &ptp->tx_seqid,
+                                           &ptp->tx_hdr_off)) {
+                               if (vlan_tag_flags)
+                                       ptp->tx_hdr_off += VLAN_HLEN;
                                lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP);
                                skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
                        } else {
@@ -514,21 +550,16 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
 normal_tx:
        if (length < BNXT_MIN_PKT_SIZE) {
                pad = BNXT_MIN_PKT_SIZE - length;
-               if (skb_pad(skb, pad)) {
+               if (skb_pad(skb, pad))
                        /* SKB already freed. */
-                       tx_buf->skb = NULL;
-                       return NETDEV_TX_OK;
-               }
+                       goto tx_kick_pending;
                length = BNXT_MIN_PKT_SIZE;
        }
 
        mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);
 
-       if (unlikely(dma_mapping_error(&pdev->dev, mapping))) {
-               dev_kfree_skb_any(skb);
-               tx_buf->skb = NULL;
-               return NETDEV_TX_OK;
-       }
+       if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
+               goto tx_free;
 
        dma_unmap_addr_set(tx_buf, mapping, mapping);
        flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
@@ -615,24 +646,17 @@ normal_tx:
        txr->tx_prod = prod;
 
        if (!netdev_xmit_more() || netif_xmit_stopped(txq))
-               bnxt_db_write(bp, &txr->tx_db, prod);
+               bnxt_txr_db_kick(bp, txr, prod);
+       else
+               txr->kick_pending = 1;
 
 tx_done:
 
        if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
                if (netdev_xmit_more() && !tx_buf->is_push)
-                       bnxt_db_write(bp, &txr->tx_db, prod);
-
-               netif_tx_stop_queue(txq);
+                       bnxt_txr_db_kick(bp, txr, prod);
 
-               /* netif_tx_stop_queue() must be done before checking
-                * tx index in bnxt_tx_avail() below, because in
-                * bnxt_tx_int(), we update tx index before checking for
-                * netif_tx_queue_stopped().
-                */
-               smp_mb();
-               if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)
-                       netif_tx_wake_queue(txq);
+               bnxt_txr_netif_try_stop_queue(bp, txr, txq);
        }
        return NETDEV_TX_OK;
 
@@ -645,7 +669,6 @@ tx_dma_error:
        /* start back at beginning and unmap skb */
        prod = txr->tx_prod;
        tx_buf = &txr->tx_buf_ring[prod];
-       tx_buf->skb = NULL;
        dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
                         skb_headlen(skb), PCI_DMA_TODEVICE);
        prod = NEXT_TX(prod);
@@ -659,7 +682,13 @@ tx_dma_error:
                               PCI_DMA_TODEVICE);
        }
 
+tx_free:
        dev_kfree_skb_any(skb);
+tx_kick_pending:
+       if (txr->kick_pending)
+               bnxt_txr_db_kick(bp, txr, txr->tx_prod);
+       txr->tx_buf_ring[txr->tx_prod].skb = NULL;
+       atomic_long_inc(&dev->tx_dropped);
        return NETDEV_TX_OK;
 }
 
@@ -729,14 +758,9 @@ next_tx_int:
        smp_mb();
 
        if (unlikely(netif_tx_queue_stopped(txq)) &&
-           (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
-               __netif_tx_lock(txq, smp_processor_id());
-               if (netif_tx_queue_stopped(txq) &&
-                   bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
-                   txr->dev_state != BNXT_DEV_STATE_CLOSING)
-                       netif_tx_wake_queue(txq);
-               __netif_tx_unlock(txq);
-       }
+           bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
+           READ_ONCE(txr->dev_state) != BNXT_DEV_STATE_CLOSING)
+               netif_tx_wake_queue(txq);
 }
 
 static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
@@ -1764,6 +1788,10 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
        if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
                return -EBUSY;
 
+       /* The valid test of the entry must be done first before
+        * reading any further.
+        */
+       dma_rmb();
        prod = rxr->rx_prod;
 
        if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
@@ -1986,6 +2014,10 @@ static int bnxt_force_rx_discard(struct bnxt *bp,
        if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
                return -EBUSY;
 
+       /* The valid test of the entry must be done first before
+        * reading any further.
+        */
+       dma_rmb();
        cmp_type = RX_CMP_TYPE(rxcmp);
        if (cmp_type == CMP_TYPE_RX_L2_CMP) {
                rxcmp1->rx_cmp_cfa_code_errors_v2 |=
@@ -2451,6 +2483,10 @@ static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
                if (!TX_CMP_VALID(txcmp, raw_cons))
                        break;
 
+               /* The valid test of the entry must be done first before
+                * reading any further.
+                */
+               dma_rmb();
                if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
                        tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
                        cp_cons = RING_CMP(tmp_raw_cons);
@@ -9125,10 +9161,9 @@ static void bnxt_disable_napi(struct bnxt *bp)
        for (i = 0; i < bp->cp_nr_rings; i++) {
                struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
 
+               napi_disable(&bp->bnapi[i]->napi);
                if (bp->bnapi[i]->rx_ring)
                        cancel_work_sync(&cpr->dim.work);
-
-               napi_disable(&bp->bnapi[i]->napi);
        }
 }
 
@@ -9162,9 +9197,11 @@ void bnxt_tx_disable(struct bnxt *bp)
        if (bp->tx_ring) {
                for (i = 0; i < bp->tx_nr_rings; i++) {
                        txr = &bp->tx_ring[i];
-                       txr->dev_state = BNXT_DEV_STATE_CLOSING;
+                       WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING);
                }
        }
+       /* Make sure napi polls see @dev_state change */
+       synchronize_net();
        /* Drop carrier first to prevent TX timeout */
        netif_carrier_off(bp->dev);
        /* Stop all TX queues */
@@ -9178,8 +9215,10 @@ void bnxt_tx_enable(struct bnxt *bp)
 
        for (i = 0; i < bp->tx_nr_rings; i++) {
                txr = &bp->tx_ring[i];
-               txr->dev_state = 0;
+               WRITE_ONCE(txr->dev_state, 0);
        }
+       /* Make sure napi polls see @dev_state change */
+       synchronize_net();
        netif_tx_wake_all_queues(bp->dev);
        if (bp->link_info.link_up)
                netif_carrier_on(bp->dev);
@@ -10765,6 +10804,9 @@ static bool bnxt_rfs_supported(struct bnxt *bp)
                        return true;
                return false;
        }
+       /* 212 firmware is broken for aRFS */
+       if (BNXT_FW_MAJ(bp) == 212)
+               return false;
        if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
                return true;
        if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
@@ -12131,9 +12173,8 @@ static void bnxt_fw_reset_task(struct work_struct *work)
                /* Make sure fw_reset_state is 0 before clearing the flag */
                smp_mb__before_atomic();
                clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
-               bnxt_ulp_start(bp, rc);
-               if (!rc)
-                       bnxt_reenable_sriov(bp);
+               bnxt_ulp_start(bp, 0);
+               bnxt_reenable_sriov(bp);
                bnxt_vf_reps_alloc(bp);
                bnxt_vf_reps_open(bp);
                bnxt_dl_health_recovery_done(bp);
index bcf8d00..ba4e0fc 100644 (file)
@@ -786,6 +786,7 @@ struct bnxt_tx_ring_info {
        u16                     tx_prod;
        u16                     tx_cons;
        u16                     txq_index;
+       u8                      kick_pending;
        struct bnxt_db_info     tx_db;
 
        struct tx_bd            *tx_desc_ring[MAX_TX_PAGES];
index 3fc6781..94d07a9 100644 (file)
@@ -368,6 +368,7 @@ struct cmd_nums {
        #define HWRM_FUNC_PTP_TS_QUERY                    0x19fUL
        #define HWRM_FUNC_PTP_EXT_CFG                     0x1a0UL
        #define HWRM_FUNC_PTP_EXT_QCFG                    0x1a1UL
+       #define HWRM_FUNC_KEY_CTX_ALLOC                   0x1a2UL
        #define HWRM_SELFTEST_QLIST                       0x200UL
        #define HWRM_SELFTEST_EXEC                        0x201UL
        #define HWRM_SELFTEST_IRQ                         0x202UL
@@ -531,8 +532,8 @@ struct hwrm_err_output {
 #define HWRM_VERSION_MAJOR 1
 #define HWRM_VERSION_MINOR 10
 #define HWRM_VERSION_UPDATE 2
-#define HWRM_VERSION_RSVD 47
-#define HWRM_VERSION_STR "1.10.2.47"
+#define HWRM_VERSION_RSVD 52
+#define HWRM_VERSION_STR "1.10.2.52"
 
 /* hwrm_ver_get_input (size:192b/24B) */
 struct hwrm_ver_get_input {
@@ -585,6 +586,7 @@ struct hwrm_ver_get_output {
        #define VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED              0x1000UL
        #define VER_GET_RESP_DEV_CAPS_CFG_CFA_TFLIB_SUPPORTED                      0x2000UL
        #define VER_GET_RESP_DEV_CAPS_CFG_CFA_TRUFLOW_SUPPORTED                    0x4000UL
+       #define VER_GET_RESP_DEV_CAPS_CFG_SECURE_BOOT_CAPABLE                      0x8000UL
        u8      roce_fw_maj_8b;
        u8      roce_fw_min_8b;
        u8      roce_fw_bld_8b;
@@ -886,7 +888,8 @@ struct hwrm_async_event_cmpl_reset_notify {
        #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_FATAL        (0x2UL << 8)
        #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_NON_FATAL    (0x3UL << 8)
        #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FAST_RESET                (0x4UL << 8)
-       #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_LAST                     ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FAST_RESET
+       #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_ACTIVATION             (0x5UL << 8)
+       #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_LAST                     ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_ACTIVATION
        #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DELAY_IN_100MS_TICKS_MASK           0xffff0000UL
        #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DELAY_IN_100MS_TICKS_SFT            16
 };
@@ -1236,13 +1239,14 @@ struct hwrm_async_event_cmpl_error_report_base {
        u8      timestamp_lo;
        __le16  timestamp_hi;
        __le32  event_data1;
-       #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_MASK          0xffUL
-       #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_SFT           0
-       #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_RESERVED        0x0UL
-       #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM     0x1UL
-       #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL  0x2UL
-       #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_NVM             0x3UL
-       #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_LAST           ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_NVM
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_MASK                   0xffUL
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_SFT                    0
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_RESERVED                 0x0UL
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM              0x1UL
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL           0x2UL
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_NVM                      0x3UL
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD  0x4UL
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_LAST                    ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD
 };
 
 /* hwrm_async_event_cmpl_error_report_pause_storm (size:128b/16B) */
@@ -1446,6 +1450,8 @@ struct hwrm_func_vf_cfg_input {
        #define FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS            0x200UL
        #define FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS        0x400UL
        #define FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS     0x800UL
+       #define FUNC_VF_CFG_REQ_ENABLES_NUM_TX_KEY_CTXS      0x1000UL
+       #define FUNC_VF_CFG_REQ_ENABLES_NUM_RX_KEY_CTXS      0x2000UL
        __le16  mtu;
        __le16  guest_vlan;
        __le16  async_event_cr;
@@ -1469,7 +1475,8 @@ struct hwrm_func_vf_cfg_input {
        __le16  num_vnics;
        __le16  num_stat_ctxs;
        __le16  num_hw_ring_grps;
-       u8      unused_0[4];
+       __le16  num_tx_key_ctxs;
+       __le16  num_rx_key_ctxs;
 };
 
 /* hwrm_func_vf_cfg_output (size:128b/16B) */
@@ -1493,7 +1500,7 @@ struct hwrm_func_qcaps_input {
        u8      unused_0[6];
 };
 
-/* hwrm_func_qcaps_output (size:704b/88B) */
+/* hwrm_func_qcaps_output (size:768b/96B) */
 struct hwrm_func_qcaps_output {
        __le16  error_code;
        __le16  req_type;
@@ -1587,7 +1594,8 @@ struct hwrm_func_qcaps_output {
        #define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_TE_CFA      0x4UL
        #define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_RE_CFA      0x8UL
        #define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_PRIMATE     0x10UL
-       u8      unused_1;
+       __le16  max_key_ctxs_alloc;
+       u8      unused_1[7];
        u8      valid;
 };
 
@@ -1602,7 +1610,7 @@ struct hwrm_func_qcfg_input {
        u8      unused_0[6];
 };
 
-/* hwrm_func_qcfg_output (size:832b/104B) */
+/* hwrm_func_qcfg_output (size:896b/112B) */
 struct hwrm_func_qcfg_output {
        __le16  error_code;
        __le16  req_type;
@@ -1749,11 +1757,13 @@ struct hwrm_func_qcfg_output {
        #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100  (0x1UL << 29)
        #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_LAST         FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100
        __le16  host_mtu;
-       u8      unused_3;
+       __le16  alloc_tx_key_ctxs;
+       __le16  alloc_rx_key_ctxs;
+       u8      unused_3[5];
        u8      valid;
 };
 
-/* hwrm_func_cfg_input (size:832b/104B) */
+/* hwrm_func_cfg_input (size:896b/112B) */
 struct hwrm_func_cfg_input {
        __le16  req_type;
        __le16  cmpl_ring;
@@ -1820,6 +1830,8 @@ struct hwrm_func_cfg_input {
        #define FUNC_CFG_REQ_ENABLES_PARTITION_MAX_BW         0x8000000UL
        #define FUNC_CFG_REQ_ENABLES_TPID                     0x10000000UL
        #define FUNC_CFG_REQ_ENABLES_HOST_MTU                 0x20000000UL
+       #define FUNC_CFG_REQ_ENABLES_TX_KEY_CTXS              0x40000000UL
+       #define FUNC_CFG_REQ_ENABLES_RX_KEY_CTXS              0x80000000UL
        __le16  admin_mtu;
        __le16  mru;
        __le16  num_rsscos_ctxs;
@@ -1929,6 +1941,9 @@ struct hwrm_func_cfg_input {
        #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_LAST         FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100
        __be16  tpid;
        __le16  host_mtu;
+       __le16  num_tx_key_ctxs;
+       __le16  num_rx_key_ctxs;
+       u8      unused_0[4];
 };
 
 /* hwrm_func_cfg_output (size:128b/16B) */
@@ -2099,6 +2114,7 @@ struct hwrm_func_drv_rgtr_input {
        #define FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT                   0x40UL
        #define FUNC_DRV_RGTR_REQ_FLAGS_FAST_RESET_SUPPORT               0x80UL
        #define FUNC_DRV_RGTR_REQ_FLAGS_RSS_STRICT_HASH_TYPE_SUPPORT     0x100UL
+       #define FUNC_DRV_RGTR_REQ_FLAGS_NPAR_1_2_SUPPORT                 0x200UL
        __le32  enables;
        #define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE             0x1UL
        #define FUNC_DRV_RGTR_REQ_ENABLES_VER                 0x2UL
@@ -2268,7 +2284,7 @@ struct hwrm_func_resource_qcaps_input {
        u8      unused_0[6];
 };
 
-/* hwrm_func_resource_qcaps_output (size:448b/56B) */
+/* hwrm_func_resource_qcaps_output (size:512b/64B) */
 struct hwrm_func_resource_qcaps_output {
        __le16  error_code;
        __le16  req_type;
@@ -2300,11 +2316,15 @@ struct hwrm_func_resource_qcaps_output {
        __le16  max_tx_scheduler_inputs;
        __le16  flags;
        #define FUNC_RESOURCE_QCAPS_RESP_FLAGS_MIN_GUARANTEED     0x1UL
+       __le16  min_tx_key_ctxs;
+       __le16  max_tx_key_ctxs;
+       __le16  min_rx_key_ctxs;
+       __le16  max_rx_key_ctxs;
        u8      unused_0[5];
        u8      valid;
 };
 
-/* hwrm_func_vf_resource_cfg_input (size:448b/56B) */
+/* hwrm_func_vf_resource_cfg_input (size:512b/64B) */
 struct hwrm_func_vf_resource_cfg_input {
        __le16  req_type;
        __le16  cmpl_ring;
@@ -2331,6 +2351,10 @@ struct hwrm_func_vf_resource_cfg_input {
        __le16  max_hw_ring_grps;
        __le16  flags;
        #define FUNC_VF_RESOURCE_CFG_REQ_FLAGS_MIN_GUARANTEED     0x1UL
+       __le16  min_tx_key_ctxs;
+       __le16  max_tx_key_ctxs;
+       __le16  min_rx_key_ctxs;
+       __le16  max_rx_key_ctxs;
        u8      unused_0[2];
 };
 
@@ -2348,7 +2372,9 @@ struct hwrm_func_vf_resource_cfg_output {
        __le16  reserved_vnics;
        __le16  reserved_stat_ctx;
        __le16  reserved_hw_ring_grps;
-       u8      unused_0[7];
+       __le16  reserved_tx_key_ctxs;
+       __le16  reserved_rx_key_ctxs;
+       u8      unused_0[3];
        u8      valid;
 };
 
@@ -4220,7 +4246,7 @@ struct hwrm_port_lpbk_clr_stats_output {
        u8      valid;
 };
 
-/* hwrm_port_ts_query_input (size:256b/32B) */
+/* hwrm_port_ts_query_input (size:320b/40B) */
 struct hwrm_port_ts_query_input {
        __le16  req_type;
        __le16  cmpl_ring;
@@ -4238,8 +4264,11 @@ struct hwrm_port_ts_query_input {
        __le16  enables;
        #define PORT_TS_QUERY_REQ_ENABLES_TS_REQ_TIMEOUT     0x1UL
        #define PORT_TS_QUERY_REQ_ENABLES_PTP_SEQ_ID         0x2UL
+       #define PORT_TS_QUERY_REQ_ENABLES_PTP_HDR_OFFSET     0x4UL
        __le16  ts_req_timeout;
        __le32  ptp_seq_id;
+       __le16  ptp_hdr_offset;
+       u8      unused_1[6];
 };
 
 /* hwrm_port_ts_query_output (size:192b/24B) */
@@ -8172,6 +8201,7 @@ struct hwrm_fw_reset_input {
        u8      host_idx;
        u8      flags;
        #define FW_RESET_REQ_FLAGS_RESET_GRACEFUL     0x1UL
+       #define FW_RESET_REQ_FLAGS_FW_ACTIVATION      0x2UL
        u8      unused_0[4];
 };
 
@@ -8952,7 +8982,7 @@ struct hwrm_nvm_get_dir_info_output {
        u8      valid;
 };
 
-/* hwrm_nvm_write_input (size:384b/48B) */
+/* hwrm_nvm_write_input (size:448b/56B) */
 struct hwrm_nvm_write_input {
        __le16  req_type;
        __le16  cmpl_ring;
@@ -8968,7 +8998,11 @@ struct hwrm_nvm_write_input {
        __le16  option;
        __le16  flags;
        #define NVM_WRITE_REQ_FLAGS_KEEP_ORIG_ACTIVE_IMG     0x1UL
+       #define NVM_WRITE_REQ_FLAGS_BATCH_MODE               0x2UL
+       #define NVM_WRITE_REQ_FLAGS_BATCH_LAST               0x4UL
        __le32  dir_item_length;
+       __le32  offset;
+       __le32  len;
        __le32  unused_0;
 };
 
index 9089e7f..81f40ab 100644 (file)
@@ -20,7 +20,7 @@
 #include "bnxt.h"
 #include "bnxt_ptp.h"
 
-int bnxt_ptp_parse(struct sk_buff *skb, u16 *seq_id)
+int bnxt_ptp_parse(struct sk_buff *skb, u16 *seq_id, u16 *hdr_off)
 {
        unsigned int ptp_class;
        struct ptp_header *hdr;
@@ -34,6 +34,7 @@ int bnxt_ptp_parse(struct sk_buff *skb, u16 *seq_id)
                if (!hdr)
                        return -EINVAL;
 
+               *hdr_off = (u8 *)hdr - skb->data;
                *seq_id  = ntohs(hdr->sequence_id);
                return 0;
        default:
@@ -91,6 +92,7 @@ static int bnxt_hwrm_port_ts_query(struct bnxt *bp, u32 flags, u64 *ts)
            PORT_TS_QUERY_REQ_FLAGS_PATH_TX) {
                req.enables = cpu_to_le16(BNXT_PTP_QTS_TX_ENABLES);
                req.ptp_seq_id = cpu_to_le32(bp->ptp_cfg->tx_seqid);
+               req.ptp_hdr_offset = cpu_to_le16(bp->ptp_cfg->tx_hdr_off);
                req.ts_req_timeout = cpu_to_le16(BNXT_PTP_QTS_TIMEOUT);
        }
        mutex_lock(&bp->hwrm_cmd_lock);
@@ -353,6 +355,12 @@ static long bnxt_ptp_ts_aux_work(struct ptp_clock_info *ptp_info)
 
        bnxt_ptp_get_current_time(bp);
        ptp->next_period = now + HZ;
+       if (time_after_eq(now, ptp->next_overflow_check)) {
+               spin_lock_bh(&ptp->ptp_lock);
+               timecounter_read(&ptp->tc);
+               spin_unlock_bh(&ptp->ptp_lock);
+               ptp->next_overflow_check = now + BNXT_PHC_OVERFLOW_PERIOD;
+       }
        return HZ;
 }
 
@@ -423,6 +431,7 @@ int bnxt_ptp_init(struct bnxt *bp)
        ptp->cc.shift = 0;
        ptp->cc.mult = 1;
 
+       ptp->next_overflow_check = jiffies + BNXT_PHC_OVERFLOW_PERIOD;
        timecounter_init(&ptp->tc, &ptp->cc, ktime_to_ns(ktime_get_real()));
 
        ptp->ptp_info = bnxt_ptp_caps;
index 4135ea3..524f1c2 100644 (file)
@@ -10,8 +10,8 @@
 #ifndef BNXT_PTP_H
 #define BNXT_PTP_H
 
-#define BNXT_PTP_GRC_WIN       5
-#define BNXT_PTP_GRC_WIN_BASE  0x5000
+#define BNXT_PTP_GRC_WIN       6
+#define BNXT_PTP_GRC_WIN_BASE  0x6000
 
 #define BNXT_MAX_PHC_DRIFT     31000000
 #define BNXT_LO_TIMER_MASK     0x0000ffffffffUL
@@ -19,7 +19,8 @@
 
 #define BNXT_PTP_QTS_TIMEOUT   1000
 #define BNXT_PTP_QTS_TX_ENABLES        (PORT_TS_QUERY_REQ_ENABLES_PTP_SEQ_ID | \
-                                PORT_TS_QUERY_REQ_ENABLES_TS_REQ_TIMEOUT)
+                                PORT_TS_QUERY_REQ_ENABLES_TS_REQ_TIMEOUT | \
+                                PORT_TS_QUERY_REQ_ENABLES_PTP_HDR_OFFSET)
 
 struct bnxt_ptp_cfg {
        struct ptp_clock_info   ptp_info;
@@ -32,7 +33,12 @@ struct bnxt_ptp_cfg {
        u64                     current_time;
        u64                     old_time;
        unsigned long           next_period;
+       unsigned long           next_overflow_check;
+       /* 48-bit PHC overflows in 78 hours.  Check overflow every 19 hours. */
+       #define BNXT_PHC_OVERFLOW_PERIOD        (19 * 3600 * HZ)
+
        u16                     tx_seqid;
+       u16                     tx_hdr_off;
        struct bnxt             *bp;
        atomic_t                tx_avail;
 #define BNXT_MAX_TX_TS 1
@@ -70,7 +76,7 @@ do {                                          \
        ((dst) = READ_ONCE(src))
 #endif
 
-int bnxt_ptp_parse(struct sk_buff *skb, u16 *seq_id);
+int bnxt_ptp_parse(struct sk_buff *skb, u16 *seq_id, u16 *hdr_off);
 int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr);
 int bnxt_hwtstamp_get(struct net_device *dev, struct ifreq *ifr);
 int bnxt_get_tx_ts_p5(struct bnxt *bp, struct sk_buff *skb);
index 5c368a9..c2e1f16 100644 (file)
@@ -275,6 +275,12 @@ void gem_ptp_rxstamp(struct macb *bp, struct sk_buff *skb,
 
        if (GEM_BFEXT(DMA_RXVALID, desc->addr)) {
                desc_ptp = macb_ptp_desc(bp, desc);
+               /* Unlikely but check */
+               if (!desc_ptp) {
+                       dev_warn_ratelimited(&bp->pdev->dev,
+                                            "Timestamp not supported in BD\n");
+                       return;
+               }
                gem_hw_timestamp(bp, desc_ptp->ts_1, desc_ptp->ts_2, &ts);
                memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
                shhwtstamps->hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
@@ -307,8 +313,11 @@ int gem_ptp_txstamp(struct macb_queue *queue, struct sk_buff *skb,
        if (CIRC_SPACE(head, tail, PTP_TS_BUFFER_SIZE) == 0)
                return -ENOMEM;
 
-       skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
        desc_ptp = macb_ptp_desc(queue->bp, desc);
+       /* Unlikely but check */
+       if (!desc_ptp)
+               return -EINVAL;
+       skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
        tx_timestamp = &queue->tx_timestamps[head];
        tx_timestamp->skb = skb;
        /* ensure ts_1/ts_2 is loaded after ctrl (TX_USED check) */
index dbf9a0e..710cb00 100644 (file)
@@ -5068,6 +5068,7 @@ static int adap_init0(struct adapter *adap, int vpd_skip)
                ret = -ENOMEM;
                goto bye;
        }
+       bitmap_zero(adap->sge.blocked_fl, adap->sge.egr_sz);
 #endif
 
        params[0] = FW_PARAM_PFVF(CLIP_START);
@@ -6788,13 +6789,11 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        setup_memwin(adapter);
        err = adap_init0(adapter, 0);
-#ifdef CONFIG_DEBUG_FS
-       bitmap_zero(adapter->sge.blocked_fl, adapter->sge.egr_sz);
-#endif
-       setup_memwin_rdma(adapter);
        if (err)
                goto out_unmap_bar;
 
+       setup_memwin_rdma(adapter);
+
        /* configure SGE_STAT_CFG_A to read WC stats */
        if (!is_t4(adapter->params.chip))
                t4_write_reg(adapter, SGE_STAT_CFG_A, STATSOURCE_T5_V(7) |
index f6ff1f7..1876f15 100644 (file)
@@ -357,7 +357,7 @@ static int w840_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
        int i, option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
        void __iomem *ioaddr;
 
-       i = pci_enable_device(pdev);
+       i = pcim_enable_device(pdev);
        if (i) return i;
 
        pci_set_master(pdev);
@@ -379,7 +379,7 @@ static int w840_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        ioaddr = pci_iomap(pdev, TULIP_BAR, netdev_res_size);
        if (!ioaddr)
-               goto err_out_free_res;
+               goto err_out_netdev;
 
        for (i = 0; i < 3; i++)
                ((__le16 *)dev->dev_addr)[i] = cpu_to_le16(eeprom_read(ioaddr, i));
@@ -458,8 +458,6 @@ static int w840_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 err_out_cleardev:
        pci_iounmap(pdev, ioaddr);
-err_out_free_res:
-       pci_release_regions(pdev);
 err_out_netdev:
        free_netdev (dev);
        return -ENODEV;
@@ -1526,7 +1524,6 @@ static void w840_remove1(struct pci_dev *pdev)
        if (dev) {
                struct netdev_private *np = netdev_priv(dev);
                unregister_netdev(dev);
-               pci_release_regions(pdev);
                pci_iounmap(pdev, np->base_addr);
                free_netdev(dev);
        }
index 68b7864..98cc013 100644 (file)
@@ -3038,26 +3038,30 @@ static int dpaa2_switch_port_init(struct ethsw_port_priv *port_priv, u16 port)
        return err;
 }
 
-static void dpaa2_switch_takedown(struct fsl_mc_device *sw_dev)
+static void dpaa2_switch_ctrl_if_teardown(struct ethsw_core *ethsw)
+{
+       dpsw_ctrl_if_disable(ethsw->mc_io, 0, ethsw->dpsw_handle);
+       dpaa2_switch_free_dpio(ethsw);
+       dpaa2_switch_destroy_rings(ethsw);
+       dpaa2_switch_drain_bp(ethsw);
+       dpaa2_switch_free_dpbp(ethsw);
+}
+
+static void dpaa2_switch_teardown(struct fsl_mc_device *sw_dev)
 {
        struct device *dev = &sw_dev->dev;
        struct ethsw_core *ethsw = dev_get_drvdata(dev);
        int err;
 
+       dpaa2_switch_ctrl_if_teardown(ethsw);
+
+       destroy_workqueue(ethsw->workqueue);
+
        err = dpsw_close(ethsw->mc_io, 0, ethsw->dpsw_handle);
        if (err)
                dev_warn(dev, "dpsw_close err %d\n", err);
 }
 
-static void dpaa2_switch_ctrl_if_teardown(struct ethsw_core *ethsw)
-{
-       dpsw_ctrl_if_disable(ethsw->mc_io, 0, ethsw->dpsw_handle);
-       dpaa2_switch_free_dpio(ethsw);
-       dpaa2_switch_destroy_rings(ethsw);
-       dpaa2_switch_drain_bp(ethsw);
-       dpaa2_switch_free_dpbp(ethsw);
-}
-
 static int dpaa2_switch_remove(struct fsl_mc_device *sw_dev)
 {
        struct ethsw_port_priv *port_priv;
@@ -3068,8 +3072,6 @@ static int dpaa2_switch_remove(struct fsl_mc_device *sw_dev)
        dev = &sw_dev->dev;
        ethsw = dev_get_drvdata(dev);
 
-       dpaa2_switch_ctrl_if_teardown(ethsw);
-
        dpaa2_switch_teardown_irqs(sw_dev);
 
        dpsw_disable(ethsw->mc_io, 0, ethsw->dpsw_handle);
@@ -3084,9 +3086,7 @@ static int dpaa2_switch_remove(struct fsl_mc_device *sw_dev)
        kfree(ethsw->acls);
        kfree(ethsw->ports);
 
-       dpaa2_switch_takedown(sw_dev);
-
-       destroy_workqueue(ethsw->workqueue);
+       dpaa2_switch_teardown(sw_dev);
 
        fsl_mc_portal_free(ethsw->mc_io);
 
@@ -3199,7 +3199,7 @@ static int dpaa2_switch_probe(struct fsl_mc_device *sw_dev)
                               GFP_KERNEL);
        if (!(ethsw->ports)) {
                err = -ENOMEM;
-               goto err_takedown;
+               goto err_teardown;
        }
 
        ethsw->fdbs = kcalloc(ethsw->sw_attr.num_ifs, sizeof(*ethsw->fdbs),
@@ -3270,8 +3270,8 @@ err_free_fdbs:
 err_free_ports:
        kfree(ethsw->ports);
 
-err_takedown:
-       dpaa2_switch_takedown(sw_dev);
+err_teardown:
+       dpaa2_switch_teardown(sw_dev);
 
 err_free_cmdport:
        fsl_mc_portal_free(ethsw->mc_io);
index 8aea707..7e4c498 100644 (file)
@@ -3843,13 +3843,13 @@ fec_drv_remove(struct platform_device *pdev)
        if (of_phy_is_fixed_link(np))
                of_phy_deregister_fixed_link(np);
        of_node_put(fep->phy_node);
-       free_netdev(ndev);
 
        clk_disable_unprepare(fep->clk_ahb);
        clk_disable_unprepare(fep->clk_ipg);
        pm_runtime_put_noidle(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
 
+       free_netdev(ndev);
        return 0;
 }
 
index 5325230..80461ab 100644 (file)
@@ -938,20 +938,19 @@ static int hns3_dbg_dev_info(struct hnae3_handle *h, char *buf, int len)
        return 0;
 }
 
-static int hns3_dbg_get_cmd_index(struct hnae3_handle *handle,
-                                 const unsigned char *name, u32 *index)
+static int hns3_dbg_get_cmd_index(struct hns3_dbg_data *dbg_data, u32 *index)
 {
        u32 i;
 
        for (i = 0; i < ARRAY_SIZE(hns3_dbg_cmd); i++) {
-               if (!strncmp(name, hns3_dbg_cmd[i].name,
-                            strlen(hns3_dbg_cmd[i].name))) {
+               if (hns3_dbg_cmd[i].cmd == dbg_data->cmd) {
                        *index = i;
                        return 0;
                }
        }
 
-       dev_err(&handle->pdev->dev, "unknown command(%s)\n", name);
+       dev_err(&dbg_data->handle->pdev->dev, "unknown command(%d)\n",
+               dbg_data->cmd);
        return -EINVAL;
 }
 
@@ -1019,8 +1018,7 @@ static ssize_t hns3_dbg_read(struct file *filp, char __user *buffer,
        u32 index;
        int ret;
 
-       ret = hns3_dbg_get_cmd_index(handle, filp->f_path.dentry->d_iname,
-                                    &index);
+       ret = hns3_dbg_get_cmd_index(dbg_data, &index);
        if (ret)
                return ret;
 
@@ -1090,6 +1088,7 @@ static int hns3_dbg_bd_file_init(struct hnae3_handle *handle, u32 cmd)
                char name[HNS3_DBG_FILE_NAME_LEN];
 
                data[i].handle = handle;
+               data[i].cmd = hns3_dbg_cmd[cmd].cmd;
                data[i].qid = i;
                sprintf(name, "%s%u", hns3_dbg_cmd[cmd].name, i);
                debugfs_create_file(name, 0400, entry_dir, &data[i],
@@ -1110,6 +1109,7 @@ hns3_dbg_common_file_init(struct hnae3_handle *handle, u32 cmd)
                return -ENOMEM;
 
        data->handle = handle;
+       data->cmd = hns3_dbg_cmd[cmd].cmd;
        entry_dir = hns3_dbg_dentry[hns3_dbg_cmd[cmd].dentry].dentry;
        debugfs_create_file(hns3_dbg_cmd[cmd].name, 0400, entry_dir,
                            data, &hns3_dbg_fops);
index f3766ff..bd88010 100644 (file)
@@ -22,6 +22,7 @@ struct hns3_dbg_item {
 
 struct hns3_dbg_data {
        struct hnae3_handle *handle;
+       enum hnae3_dbg_cmd cmd;
        u16 qid;
 };
 
index 887297e..eb748aa 100644 (file)
@@ -573,9 +573,13 @@ static void hclge_cmd_uninit_regs(struct hclge_hw *hw)
 
 void hclge_cmd_uninit(struct hclge_dev *hdev)
 {
+       set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+       /* wait to ensure that the firmware completes the possible left
+        * over commands.
+        */
+       msleep(HCLGE_CMDQ_CLEAR_WAIT_TIME);
        spin_lock_bh(&hdev->hw.cmq.csq.lock);
        spin_lock(&hdev->hw.cmq.crq.lock);
-       set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
        hclge_cmd_uninit_regs(&hdev->hw);
        spin_unlock(&hdev->hw.cmq.crq.lock);
        spin_unlock_bh(&hdev->hw.cmq.csq.lock);
index 18bde77..ac70d49 100644 (file)
@@ -9,6 +9,7 @@
 #include "hnae3.h"
 
 #define HCLGE_CMDQ_TX_TIMEOUT          30000
+#define HCLGE_CMDQ_CLEAR_WAIT_TIME     200
 #define HCLGE_DESC_DATA_LEN            6
 
 struct hclge_dev;
@@ -270,6 +271,9 @@ enum hclge_opcode_type {
        /* Led command */
        HCLGE_OPC_LED_STATUS_CFG        = 0xB000,
 
+       /* clear hardware resource command */
+       HCLGE_OPC_CLEAR_HW_RESOURCE     = 0x700B,
+
        /* NCL config command */
        HCLGE_OPC_QUERY_NCL_CONFIG      = 0x7011,
 
index 5bf5db9..39f56f2 100644 (file)
@@ -255,21 +255,12 @@ static int hclge_ieee_getpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
        u64 requests[HNAE3_MAX_TC], indications[HNAE3_MAX_TC];
        struct hclge_vport *vport = hclge_get_vport(h);
        struct hclge_dev *hdev = vport->back;
-       u8 i, j, pfc_map, *prio_tc;
        int ret;
+       u8 i;
 
        memset(pfc, 0, sizeof(*pfc));
        pfc->pfc_cap = hdev->pfc_max;
-       prio_tc = hdev->tm_info.prio_tc;
-       pfc_map = hdev->tm_info.hw_pfc_map;
-
-       /* Pfc setting is based on TC */
-       for (i = 0; i < hdev->tm_info.num_tc; i++) {
-               for (j = 0; j < HNAE3_MAX_USER_PRIO; j++) {
-                       if ((prio_tc[j] == i) && (pfc_map & BIT(i)))
-                               pfc->pfc_en |= BIT(j);
-               }
-       }
+       pfc->pfc_en = hdev->tm_info.pfc_en;
 
        ret = hclge_pfc_tx_stats_get(hdev, requests);
        if (ret)
index ebeaf12..03ae122 100644 (file)
@@ -1550,6 +1550,7 @@ static int hclge_configure(struct hclge_dev *hdev)
        hdev->tm_info.hw_pfc_map = 0;
        hdev->wanted_umv_size = cfg.umv_space;
        hdev->tx_spare_buf_size = cfg.tx_spare_buf_size;
+       hdev->gro_en = true;
        if (cfg.vlan_fliter_cap == HCLGE_VLAN_FLTR_CAN_MDF)
                set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps);
 
@@ -1618,7 +1619,7 @@ static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min,
        return hclge_cmd_send(&hdev->hw, &desc, 1);
 }
 
-static int hclge_config_gro(struct hclge_dev *hdev, bool en)
+static int hclge_config_gro(struct hclge_dev *hdev)
 {
        struct hclge_cfg_gro_status_cmd *req;
        struct hclge_desc desc;
@@ -1630,7 +1631,7 @@ static int hclge_config_gro(struct hclge_dev *hdev, bool en)
        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
        req = (struct hclge_cfg_gro_status_cmd *)desc.data;
 
-       req->gro_en = en ? 1 : 0;
+       req->gro_en = hdev->gro_en ? 1 : 0;
 
        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (ret)
@@ -2952,12 +2953,12 @@ static void hclge_update_link_status(struct hclge_dev *hdev)
        }
 
        if (state != hdev->hw.mac.link) {
+               hdev->hw.mac.link = state;
                client->ops->link_status_change(handle, state);
                hclge_config_mac_tnl_int(hdev, state);
                if (rclient && rclient->ops->link_status_change)
                        rclient->ops->link_status_change(rhandle, state);
 
-               hdev->hw.mac.link = state;
                hclge_push_link_status(hdev);
        }
 
@@ -10073,7 +10074,11 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev)
 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
                                       bool writen_to_tbl)
 {
-       struct hclge_vport_vlan_cfg *vlan;
+       struct hclge_vport_vlan_cfg *vlan, *tmp;
+
+       list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node)
+               if (vlan->vlan_id == vlan_id)
+                       return;
 
        vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
        if (!vlan)
@@ -11443,6 +11448,28 @@ static void hclge_clear_resetting_state(struct hclge_dev *hdev)
        }
 }
 
+static int hclge_clear_hw_resource(struct hclge_dev *hdev)
+{
+       struct hclge_desc desc;
+       int ret;
+
+       hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CLEAR_HW_RESOURCE, false);
+
+       ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+       /* This new command is only supported by new firmware, it will
+        * fail with older firmware. Error value -EOPNOTSUPP can only be
+        * returned by older firmware running this command, to keep code
+        * backward compatible we will override this value and return
+        * success.
+        */
+       if (ret && ret != -EOPNOTSUPP) {
+               dev_err(&hdev->pdev->dev,
+                       "failed to clear hw resource, ret = %d\n", ret);
+               return ret;
+       }
+       return 0;
+}
+
 static void hclge_init_rxd_adv_layout(struct hclge_dev *hdev)
 {
        if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
@@ -11492,6 +11519,10 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
        if (ret)
                goto err_cmd_uninit;
 
+       ret  = hclge_clear_hw_resource(hdev);
+       if (ret)
+               goto err_cmd_uninit;
+
        ret = hclge_get_cap(hdev);
        if (ret)
                goto err_cmd_uninit;
@@ -11556,7 +11587,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
                goto err_mdiobus_unreg;
        }
 
-       ret = hclge_config_gro(hdev, true);
+       ret = hclge_config_gro(hdev);
        if (ret)
                goto err_mdiobus_unreg;
 
@@ -11937,7 +11968,7 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
                return ret;
        }
 
-       ret = hclge_config_gro(hdev, true);
+       ret = hclge_config_gro(hdev);
        if (ret)
                return ret;
 
@@ -12671,8 +12702,15 @@ static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
 {
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;
+       bool gro_en_old = hdev->gro_en;
+       int ret;
 
-       return hclge_config_gro(hdev, enable);
+       hdev->gro_en = enable;
+       ret = hclge_config_gro(hdev);
+       if (ret)
+               hdev->gro_en = gro_en_old;
+
+       return ret;
 }
 
 static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
index 3d33524..e446b83 100644 (file)
@@ -927,6 +927,7 @@ struct hclge_dev {
        unsigned long fd_bmap[BITS_TO_LONGS(MAX_FD_FILTER_NUM)];
        enum HCLGE_FD_ACTIVE_RULE_TYPE fd_active_type;
        u8 fd_en;
+       bool gro_en;
 
        u16 wanted_umv_size;
        /* max available unicast mac vlan space */
index 3b1f845..befa9bc 100644 (file)
@@ -5,9 +5,27 @@
 #include "hclge_main.h"
 #include "hnae3.h"
 
+static int hclge_ptp_get_cycle(struct hclge_dev *hdev)
+{
+       struct hclge_ptp *ptp = hdev->ptp;
+
+       ptp->cycle.quo = readl(hdev->ptp->io_base + HCLGE_PTP_CYCLE_QUO_REG) &
+                        HCLGE_PTP_CYCLE_QUO_MASK;
+       ptp->cycle.numer = readl(hdev->ptp->io_base + HCLGE_PTP_CYCLE_NUM_REG);
+       ptp->cycle.den = readl(hdev->ptp->io_base + HCLGE_PTP_CYCLE_DEN_REG);
+
+       if (ptp->cycle.den == 0) {
+               dev_err(&hdev->pdev->dev, "invalid ptp cycle denominator!\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
 static int hclge_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
 {
        struct hclge_dev *hdev = hclge_ptp_get_hdev(ptp);
+       struct hclge_ptp_cycle *cycle = &hdev->ptp->cycle;
        u64 adj_val, adj_base, diff;
        unsigned long flags;
        bool is_neg = false;
@@ -18,7 +36,7 @@ static int hclge_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
                is_neg = true;
        }
 
-       adj_base = HCLGE_PTP_CYCLE_ADJ_BASE * HCLGE_PTP_CYCLE_ADJ_UNIT;
+       adj_base = (u64)cycle->quo * (u64)cycle->den + (u64)cycle->numer;
        adj_val = adj_base * ppb;
        diff = div_u64(adj_val, 1000000000ULL);
 
@@ -29,16 +47,16 @@ static int hclge_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
 
        /* This clock cycle is defined by three part: quotient, numerator
         * and denominator. For example, 2.5ns, the quotient is 2,
-        * denominator is fixed to HCLGE_PTP_CYCLE_ADJ_UNIT, and numerator
-        * is 0.5 * HCLGE_PTP_CYCLE_ADJ_UNIT.
+        * denominator is fixed to ptp->cycle.den, and numerator
+        * is 0.5 * ptp->cycle.den.
         */
-       quo = div_u64_rem(adj_val, HCLGE_PTP_CYCLE_ADJ_UNIT, &numerator);
+       quo = div_u64_rem(adj_val, cycle->den, &numerator);
 
        spin_lock_irqsave(&hdev->ptp->lock, flags);
-       writel(quo, hdev->ptp->io_base + HCLGE_PTP_CYCLE_QUO_REG);
+       writel(quo & HCLGE_PTP_CYCLE_QUO_MASK,
+              hdev->ptp->io_base + HCLGE_PTP_CYCLE_QUO_REG);
        writel(numerator, hdev->ptp->io_base + HCLGE_PTP_CYCLE_NUM_REG);
-       writel(HCLGE_PTP_CYCLE_ADJ_UNIT,
-              hdev->ptp->io_base + HCLGE_PTP_CYCLE_DEN_REG);
+       writel(cycle->den, hdev->ptp->io_base + HCLGE_PTP_CYCLE_DEN_REG);
        writel(HCLGE_PTP_CYCLE_ADJ_EN,
               hdev->ptp->io_base + HCLGE_PTP_CYCLE_CFG_REG);
        spin_unlock_irqrestore(&hdev->ptp->lock, flags);
@@ -475,6 +493,10 @@ int hclge_ptp_init(struct hclge_dev *hdev)
                ret = hclge_ptp_create_clock(hdev);
                if (ret)
                        return ret;
+
+               ret = hclge_ptp_get_cycle(hdev);
+               if (ret)
+                       return ret;
        }
 
        ret = hclge_ptp_int_en(hdev, true);
index 5a202b7..dbf5f4c 100644 (file)
@@ -29,6 +29,7 @@
 #define HCLGE_PTP_TIME_ADJ_REG         0x60
 #define HCLGE_PTP_TIME_ADJ_EN          BIT(0)
 #define HCLGE_PTP_CYCLE_QUO_REG                0x64
+#define HCLGE_PTP_CYCLE_QUO_MASK       GENMASK(7, 0)
 #define HCLGE_PTP_CYCLE_DEN_REG                0x68
 #define HCLGE_PTP_CYCLE_NUM_REG                0x6C
 #define HCLGE_PTP_CYCLE_CFG_REG                0x70
@@ -37,9 +38,7 @@
 #define HCLGE_PTP_CUR_TIME_SEC_L_REG   0x78
 #define HCLGE_PTP_CUR_TIME_NSEC_REG    0x7C
 
-#define HCLGE_PTP_CYCLE_ADJ_BASE       2
 #define HCLGE_PTP_CYCLE_ADJ_MAX                500000000
-#define HCLGE_PTP_CYCLE_ADJ_UNIT       100000000
 #define HCLGE_PTP_SEC_H_OFFSET         32u
 #define HCLGE_PTP_SEC_L_MASK           GENMASK(31, 0)
 
 #define HCLGE_PTP_FLAG_TX_EN           1
 #define HCLGE_PTP_FLAG_RX_EN           2
 
+struct hclge_ptp_cycle {
+       u32 quo;
+       u32 numer;
+       u32 den;
+};
+
 struct hclge_ptp {
        struct hclge_dev *hdev;
        struct ptp_clock *clock;
@@ -58,6 +63,7 @@ struct hclge_ptp {
        spinlock_t lock;        /* protects ptp registers */
        u32 ptp_cfg;
        u32 last_tx_seqid;
+       struct hclge_ptp_cycle cycle;
        unsigned long tx_start;
        unsigned long tx_cnt;
        unsigned long tx_skipped;
index bd19a2d..d9ddb0a 100644 (file)
@@ -507,12 +507,17 @@ static void hclgevf_cmd_uninit_regs(struct hclgevf_hw *hw)
 
 void hclgevf_cmd_uninit(struct hclgevf_dev *hdev)
 {
+       set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
+       /* wait to ensure that the firmware completes the possible left
+        * over commands.
+        */
+       msleep(HCLGEVF_CMDQ_CLEAR_WAIT_TIME);
        spin_lock_bh(&hdev->hw.cmq.csq.lock);
        spin_lock(&hdev->hw.cmq.crq.lock);
-       set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
        hclgevf_cmd_uninit_regs(&hdev->hw);
        spin_unlock(&hdev->hw.cmq.crq.lock);
        spin_unlock_bh(&hdev->hw.cmq.csq.lock);
+
        hclgevf_free_cmd_desc(&hdev->hw.cmq.csq);
        hclgevf_free_cmd_desc(&hdev->hw.cmq.crq);
 }
index 202feb7..5b82177 100644 (file)
@@ -8,6 +8,7 @@
 #include "hnae3.h"
 
 #define HCLGEVF_CMDQ_TX_TIMEOUT                30000
+#define HCLGEVF_CMDQ_CLEAR_WAIT_TIME   200
 #define HCLGEVF_CMDQ_RX_INVLD_B                0
 #define HCLGEVF_CMDQ_RX_OUTVLD_B       1
 
index 8784d61..9386547 100644 (file)
@@ -506,10 +506,10 @@ void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state)
        link_state =
                test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 0 : link_state;
        if (link_state != hdev->hw.mac.link) {
+               hdev->hw.mac.link = link_state;
                client->ops->link_status_change(handle, !!link_state);
                if (rclient && rclient->ops->link_status_change)
                        rclient->ops->link_status_change(rhandle, !!link_state);
-               hdev->hw.mac.link = link_state;
        }
 
        clear_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state);
@@ -2487,6 +2487,8 @@ static int hclgevf_configure(struct hclgevf_dev *hdev)
 {
        int ret;
 
+       hdev->gro_en = true;
+
        ret = hclgevf_get_basic_info(hdev);
        if (ret)
                return ret;
@@ -2549,7 +2551,7 @@ static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
        return 0;
 }
 
-static int hclgevf_config_gro(struct hclgevf_dev *hdev, bool en)
+static int hclgevf_config_gro(struct hclgevf_dev *hdev)
 {
        struct hclgevf_cfg_gro_status_cmd *req;
        struct hclgevf_desc desc;
@@ -2562,7 +2564,7 @@ static int hclgevf_config_gro(struct hclgevf_dev *hdev, bool en)
                                     false);
        req = (struct hclgevf_cfg_gro_status_cmd *)desc.data;
 
-       req->gro_en = en ? 1 : 0;
+       req->gro_en = hdev->gro_en ? 1 : 0;
 
        ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
        if (ret)
@@ -3308,7 +3310,7 @@ static int hclgevf_reset_hdev(struct hclgevf_dev *hdev)
                return ret;
        }
 
-       ret = hclgevf_config_gro(hdev, true);
+       ret = hclgevf_config_gro(hdev);
        if (ret)
                return ret;
 
@@ -3389,7 +3391,7 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
        if (ret)
                goto err_config;
 
-       ret = hclgevf_config_gro(hdev, true);
+       ret = hclgevf_config_gro(hdev);
        if (ret)
                goto err_config;
 
@@ -3638,8 +3640,15 @@ void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
 static int hclgevf_gro_en(struct hnae3_handle *handle, bool enable)
 {
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+       bool gro_en_old = hdev->gro_en;
+       int ret;
 
-       return hclgevf_config_gro(hdev, enable);
+       hdev->gro_en = enable;
+       ret = hclgevf_config_gro(hdev);
+       if (ret)
+               hdev->gro_en = gro_en_old;
+
+       return ret;
 }
 
 static void hclgevf_get_media_type(struct hnae3_handle *handle, u8 *media_type,
index d7d0284..e8013be 100644 (file)
@@ -310,6 +310,8 @@ struct hclgevf_dev {
        u16 *vector_status;
        int *vector_irq;
 
+       bool gro_en;
+
        unsigned long vlan_del_fail_bmap[BITS_TO_LONGS(VLAN_N_VID)];
 
        struct hclgevf_mac_table_cfg mac_table;
index 772b2f8..b339b9b 100644 (file)
@@ -323,8 +323,8 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
                        flag = (u8)msg_q[5];
 
                        /* update upper layer with new link link status */
-                       hclgevf_update_link_status(hdev, link_status);
                        hclgevf_update_speed_duplex(hdev, speed, duplex);
+                       hclgevf_update_link_status(hdev, link_status);
 
                        if (flag & HCLGE_MBX_PUSH_LINK_STATUS_EN)
                                set_bit(HCLGEVF_STATE_PF_PUSH_LINK_STATUS,
index cf7b388..a80336c 100644 (file)
@@ -1006,6 +1006,8 @@ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
 {
        u32 reg = link << (E1000_LTRV_REQ_SHIFT + E1000_LTRV_NOSNOOP_SHIFT) |
            link << E1000_LTRV_REQ_SHIFT | E1000_LTRV_SEND;
+       u16 max_ltr_enc_d = 0;  /* maximum LTR decoded by platform */
+       u16 lat_enc_d = 0;      /* latency decoded */
        u16 lat_enc = 0;        /* latency encoded */
 
        if (link) {
@@ -1059,7 +1061,17 @@ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
                                     E1000_PCI_LTR_CAP_LPT + 2, &max_nosnoop);
                max_ltr_enc = max_t(u16, max_snoop, max_nosnoop);
 
-               if (lat_enc > max_ltr_enc)
+               lat_enc_d = (lat_enc & E1000_LTRV_VALUE_MASK) *
+                            (1U << (E1000_LTRV_SCALE_FACTOR *
+                            ((lat_enc & E1000_LTRV_SCALE_MASK)
+                            >> E1000_LTRV_SCALE_SHIFT)));
+
+               max_ltr_enc_d = (max_ltr_enc & E1000_LTRV_VALUE_MASK) *
+                                (1U << (E1000_LTRV_SCALE_FACTOR *
+                                ((max_ltr_enc & E1000_LTRV_SCALE_MASK)
+                                >> E1000_LTRV_SCALE_SHIFT)));
+
+               if (lat_enc_d > max_ltr_enc_d)
                        lat_enc = max_ltr_enc;
        }
 
@@ -4115,13 +4127,17 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
                return ret_val;
 
        if (!(data & valid_csum_mask)) {
-               data |= valid_csum_mask;
-               ret_val = e1000_write_nvm(hw, word, 1, &data);
-               if (ret_val)
-                       return ret_val;
-               ret_val = e1000e_update_nvm_checksum(hw);
-               if (ret_val)
-                       return ret_val;
+               e_dbg("NVM Checksum Invalid\n");
+
+               if (hw->mac.type < e1000_pch_cnp) {
+                       data |= valid_csum_mask;
+                       ret_val = e1000_write_nvm(hw, word, 1, &data);
+                       if (ret_val)
+                               return ret_val;
+                       ret_val = e1000e_update_nvm_checksum(hw);
+                       if (ret_val)
+                               return ret_val;
+               }
        }
 
        return e1000e_validate_nvm_checksum_generic(hw);
index 1502895..e757896 100644 (file)
 
 /* Latency Tolerance Reporting */
 #define E1000_LTRV                     0x000F8
+#define E1000_LTRV_VALUE_MASK          0x000003FF
 #define E1000_LTRV_SCALE_MAX           5
 #define E1000_LTRV_SCALE_FACTOR                5
+#define E1000_LTRV_SCALE_SHIFT         10
+#define E1000_LTRV_SCALE_MASK          0x00001C00
 #define E1000_LTRV_REQ_SHIFT           15
 #define E1000_LTRV_NOSNOOP_SHIFT       16
 #define E1000_LTRV_SEND                        (1 << 30)
index 3e822ba..2c9e4ee 100644 (file)
@@ -980,7 +980,7 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
        default:
                /* if we got here and link is up something bad is afoot */
                netdev_info(netdev,
-                           "WARNING: Link is up but PHY type 0x%x is not recognized.\n",
+                           "WARNING: Link is up but PHY type 0x%x is not recognized, or incorrect cable is in use\n",
                            hw_link_info->phy_type);
        }
 
@@ -5294,6 +5294,10 @@ flags_complete:
                                        dev_warn(&pf->pdev->dev,
                                                 "Device configuration forbids SW from starting the LLDP agent.\n");
                                        return -EINVAL;
+                               case I40E_AQ_RC_EAGAIN:
+                                       dev_warn(&pf->pdev->dev,
+                                                "Stop FW LLDP agent command is still being processed, please try again in a second.\n");
+                                       return -EBUSY;
                                default:
                                        dev_warn(&pf->pdev->dev,
                                                 "Starting FW LLDP agent failed: error: %s, %s\n",
index 861e59a..1d1f527 100644 (file)
@@ -4454,11 +4454,10 @@ int i40e_control_wait_tx_q(int seid, struct i40e_pf *pf, int pf_q,
 }
 
 /**
- * i40e_vsi_control_tx - Start or stop a VSI's rings
+ * i40e_vsi_enable_tx - Start a VSI's rings
  * @vsi: the VSI being configured
- * @enable: start or stop the rings
  **/
-static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
+static int i40e_vsi_enable_tx(struct i40e_vsi *vsi)
 {
        struct i40e_pf *pf = vsi->back;
        int i, pf_q, ret = 0;
@@ -4467,7 +4466,7 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
        for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
                ret = i40e_control_wait_tx_q(vsi->seid, pf,
                                             pf_q,
-                                            false /*is xdp*/, enable);
+                                            false /*is xdp*/, true);
                if (ret)
                        break;
 
@@ -4476,7 +4475,7 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
 
                ret = i40e_control_wait_tx_q(vsi->seid, pf,
                                             pf_q + vsi->alloc_queue_pairs,
-                                            true /*is xdp*/, enable);
+                                            true /*is xdp*/, true);
                if (ret)
                        break;
        }
@@ -4574,32 +4573,25 @@ int i40e_control_wait_rx_q(struct i40e_pf *pf, int pf_q, bool enable)
 }
 
 /**
- * i40e_vsi_control_rx - Start or stop a VSI's rings
+ * i40e_vsi_enable_rx - Start a VSI's rings
  * @vsi: the VSI being configured
- * @enable: start or stop the rings
  **/
-static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
+static int i40e_vsi_enable_rx(struct i40e_vsi *vsi)
 {
        struct i40e_pf *pf = vsi->back;
        int i, pf_q, ret = 0;
 
        pf_q = vsi->base_queue;
        for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
-               ret = i40e_control_wait_rx_q(pf, pf_q, enable);
+               ret = i40e_control_wait_rx_q(pf, pf_q, true);
                if (ret) {
                        dev_info(&pf->pdev->dev,
-                                "VSI seid %d Rx ring %d %sable timeout\n",
-                                vsi->seid, pf_q, (enable ? "en" : "dis"));
+                                "VSI seid %d Rx ring %d enable timeout\n",
+                                vsi->seid, pf_q);
                        break;
                }
        }
 
-       /* Due to HW errata, on Rx disable only, the register can indicate done
-        * before it really is. Needs 50ms to be sure
-        */
-       if (!enable)
-               mdelay(50);
-
        return ret;
 }
 
@@ -4612,29 +4604,47 @@ int i40e_vsi_start_rings(struct i40e_vsi *vsi)
        int ret = 0;
 
        /* do rx first for enable and last for disable */
-       ret = i40e_vsi_control_rx(vsi, true);
+       ret = i40e_vsi_enable_rx(vsi);
        if (ret)
                return ret;
-       ret = i40e_vsi_control_tx(vsi, true);
+       ret = i40e_vsi_enable_tx(vsi);
 
        return ret;
 }
 
+#define I40E_DISABLE_TX_GAP_MSEC       50
+
 /**
  * i40e_vsi_stop_rings - Stop a VSI's rings
  * @vsi: the VSI being configured
  **/
 void i40e_vsi_stop_rings(struct i40e_vsi *vsi)
 {
+       struct i40e_pf *pf = vsi->back;
+       int pf_q, err, q_end;
+
        /* When port TX is suspended, don't wait */
        if (test_bit(__I40E_PORT_SUSPENDED, vsi->back->state))
                return i40e_vsi_stop_rings_no_wait(vsi);
 
-       /* do rx first for enable and last for disable
-        * Ignore return value, we need to shutdown whatever we can
-        */
-       i40e_vsi_control_tx(vsi, false);
-       i40e_vsi_control_rx(vsi, false);
+       q_end = vsi->base_queue + vsi->num_queue_pairs;
+       for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++)
+               i40e_pre_tx_queue_cfg(&pf->hw, (u32)pf_q, false);
+
+       for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++) {
+               err = i40e_control_wait_rx_q(pf, pf_q, false);
+               if (err)
+                       dev_info(&pf->pdev->dev,
+                                "VSI seid %d Rx ring %d dissable timeout\n",
+                                vsi->seid, pf_q);
+       }
+
+       msleep(I40E_DISABLE_TX_GAP_MSEC);
+       pf_q = vsi->base_queue;
+       for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++)
+               wr32(&pf->hw, I40E_QTX_ENA(pf_q), 0);
+
+       i40e_vsi_wait_queues_disabled(vsi);
 }
 
 /**
@@ -7280,6 +7290,8 @@ static int i40e_validate_mqprio_qopt(struct i40e_vsi *vsi,
        }
        if (vsi->num_queue_pairs <
            (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i])) {
+               dev_err(&vsi->back->pdev->dev,
+                       "Failed to create traffic channel, insufficient number of queues.\n");
                return -EINVAL;
        }
        if (sum_max_rate > i40e_get_link_speed(vsi)) {
@@ -13261,6 +13273,7 @@ static const struct net_device_ops i40e_netdev_ops = {
        .ndo_poll_controller    = i40e_netpoll,
 #endif
        .ndo_setup_tc           = __i40e_setup_tc,
+       .ndo_select_queue       = i40e_lan_select_queue,
        .ndo_set_features       = i40e_set_features,
        .ndo_set_vf_mac         = i40e_ndo_set_vf_mac,
        .ndo_set_vf_vlan        = i40e_ndo_set_vf_port_vlan,
index 38eb815..10a83e5 100644 (file)
@@ -3631,6 +3631,55 @@ dma_error:
        return -1;
 }
 
+static u16 i40e_swdcb_skb_tx_hash(struct net_device *dev,
+                                 const struct sk_buff *skb,
+                                 u16 num_tx_queues)
+{
+       u32 jhash_initval_salt = 0xd631614b;
+       u32 hash;
+
+       if (skb->sk && skb->sk->sk_hash)
+               hash = skb->sk->sk_hash;
+       else
+               hash = (__force u16)skb->protocol ^ skb->hash;
+
+       hash = jhash_1word(hash, jhash_initval_salt);
+
+       return (u16)(((u64)hash * num_tx_queues) >> 32);
+}
+
+u16 i40e_lan_select_queue(struct net_device *netdev,
+                         struct sk_buff *skb,
+                         struct net_device __always_unused *sb_dev)
+{
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_vsi *vsi = np->vsi;
+       struct i40e_hw *hw;
+       u16 qoffset;
+       u16 qcount;
+       u8 tclass;
+       u16 hash;
+       u8 prio;
+
+       /* is DCB enabled at all? */
+       if (vsi->tc_config.numtc == 1)
+               return netdev_pick_tx(netdev, skb, sb_dev);
+
+       prio = skb->priority;
+       hw = &vsi->back->hw;
+       tclass = hw->local_dcbx_config.etscfg.prioritytable[prio];
+       /* sanity check */
+       if (unlikely(!(vsi->tc_config.enabled_tc & BIT(tclass))))
+               tclass = 0;
+
+       /* select a queue assigned for the given TC */
+       qcount = vsi->tc_config.tc_info[tclass].qcount;
+       hash = i40e_swdcb_skb_tx_hash(netdev, skb, qcount);
+
+       qoffset = vsi->tc_config.tc_info[tclass].qoffset;
+       return qoffset + hash;
+}
+
 /**
  * i40e_xmit_xdp_ring - transmits an XDP buffer to an XDP Tx ring
  * @xdpf: data to transmit
index 86fed05..bfc2845 100644 (file)
@@ -451,6 +451,8 @@ static inline unsigned int i40e_rx_pg_order(struct i40e_ring *ring)
 
 bool i40e_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
 netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
+u16 i40e_lan_select_queue(struct net_device *netdev, struct sk_buff *skb,
+                         struct net_device *sb_dev);
 void i40e_clean_tx_ring(struct i40e_ring *tx_ring);
 void i40e_clean_rx_ring(struct i40e_ring *rx_ring);
 int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring);
index e8bd041..90793b3 100644 (file)
@@ -136,6 +136,7 @@ struct iavf_q_vector {
 struct iavf_mac_filter {
        struct list_head list;
        u8 macaddr[ETH_ALEN];
+       bool is_new_mac;        /* filter is new, wait for PF decision */
        bool remove;            /* filter needs to be removed */
        bool add;               /* filter needs to be added */
 };
index 44bafed..606a01c 100644 (file)
@@ -751,6 +751,7 @@ struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
 
                list_add_tail(&f->list, &adapter->mac_filter_list);
                f->add = true;
+               f->is_new_mac = true;
                adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
        } else {
                f->remove = false;
@@ -1506,11 +1507,6 @@ static int iavf_reinit_interrupt_scheme(struct iavf_adapter *adapter)
        set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
 
        iavf_map_rings_to_vectors(adapter);
-
-       if (RSS_AQ(adapter))
-               adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS;
-       else
-               err = iavf_init_rss(adapter);
 err:
        return err;
 }
@@ -2200,6 +2196,14 @@ continue_reset:
                        goto reset_err;
        }
 
+       if (RSS_AQ(adapter)) {
+               adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS;
+       } else {
+               err = iavf_init_rss(adapter);
+               if (err)
+                       goto reset_err;
+       }
+
        adapter->aq_required |= IAVF_FLAG_AQ_GET_CONFIG;
        adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS;
 
index 0eab3c4..3c73596 100644 (file)
@@ -540,6 +540,47 @@ void iavf_del_ether_addrs(struct iavf_adapter *adapter)
        kfree(veal);
 }
 
+/**
+ * iavf_mac_add_ok
+ * @adapter: adapter structure
+ *
+ * Submit list of filters based on PF response.
+ **/
+static void iavf_mac_add_ok(struct iavf_adapter *adapter)
+{
+       struct iavf_mac_filter *f, *ftmp;
+
+       spin_lock_bh(&adapter->mac_vlan_list_lock);
+       list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
+               f->is_new_mac = false;
+       }
+       spin_unlock_bh(&adapter->mac_vlan_list_lock);
+}
+
+/**
+ * iavf_mac_add_reject
+ * @adapter: adapter structure
+ *
+ * Remove filters from list based on PF response.
+ **/
+static void iavf_mac_add_reject(struct iavf_adapter *adapter)
+{
+       struct net_device *netdev = adapter->netdev;
+       struct iavf_mac_filter *f, *ftmp;
+
+       spin_lock_bh(&adapter->mac_vlan_list_lock);
+       list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
+               if (f->remove && ether_addr_equal(f->macaddr, netdev->dev_addr))
+                       f->remove = false;
+
+               if (f->is_new_mac) {
+                       list_del(&f->list);
+                       kfree(f);
+               }
+       }
+       spin_unlock_bh(&adapter->mac_vlan_list_lock);
+}
+
 /**
  * iavf_add_vlans
  * @adapter: adapter structure
@@ -1492,6 +1533,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
                case VIRTCHNL_OP_ADD_ETH_ADDR:
                        dev_err(&adapter->pdev->dev, "Failed to add MAC filter, error %s\n",
                                iavf_stat_str(&adapter->hw, v_retval));
+                       iavf_mac_add_reject(adapter);
                        /* restore administratively set MAC address */
                        ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
                        break;
@@ -1639,10 +1681,11 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
                }
        }
        switch (v_opcode) {
-       case VIRTCHNL_OP_ADD_ETH_ADDR: {
+       case VIRTCHNL_OP_ADD_ETH_ADDR:
+               if (!v_retval)
+                       iavf_mac_add_ok(adapter);
                if (!ether_addr_equal(netdev->dev_addr, adapter->hw.mac.addr))
                        ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
-               }
                break;
        case VIRTCHNL_OP_GET_STATS: {
                struct iavf_eth_stats *stats =
index a450343..eadcb99 100644 (file)
@@ -234,6 +234,7 @@ enum ice_pf_state {
        ICE_VFLR_EVENT_PENDING,
        ICE_FLTR_OVERFLOW_PROMISC,
        ICE_VF_DIS,
+       ICE_VF_DEINIT_IN_PROGRESS,
        ICE_CFG_BUSY,
        ICE_SERVICE_SCHED,
        ICE_SERVICE_DIS,
index 91b545a..7fe6e8e 100644 (file)
@@ -42,7 +42,9 @@ static int ice_info_pba(struct ice_pf *pf, struct ice_info_ctx *ctx)
 
        status = ice_read_pba_string(hw, (u8 *)ctx->buf, sizeof(ctx->buf));
        if (status)
-               return -EIO;
+               /* We failed to locate the PBA, so just skip this entry */
+               dev_dbg(ice_pf_to_dev(pf), "Failed to read Product Board Assembly string, status %s\n",
+                       ice_stat_str(status));
 
        return 0;
 }
index ef8d181..fe2ded7 100644 (file)
@@ -191,6 +191,14 @@ static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr)
        struct ice_netdev_priv *np = netdev_priv(netdev);
        struct ice_vsi *vsi = np->vsi;
 
+       /* Under some circumstances, we might receive a request to delete our
+        * own device address from our uc list. Because we store the device
+        * address in the VSI's MAC filter list, we need to ignore such
+        * requests and not delete our device address from this list.
+        */
+       if (ether_addr_equal(addr, netdev->dev_addr))
+               return 0;
+
        if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr,
                                     ICE_FWD_TO_VSI))
                return -EINVAL;
@@ -4194,6 +4202,11 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
        struct ice_hw *hw;
        int i, err;
 
+       if (pdev->is_virtfn) {
+               dev_err(dev, "can't probe a virtual function\n");
+               return -EINVAL;
+       }
+
        /* this driver uses devres, see
         * Documentation/driver-api/driver-model/devres.rst
         */
@@ -5119,7 +5132,7 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi)
                return -EADDRNOTAVAIL;
 
        if (ether_addr_equal(netdev->dev_addr, mac)) {
-               netdev_warn(netdev, "already using mac %pM\n", mac);
+               netdev_dbg(netdev, "already using mac %pM\n", mac);
                return 0;
        }
 
@@ -5130,6 +5143,7 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi)
                return -EBUSY;
        }
 
+       netif_addr_lock_bh(netdev);
        /* Clean up old MAC filter. Not an error if old filter doesn't exist */
        status = ice_fltr_remove_mac(vsi, netdev->dev_addr, ICE_FWD_TO_VSI);
        if (status && status != ICE_ERR_DOES_NOT_EXIST) {
@@ -5139,30 +5153,28 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi)
 
        /* Add filter for new MAC. If filter exists, return success */
        status = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI);
-       if (status == ICE_ERR_ALREADY_EXISTS) {
+       if (status == ICE_ERR_ALREADY_EXISTS)
                /* Although this MAC filter is already present in hardware it's
                 * possible in some cases (e.g. bonding) that dev_addr was
                 * modified outside of the driver and needs to be restored back
                 * to this value.
                 */
-               memcpy(netdev->dev_addr, mac, netdev->addr_len);
                netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac);
-               return 0;
-       }
-
-       /* error if the new filter addition failed */
-       if (status)
+       else if (status)
+               /* error if the new filter addition failed */
                err = -EADDRNOTAVAIL;
 
 err_update_filters:
        if (err) {
                netdev_err(netdev, "can't set MAC %pM. filter update failed\n",
                           mac);
+               netif_addr_unlock_bh(netdev);
                return err;
        }
 
        /* change the netdev's MAC address */
        memcpy(netdev->dev_addr, mac, netdev->addr_len);
+       netif_addr_unlock_bh(netdev);
        netdev_dbg(vsi->netdev, "updated MAC address to %pM\n",
                   netdev->dev_addr);
 
index 5d5207b..9e3ddb9 100644 (file)
@@ -656,7 +656,7 @@ static int ice_ptp_cfg_clkout(struct ice_pf *pf, unsigned int chan,
         * maintaining phase
         */
        if (start_time < current_time)
-               start_time = div64_u64(current_time + NSEC_PER_MSEC - 1,
+               start_time = div64_u64(current_time + NSEC_PER_SEC - 1,
                                       NSEC_PER_SEC) * NSEC_PER_SEC + phase;
 
        start_time -= E810_OUT_PROP_DELAY_NS;
index 2826570..e93430a 100644 (file)
@@ -615,6 +615,8 @@ void ice_free_vfs(struct ice_pf *pf)
        struct ice_hw *hw = &pf->hw;
        unsigned int tmp, i;
 
+       set_bit(ICE_VF_DEINIT_IN_PROGRESS, pf->state);
+
        if (!pf->vf)
                return;
 
@@ -680,6 +682,7 @@ void ice_free_vfs(struct ice_pf *pf)
                                i);
 
        clear_bit(ICE_VF_DIS, pf->state);
+       clear_bit(ICE_VF_DEINIT_IN_PROGRESS, pf->state);
        clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
 }
 
@@ -4415,6 +4418,10 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
        struct device *dev;
        int err = 0;
 
+       /* if de-init is underway, don't process messages from VF */
+       if (test_bit(ICE_VF_DEINIT_IN_PROGRESS, pf->state))
+               return;
+
        dev = ice_pf_to_dev(pf);
        if (ice_validate_vf_id(pf, vf_id)) {
                err = -EINVAL;
index e29aadb..ed2d66b 100644 (file)
@@ -149,6 +149,9 @@ static void igc_release_hw_control(struct igc_adapter *adapter)
        struct igc_hw *hw = &adapter->hw;
        u32 ctrl_ext;
 
+       if (!pci_device_is_present(adapter->pdev))
+               return;
+
        /* Let firmware take over control of h/w */
        ctrl_ext = rd32(IGC_CTRL_EXT);
        wr32(IGC_CTRL_EXT,
@@ -4449,26 +4452,29 @@ void igc_down(struct igc_adapter *adapter)
 
        igc_ptp_suspend(adapter);
 
-       /* disable receives in the hardware */
-       rctl = rd32(IGC_RCTL);
-       wr32(IGC_RCTL, rctl & ~IGC_RCTL_EN);
-       /* flush and sleep below */
-
+       if (pci_device_is_present(adapter->pdev)) {
+               /* disable receives in the hardware */
+               rctl = rd32(IGC_RCTL);
+               wr32(IGC_RCTL, rctl & ~IGC_RCTL_EN);
+               /* flush and sleep below */
+       }
        /* set trans_start so we don't get spurious watchdogs during reset */
        netif_trans_update(netdev);
 
        netif_carrier_off(netdev);
        netif_tx_stop_all_queues(netdev);
 
-       /* disable transmits in the hardware */
-       tctl = rd32(IGC_TCTL);
-       tctl &= ~IGC_TCTL_EN;
-       wr32(IGC_TCTL, tctl);
-       /* flush both disables and wait for them to finish */
-       wrfl();
-       usleep_range(10000, 20000);
+       if (pci_device_is_present(adapter->pdev)) {
+               /* disable transmits in the hardware */
+               tctl = rd32(IGC_TCTL);
+               tctl &= ~IGC_TCTL_EN;
+               wr32(IGC_TCTL, tctl);
+               /* flush both disables and wait for them to finish */
+               wrfl();
+               usleep_range(10000, 20000);
 
-       igc_irq_disable(adapter);
+               igc_irq_disable(adapter);
+       }
 
        adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE;
 
@@ -5489,7 +5495,7 @@ static bool validate_schedule(struct igc_adapter *adapter,
                if (e->command != TC_TAPRIO_CMD_SET_GATES)
                        return false;
 
-               for (i = 0; i < IGC_MAX_TX_QUEUES; i++) {
+               for (i = 0; i < adapter->num_tx_queues; i++) {
                        if (e->gate_mask & BIT(i))
                                queue_uses[i]++;
 
@@ -5546,7 +5552,7 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
 
                end_time += e->interval;
 
-               for (i = 0; i < IGC_MAX_TX_QUEUES; i++) {
+               for (i = 0; i < adapter->num_tx_queues; i++) {
                        struct igc_ring *ring = adapter->tx_ring[i];
 
                        if (!(e->gate_mask & BIT(i)))
index 69617d2..4ae19c6 100644 (file)
@@ -849,7 +849,8 @@ void igc_ptp_suspend(struct igc_adapter *adapter)
        adapter->ptp_tx_skb = NULL;
        clear_bit_unlock(__IGC_PTP_TX_IN_PROGRESS, &adapter->state);
 
-       igc_ptp_time_save(adapter);
+       if (pci_device_is_present(adapter->pdev))
+               igc_ptp_time_save(adapter);
 }
 
 /**
index 96dd1a4..b1d22e4 100644 (file)
@@ -52,8 +52,11 @@ static int ixgbe_xsk_pool_enable(struct ixgbe_adapter *adapter,
 
                /* Kick start the NAPI context so that receiving will start */
                err = ixgbe_xsk_wakeup(adapter->netdev, qid, XDP_WAKEUP_RX);
-               if (err)
+               if (err) {
+                       clear_bit(qid, adapter->af_xdp_zc_qps);
+                       xsk_pool_dma_unmap(pool, IXGBE_RX_DMA_ATTR);
                        return err;
+               }
        }
 
        return 0;
index 76a7777..de32e5b 100644 (file)
 #define        MVNETA_VLAN_PRIO_TO_RXQ                  0x2440
 #define      MVNETA_VLAN_PRIO_RXQ_MAP(prio, rxq) ((rxq) << ((prio) * 3))
 #define MVNETA_PORT_STATUS                       0x2444
-#define      MVNETA_TX_IN_PRGRS                  BIT(1)
+#define      MVNETA_TX_IN_PRGRS                  BIT(0)
 #define      MVNETA_TX_FIFO_EMPTY                BIT(8)
 #define MVNETA_RX_MIN_FRAME_SIZE                 0x247c
 /* Only exists on Armada XP and Armada 370 */
index b9fbc9f..cf8acab 100644 (file)
@@ -938,7 +938,7 @@ enum mvpp22_ptp_packet_format {
 #define MVPP2_BM_COOKIE_POOL_OFFS      8
 #define MVPP2_BM_COOKIE_CPU_OFFS       24
 
-#define MVPP2_BM_SHORT_FRAME_SIZE      704     /* frame size 128 */
+#define MVPP2_BM_SHORT_FRAME_SIZE      736     /* frame size 128 */
 #define MVPP2_BM_LONG_FRAME_SIZE       2240    /* frame size 1664 */
 #define MVPP2_BM_JUMBO_FRAME_SIZE      10432   /* frame size 9856 */
 /* BM short pool packet size
index 9169849..544c96c 100644 (file)
@@ -1504,8 +1504,8 @@ static int cgx_lmac_init(struct cgx *cgx)
 
                /* Add reference */
                cgx->lmac_idmap[lmac->lmac_id] = lmac;
-               cgx->mac_ops->mac_pause_frm_config(cgx, lmac->lmac_id, true);
                set_bit(lmac->lmac_id, &cgx->lmac_bmap);
+               cgx->mac_ops->mac_pause_frm_config(cgx, lmac->lmac_id, true);
        }
 
        return cgx_lmac_verify_fwi_version(cgx);
index 19bad9a..243cf80 100644 (file)
@@ -151,7 +151,10 @@ enum npc_kpu_lh_ltype {
  * Software assigns pkind for each incoming port such as CGX
  * Ethernet interfaces, LBK interfaces, etc.
  */
+#define NPC_UNRESERVED_PKIND_COUNT NPC_RX_VLAN_EXDSA_PKIND
+
 enum npc_pkind_type {
+       NPC_RX_LBK_PKIND = 0ULL,
        NPC_RX_VLAN_EXDSA_PKIND = 56ULL,
        NPC_RX_CHLEN24B_PKIND = 57ULL,
        NPC_RX_CPT_HDR_PKIND,
index 017163f..5fe277e 100644 (file)
@@ -391,8 +391,10 @@ void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf)
 
        /* Get numVFs attached to this PF and first HWVF */
        cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
-       *numvfs = (cfg >> 12) & 0xFF;
-       *hwvf = cfg & 0xFFF;
+       if (numvfs)
+               *numvfs = (cfg >> 12) & 0xFF;
+       if (hwvf)
+               *hwvf = cfg & 0xFFF;
 }
 
 static int rvu_get_hwvf(struct rvu *rvu, int pcifunc)
index 0933699..4bfbbdf 100644 (file)
@@ -196,11 +196,22 @@ static void nix_rx_sync(struct rvu *rvu, int blkaddr)
 {
        int err;
 
-       /*Sync all in flight RX packets to LLC/DRAM */
+       /* Sync all in flight RX packets to LLC/DRAM */
        rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
        err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
        if (err)
-               dev_err(rvu->dev, "NIX RX software sync failed\n");
+               dev_err(rvu->dev, "SYNC1: NIX RX software sync failed\n");
+
+       /* SW_SYNC ensures all existing transactions are finished and pkts
+        * are written to LLC/DRAM, queues should be teared down after
+        * successful SW_SYNC. Due to a HW errata, in some rare scenarios
+        * an existing transaction might end after SW_SYNC operation. To
+        * ensure operation is fully done, do the SW_SYNC twice.
+        */
+       rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
+       err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
+       if (err)
+               dev_err(rvu->dev, "SYNC2: NIX RX software sync failed\n");
 }
 
 static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
@@ -298,6 +309,7 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
                                        rvu_nix_chan_lbk(rvu, lbkid, vf + 1);
                pfvf->rx_chan_cnt = 1;
                pfvf->tx_chan_cnt = 1;
+               rvu_npc_set_pkind(rvu, NPC_RX_LBK_PKIND, pfvf);
                rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
                                              pfvf->rx_chan_base,
                                              pfvf->rx_chan_cnt);
@@ -3842,7 +3854,6 @@ static void rvu_nix_block_freemem(struct rvu *rvu, int blkaddr,
                vlan = &nix_hw->txvlan;
                kfree(vlan->rsrc.bmap);
                mutex_destroy(&vlan->rsrc_lock);
-               devm_kfree(rvu->dev, vlan->entry2pfvf_map);
 
                mcast = &nix_hw->mcast;
                qmem_free(rvu->dev, mcast->mce_ctx);
index 1097291..52b2554 100644 (file)
@@ -1721,7 +1721,6 @@ static void npc_parser_profile_init(struct rvu *rvu, int blkaddr)
 {
        struct rvu_hwinfo *hw = rvu->hw;
        int num_pkinds, num_kpus, idx;
-       struct npc_pkind *pkind;
 
        /* Disable all KPUs and their entries */
        for (idx = 0; idx < hw->npc_kpus; idx++) {
@@ -1739,9 +1738,8 @@ static void npc_parser_profile_init(struct rvu *rvu, int blkaddr)
         * Check HW max count to avoid configuring junk or
         * writing to unsupported CSR addresses.
         */
-       pkind = &hw->pkind;
        num_pkinds = rvu->kpu.pkinds;
-       num_pkinds = min_t(int, pkind->rsrc.max, num_pkinds);
+       num_pkinds = min_t(int, hw->npc_pkinds, num_pkinds);
 
        for (idx = 0; idx < num_pkinds; idx++)
                npc_config_kpuaction(rvu, blkaddr, &rvu->kpu.ikpu[idx], 0, idx, true);
@@ -1891,7 +1889,8 @@ static void rvu_npc_hw_init(struct rvu *rvu, int blkaddr)
        if (npc_const1 & BIT_ULL(63))
                npc_const2 = rvu_read64(rvu, blkaddr, NPC_AF_CONST2);
 
-       pkind->rsrc.max = (npc_const1 >> 12) & 0xFFULL;
+       pkind->rsrc.max = NPC_UNRESERVED_PKIND_COUNT;
+       hw->npc_pkinds = (npc_const1 >> 12) & 0xFFULL;
        hw->npc_kpu_entries = npc_const1 & 0xFFFULL;
        hw->npc_kpus = (npc_const >> 8) & 0x1FULL;
        hw->npc_intfs = npc_const & 0xFULL;
@@ -2002,6 +2001,10 @@ int rvu_npc_init(struct rvu *rvu)
        err = rvu_alloc_bitmap(&pkind->rsrc);
        if (err)
                return err;
+       /* Reserve PKIND#0 for LBKs. Power reset value of LBK_CH_PKIND is '0',
+        * no need to configure PKIND for all LBKs separately.
+        */
+       rvu_alloc_rsrc(&pkind->rsrc);
 
        /* Allocate mem for pkind to PF and channel mapping info */
        pkind->pfchan_map = devm_kcalloc(rvu->dev, pkind->rsrc.max,
index 2e53797..820adf3 100644 (file)
@@ -71,8 +71,8 @@ static int rvu_switch_install_rules(struct rvu *rvu)
        struct rvu_switch *rswitch = &rvu->rswitch;
        u16 start = rswitch->start_entry;
        struct rvu_hwinfo *hw = rvu->hw;
-       int pf, vf, numvfs, hwvf;
        u16 pcifunc, entry = 0;
+       int pf, vf, numvfs;
        int err;
 
        for (pf = 1; pf < hw->total_pfs; pf++) {
@@ -110,8 +110,8 @@ static int rvu_switch_install_rules(struct rvu *rvu)
 
                rswitch->entry2pcifunc[entry++] = pcifunc;
 
-               rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
-               for (vf = 0; vf < numvfs; vf++, hwvf++) {
+               rvu_get_pf_numvfs(rvu, pf, &numvfs, NULL);
+               for (vf = 0; vf < numvfs; vf++) {
                        pcifunc = pf << 10 | ((vf + 1) & 0x3FF);
                        rvu_get_nix_blkaddr(rvu, pcifunc);
 
@@ -198,7 +198,7 @@ void rvu_switch_disable(struct rvu *rvu)
        struct npc_mcam_free_entry_req free_req = { 0 };
        struct rvu_switch *rswitch = &rvu->rswitch;
        struct rvu_hwinfo *hw = rvu->hw;
-       int pf, vf, numvfs, hwvf;
+       int pf, vf, numvfs;
        struct msg_rsp rsp;
        u16 pcifunc;
        int err;
@@ -217,7 +217,8 @@ void rvu_switch_disable(struct rvu *rvu)
                                "Reverting RX rule for PF%d failed(%d)\n",
                                pf, err);
 
-               for (vf = 0; vf < numvfs; vf++, hwvf++) {
+               rvu_get_pf_numvfs(rvu, pf, &numvfs, NULL);
+               for (vf = 0; vf < numvfs; vf++) {
                        pcifunc = pf << 10 | ((vf + 1) & 0x3FF);
                        err = rvu_switch_install_rx_rule(rvu, pcifunc, 0xFFF);
                        if (err)
index 7cccd80..70fcc1f 100644 (file)
@@ -924,12 +924,14 @@ static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx)
                aq->cq.drop = RQ_DROP_LVL_CQ(pfvf->hw.rq_skid, cq->cqe_cnt);
                aq->cq.drop_ena = 1;
 
-               /* Enable receive CQ backpressure */
-               aq->cq.bp_ena = 1;
-               aq->cq.bpid = pfvf->bpid[0];
+               if (!is_otx2_lbkvf(pfvf->pdev)) {
+                       /* Enable receive CQ backpressure */
+                       aq->cq.bp_ena = 1;
+                       aq->cq.bpid = pfvf->bpid[0];
 
-               /* Set backpressure level is same as cq pass level */
-               aq->cq.bp = RQ_PASS_LVL_CQ(pfvf->hw.rq_skid, qset->rqe_cnt);
+                       /* Set backpressure level is same as cq pass level */
+                       aq->cq.bp = RQ_PASS_LVL_CQ(pfvf->hw.rq_skid, qset->rqe_cnt);
+               }
        }
 
        /* Fill AQ info */
@@ -1186,7 +1188,7 @@ static int otx2_aura_init(struct otx2_nic *pfvf, int aura_id,
        aq->aura.fc_hyst_bits = 0; /* Store count on all updates */
 
        /* Enable backpressure for RQ aura */
-       if (aura_id < pfvf->hw.rqpool_cnt) {
+       if (aura_id < pfvf->hw.rqpool_cnt && !is_otx2_lbkvf(pfvf->pdev)) {
                aq->aura.bp_ena = 0;
                aq->aura.nix0_bpid = pfvf->bpid[0];
                /* Set backpressure level for RQ's Aura */
index 8df748e..b906a0e 100644 (file)
@@ -298,15 +298,14 @@ static int otx2_set_channels(struct net_device *dev,
        err = otx2_set_real_num_queues(dev, channel->tx_count,
                                       channel->rx_count);
        if (err)
-               goto fail;
+               return err;
 
        pfvf->hw.rx_queues = channel->rx_count;
        pfvf->hw.tx_queues = channel->tx_count;
        pfvf->qset.cq_cnt = pfvf->hw.tx_queues +  pfvf->hw.rx_queues;
 
-fail:
        if (if_up)
-               dev->netdev_ops->ndo_open(dev);
+               err = dev->netdev_ops->ndo_open(dev);
 
        netdev_info(dev, "Setting num Tx rings to %d, Rx rings to %d success\n",
                    pfvf->hw.tx_queues, pfvf->hw.rx_queues);
@@ -410,7 +409,7 @@ static int otx2_set_ringparam(struct net_device *netdev,
        qs->rqe_cnt = rx_count;
 
        if (if_up)
-               netdev->netdev_ops->ndo_open(netdev);
+               return netdev->netdev_ops->ndo_open(netdev);
 
        return 0;
 }
index f300b80..2c24944 100644 (file)
@@ -1662,6 +1662,7 @@ int otx2_open(struct net_device *netdev)
 err_tx_stop_queues:
        netif_tx_stop_all_queues(netdev);
        netif_carrier_off(netdev);
+       pf->flags |= OTX2_FLAG_INTF_DOWN;
 err_free_cints:
        otx2_free_cints(pf, qidx);
        vec = pci_irq_vector(pf->pdev,
@@ -1689,6 +1690,10 @@ int otx2_stop(struct net_device *netdev)
        struct otx2_rss_info *rss;
        int qidx, vec, wrk;
 
+       /* If the DOWN flag is set resources are already freed */
+       if (pf->flags & OTX2_FLAG_INTF_DOWN)
+               return 0;
+
        netif_carrier_off(netdev);
        netif_tx_stop_all_queues(netdev);
 
index d12e21d..fa7a068 100644 (file)
@@ -530,6 +530,8 @@ err_trap_register:
                prestera_trap = &prestera_trap_items_arr[i];
                devlink_traps_unregister(devlink, &prestera_trap->trap, 1);
        }
+       devlink_trap_groups_unregister(devlink, prestera_trap_groups_arr,
+                                      groups_count);
 err_groups_register:
        kfree(trap_data->trap_items_arr);
 err_trap_items_alloc:
index 0b3e8f2..9a30916 100644 (file)
@@ -748,7 +748,7 @@ static void
 prestera_fdb_offload_notify(struct prestera_port *port,
                            struct switchdev_notifier_fdb_info *info)
 {
-       struct switchdev_notifier_fdb_info send_info;
+       struct switchdev_notifier_fdb_info send_info = {};
 
        send_info.addr = info->addr;
        send_info.vid = info->vid;
@@ -1123,7 +1123,7 @@ static int prestera_switchdev_blk_event(struct notifier_block *unused,
 static void prestera_fdb_event(struct prestera_switch *sw,
                               struct prestera_event *evt, void *arg)
 {
-       struct switchdev_notifier_fdb_info info;
+       struct switchdev_notifier_fdb_info info = {};
        struct net_device *dev = NULL;
        struct prestera_port *port;
        struct prestera_lag *lag;
index 00c8465..28ac469 100644 (file)
@@ -3535,6 +3535,7 @@ slave_start:
 
                if (!SRIOV_VALID_STATE(dev->flags)) {
                        mlx4_err(dev, "Invalid SRIOV state\n");
+                       err = -EINVAL;
                        goto err_close;
                }
        }
index df3e493..360e093 100644 (file)
@@ -134,6 +134,7 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
                              cq->cqn);
 
        cq->uar = dev->priv.uar;
+       cq->irqn = eq->core.irqn;
 
        return 0;
 
index ceebfc2..def2156 100644 (file)
@@ -500,10 +500,7 @@ static int next_phys_dev(struct device *dev, const void *data)
        return 1;
 }
 
-/* This function is called with two flows:
- * 1. During initialization of mlx5_core_dev and we don't need to lock it.
- * 2. During LAG configure stage and caller holds &mlx5_intf_mutex.
- */
+/* Must be called with intf_mutex held */
 struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev)
 {
        struct auxiliary_device *adev;
index 01a1d02..3f8a980 100644 (file)
@@ -1019,12 +1019,19 @@ int mlx5_fw_tracer_init(struct mlx5_fw_tracer *tracer)
        MLX5_NB_INIT(&tracer->nb, fw_tracer_event, DEVICE_TRACER);
        mlx5_eq_notifier_register(dev, &tracer->nb);
 
-       mlx5_fw_tracer_start(tracer);
-
+       err = mlx5_fw_tracer_start(tracer);
+       if (err) {
+               mlx5_core_warn(dev, "FWTracer: Failed to start tracer %d\n", err);
+               goto err_notifier_unregister;
+       }
        return 0;
 
+err_notifier_unregister:
+       mlx5_eq_notifier_unregister(dev, &tracer->nb);
+       mlx5_core_destroy_mkey(dev, &tracer->buff.mkey);
 err_dealloc_pd:
        mlx5_core_dealloc_pd(dev, tracer->buff.pdn);
+       cancel_work_sync(&tracer->read_fw_strings_work);
        return err;
 }
 
index 150c8e8..2cbf18c 100644 (file)
@@ -471,6 +471,15 @@ static void mlx5e_build_rx_cq_param(struct mlx5_core_dev *mdev,
        param->cq_period_mode = params->rx_cq_moderation.cq_period_mode;
 }
 
+static u8 rq_end_pad_mode(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
+{
+       bool ro = pcie_relaxed_ordering_enabled(mdev->pdev) &&
+               MLX5_CAP_GEN(mdev, relaxed_ordering_write);
+
+       return ro && params->lro_en ?
+               MLX5_WQ_END_PAD_MODE_NONE : MLX5_WQ_END_PAD_MODE_ALIGN;
+}
+
 int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
                         struct mlx5e_params *params,
                         struct mlx5e_xsk_param *xsk,
@@ -508,7 +517,7 @@ int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
        }
 
        MLX5_SET(wq, wq, wq_type,          params->rq_wq_type);
-       MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
+       MLX5_SET(wq, wq, end_padding_mode, rq_end_pad_mode(mdev, params));
        MLX5_SET(wq, wq, log_wq_stride,
                 mlx5e_get_rqwq_log_stride(params->rq_wq_type, ndsegs));
        MLX5_SET(wq, wq, pd,               mdev->mlx5e_res.hw_objs.pdn);
index 778e229..efef4ad 100644 (file)
@@ -482,8 +482,11 @@ static void mlx5e_ptp_build_params(struct mlx5e_ptp *c,
                params->log_sq_size = orig->log_sq_size;
                mlx5e_ptp_build_sq_param(c->mdev, params, &cparams->txq_sq_param);
        }
-       if (test_bit(MLX5E_PTP_STATE_RX, c->state))
+       /* RQ */
+       if (test_bit(MLX5E_PTP_STATE_RX, c->state)) {
+               params->vlan_strip_disable = orig->vlan_strip_disable;
                mlx5e_ptp_build_rq_param(c->mdev, c->netdev, c->priv->q_counter, cparams);
+       }
 }
 
 static int mlx5e_init_ptp_rq(struct mlx5e_ptp *c, struct mlx5e_params *params,
@@ -494,7 +497,7 @@ static int mlx5e_init_ptp_rq(struct mlx5e_ptp *c, struct mlx5e_params *params,
        int err;
 
        rq->wq_type      = params->rq_wq_type;
-       rq->pdev         = mdev->device;
+       rq->pdev         = c->pdev;
        rq->netdev       = priv->netdev;
        rq->priv         = priv;
        rq->clock        = &mdev->clock;
index 8f79f04..1e2d117 100644 (file)
@@ -124,6 +124,11 @@ static int mlx5e_route_lookup_ipv4_get(struct mlx5e_priv *priv,
        if (IS_ERR(rt))
                return PTR_ERR(rt);
 
+       if (rt->rt_type != RTN_UNICAST) {
+               ret = -ENETUNREACH;
+               goto err_rt_release;
+       }
+
        if (mlx5_lag_is_multipath(mdev) && rt->rt_gw_family != AF_INET) {
                ret = -ENETUNREACH;
                goto err_rt_release;
index 86ab4e8..7f94508 100644 (file)
@@ -37,7 +37,7 @@ static void mlx5e_init_trap_rq(struct mlx5e_trap *t, struct mlx5e_params *params
        struct mlx5e_priv *priv = t->priv;
 
        rq->wq_type      = params->rq_wq_type;
-       rq->pdev         = mdev->device;
+       rq->pdev         = t->pdev;
        rq->netdev       = priv->netdev;
        rq->priv         = priv;
        rq->clock        = &mdev->clock;
index d09e655..24f919e 100644 (file)
@@ -1535,15 +1535,9 @@ static int mlx5e_alloc_cq_common(struct mlx5e_priv *priv,
 {
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5_core_cq *mcq = &cq->mcq;
-       int eqn_not_used;
-       unsigned int irqn;
        int err;
        u32 i;
 
-       err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);
-       if (err)
-               return err;
-
        err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
                               &cq->wq_ctrl);
        if (err)
@@ -1557,7 +1551,6 @@ static int mlx5e_alloc_cq_common(struct mlx5e_priv *priv,
        mcq->vector     = param->eq_ix;
        mcq->comp       = mlx5e_completion_event;
        mcq->event      = mlx5e_cq_error_event;
-       mcq->irqn       = irqn;
 
        for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
                struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);
@@ -1605,11 +1598,10 @@ static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
        void *in;
        void *cqc;
        int inlen;
-       unsigned int irqn_not_used;
        int eqn;
        int err;
 
-       err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);
+       err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn);
        if (err)
                return err;
 
@@ -1891,30 +1883,30 @@ static int mlx5e_open_queues(struct mlx5e_channel *c,
        if (err)
                goto err_close_icosq;
 
+       err = mlx5e_open_rxq_rq(c, params, &cparam->rq);
+       if (err)
+               goto err_close_sqs;
+
        if (c->xdp) {
                err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, NULL,
                                       &c->rq_xdpsq, false);
                if (err)
-                       goto err_close_sqs;
+                       goto err_close_rq;
        }
 
-       err = mlx5e_open_rxq_rq(c, params, &cparam->rq);
-       if (err)
-               goto err_close_xdp_sq;
-
        err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, NULL, &c->xdpsq, true);
        if (err)
-               goto err_close_rq;
+               goto err_close_xdp_sq;
 
        return 0;
 
-err_close_rq:
-       mlx5e_close_rq(&c->rq);
-
 err_close_xdp_sq:
        if (c->xdp)
                mlx5e_close_xdpsq(&c->rq_xdpsq);
 
+err_close_rq:
+       mlx5e_close_rq(&c->rq);
+
 err_close_sqs:
        mlx5e_close_sqs(c);
 
@@ -1949,9 +1941,9 @@ err_close_async_icosq_cq:
 static void mlx5e_close_queues(struct mlx5e_channel *c)
 {
        mlx5e_close_xdpsq(&c->xdpsq);
-       mlx5e_close_rq(&c->rq);
        if (c->xdp)
                mlx5e_close_xdpsq(&c->rq_xdpsq);
+       mlx5e_close_rq(&c->rq);
        mlx5e_close_sqs(c);
        mlx5e_close_icosq(&c->icosq);
        mlx5e_close_icosq(&c->async_icosq);
@@ -1983,9 +1975,8 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
        struct mlx5e_channel *c;
        unsigned int irq;
        int err;
-       int eqn;
 
-       err = mlx5_vector2eqn(priv->mdev, ix, &eqn, &irq);
+       err = mlx5_vector2irqn(priv->mdev, ix, &irq);
        if (err)
                return err;
 
@@ -3384,7 +3375,7 @@ static int mlx5e_modify_channels_scatter_fcs(struct mlx5e_channels *chs, bool en
 
 static int mlx5e_modify_channels_vsd(struct mlx5e_channels *chs, bool vsd)
 {
-       int err = 0;
+       int err;
        int i;
 
        for (i = 0; i < chs->num; i++) {
@@ -3392,6 +3383,8 @@ static int mlx5e_modify_channels_vsd(struct mlx5e_channels *chs, bool vsd)
                if (err)
                        return err;
        }
+       if (chs->ptp && test_bit(MLX5E_PTP_STATE_RX, chs->ptp->state))
+               return mlx5e_modify_rq_vsd(&chs->ptp->rq, vsd);
 
        return 0;
 }
@@ -3829,6 +3822,24 @@ int mlx5e_set_features(struct net_device *netdev, netdev_features_t features)
        return 0;
 }
 
+static netdev_features_t mlx5e_fix_uplink_rep_features(struct net_device *netdev,
+                                                      netdev_features_t features)
+{
+       features &= ~NETIF_F_HW_TLS_RX;
+       if (netdev->features & NETIF_F_HW_TLS_RX)
+               netdev_warn(netdev, "Disabling hw_tls_rx, not supported in switchdev mode\n");
+
+       features &= ~NETIF_F_HW_TLS_TX;
+       if (netdev->features & NETIF_F_HW_TLS_TX)
+               netdev_warn(netdev, "Disabling hw_tls_tx, not supported in switchdev mode\n");
+
+       features &= ~NETIF_F_NTUPLE;
+       if (netdev->features & NETIF_F_NTUPLE)
+               netdev_warn(netdev, "Disabling ntuple, not supported in switchdev mode\n");
+
+       return features;
+}
+
 static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
                                            netdev_features_t features)
 {
@@ -3860,15 +3871,8 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
                        netdev_warn(netdev, "Disabling rxhash, not supported when CQE compress is active\n");
        }
 
-       if (mlx5e_is_uplink_rep(priv)) {
-               features &= ~NETIF_F_HW_TLS_RX;
-               if (netdev->features & NETIF_F_HW_TLS_RX)
-                       netdev_warn(netdev, "Disabling hw_tls_rx, not supported in switchdev mode\n");
-
-               features &= ~NETIF_F_HW_TLS_TX;
-               if (netdev->features & NETIF_F_HW_TLS_TX)
-                       netdev_warn(netdev, "Disabling hw_tls_tx, not supported in switchdev mode\n");
-       }
+       if (mlx5e_is_uplink_rep(priv))
+               features = mlx5e_fix_uplink_rep_features(netdev, features);
 
        mutex_unlock(&priv->state_lock);
 
@@ -4859,6 +4863,9 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
        if (MLX5_CAP_ETH(mdev, scatter_fcs))
                netdev->hw_features |= NETIF_F_RXFCS;
 
+       if (mlx5_qos_is_supported(mdev))
+               netdev->hw_features |= NETIF_F_HW_TC;
+
        netdev->features          = netdev->hw_features;
 
        /* Defaults */
@@ -4879,8 +4886,6 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
                netdev->hw_features      |= NETIF_F_NTUPLE;
 #endif
        }
-       if (mlx5_qos_is_supported(mdev))
-               netdev->features |= NETIF_F_HW_TC;
 
        netdev->features         |= NETIF_F_HIGHDMA;
        netdev->features         |= NETIF_F_HW_VLAN_STAG_FILTER;
index 629a61e..d273758 100644 (file)
@@ -452,12 +452,32 @@ static void mlx5e_detach_mod_hdr(struct mlx5e_priv *priv,
 static
 struct mlx5_core_dev *mlx5e_hairpin_get_mdev(struct net *net, int ifindex)
 {
+       struct mlx5_core_dev *mdev;
        struct net_device *netdev;
        struct mlx5e_priv *priv;
 
-       netdev = __dev_get_by_index(net, ifindex);
+       netdev = dev_get_by_index(net, ifindex);
+       if (!netdev)
+               return ERR_PTR(-ENODEV);
+
        priv = netdev_priv(netdev);
-       return priv->mdev;
+       mdev = priv->mdev;
+       dev_put(netdev);
+
+       /* Mirred tc action holds a refcount on the ifindex net_device (see
+        * net/sched/act_mirred.c:tcf_mirred_get_dev). So, it's okay to continue using mdev
+        * after dev_put(netdev), while we're in the context of adding a tc flow.
+        *
+        * The mdev pointer corresponds to the peer/out net_device of a hairpin. It is then
+        * stored in a hairpin object, which exists until all flows, that refer to it, get
+        * removed.
+        *
+        * On the other hand, after a hairpin object has been created, the peer net_device may
+        * be removed/unbound while there are still some hairpin flows that are using it. This
+        * case is handled by mlx5e_tc_hairpin_update_dead_peer, which is hooked to
+        * NETDEV_UNREGISTER event of the peer net_device.
+        */
+       return mdev;
 }
 
 static int mlx5e_hairpin_create_transport(struct mlx5e_hairpin *hp)
@@ -666,6 +686,10 @@ mlx5e_hairpin_create(struct mlx5e_priv *priv, struct mlx5_hairpin_params *params
 
        func_mdev = priv->mdev;
        peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
+       if (IS_ERR(peer_mdev)) {
+               err = PTR_ERR(peer_mdev);
+               goto create_pair_err;
+       }
 
        pair = mlx5_core_hairpin_create(func_mdev, peer_mdev, params);
        if (IS_ERR(pair)) {
@@ -804,6 +828,11 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
        int err;
 
        peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
+       if (IS_ERR(peer_mdev)) {
+               NL_SET_ERR_MSG_MOD(extack, "invalid ifindex of mirred device");
+               return PTR_ERR(peer_mdev);
+       }
+
        if (!MLX5_CAP_GEN(priv->mdev, hairpin) || !MLX5_CAP_GEN(peer_mdev, hairpin)) {
                NL_SET_ERR_MSG_MOD(extack, "hairpin is not supported");
                return -EOPNOTSUPP;
index 6e074cc..605c8ec 100644 (file)
@@ -855,8 +855,8 @@ clean:
        return err;
 }
 
-int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
-                   unsigned int *irqn)
+static int vector2eqnirqn(struct mlx5_core_dev *dev, int vector, int *eqn,
+                         unsigned int *irqn)
 {
        struct mlx5_eq_table *table = dev->priv.eq_table;
        struct mlx5_eq_comp *eq, *n;
@@ -865,8 +865,10 @@ int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
 
        list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
                if (i++ == vector) {
-                       *eqn = eq->core.eqn;
-                       *irqn = eq->core.irqn;
+                       if (irqn)
+                               *irqn = eq->core.irqn;
+                       if (eqn)
+                               *eqn = eq->core.eqn;
                        err = 0;
                        break;
                }
@@ -874,8 +876,18 @@ int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
 
        return err;
 }
+
+int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn)
+{
+       return vector2eqnirqn(dev, vector, eqn, NULL);
+}
 EXPORT_SYMBOL(mlx5_vector2eqn);
 
+int mlx5_vector2irqn(struct mlx5_core_dev *dev, int vector, unsigned int *irqn)
+{
+       return vector2eqnirqn(dev, vector, NULL, irqn);
+}
+
 unsigned int mlx5_comp_vectors_count(struct mlx5_core_dev *dev)
 {
        return dev->priv.eq_table->num_comp_eqs;
index a6e1d4f..69a3630 100644 (file)
@@ -69,7 +69,7 @@ static void
 mlx5_esw_bridge_fdb_offload_notify(struct net_device *dev, const unsigned char *addr, u16 vid,
                                   unsigned long val)
 {
-       struct switchdev_notifier_fdb_info send_info;
+       struct switchdev_notifier_fdb_info send_info = {};
 
        send_info.addr = addr;
        send_info.vid = vid;
@@ -579,7 +579,7 @@ static struct mlx5_esw_bridge *mlx5_esw_bridge_create(int ifindex,
        xa_init(&bridge->vports);
        bridge->ifindex = ifindex;
        bridge->refcnt = 1;
-       bridge->ageing_time = BR_DEFAULT_AGEING_TIME;
+       bridge->ageing_time = clock_t_to_jiffies(BR_DEFAULT_AGEING_TIME);
        list_add(&bridge->list, &br_offloads->bridges);
 
        return bridge;
@@ -1006,7 +1006,7 @@ int mlx5_esw_bridge_ageing_time_set(unsigned long ageing_time, struct mlx5_eswit
        if (!vport->bridge)
                return -EINVAL;
 
-       vport->bridge->ageing_time = ageing_time;
+       vport->bridge->ageing_time = clock_t_to_jiffies(ageing_time);
        return 0;
 }
 
index 794012c..d3ad78a 100644 (file)
@@ -501,6 +501,7 @@ err_sampler:
 err_offload_rule:
        mlx5_esw_vporttbl_put(esw, &per_vport_tbl_attr);
 err_default_tbl:
+       kfree(sample_flow);
        return ERR_PTR(err);
 }
 
index 48cac5b..d562edf 100644 (file)
@@ -636,7 +636,7 @@ struct esw_vport_tbl_namespace {
 };
 
 struct mlx5_vport_tbl_attr {
-       u16 chain;
+       u32 chain;
        u16 prio;
        u16 vport;
        const struct esw_vport_tbl_namespace *vport_ns;
index 7579f34..3bb71a1 100644 (file)
@@ -48,6 +48,7 @@
 #include "lib/fs_chains.h"
 #include "en_tc.h"
 #include "en/mapping.h"
+#include "devlink.h"
 
 #define mlx5_esw_for_each_rep(esw, i, rep) \
        xa_for_each(&((esw)->offloads.vport_reps), i, rep)
@@ -382,10 +383,11 @@ esw_setup_vport_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *f
 {
        dest[dest_idx].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
        dest[dest_idx].vport.num = esw_attr->dests[attr_idx].rep->vport;
-       dest[dest_idx].vport.vhca_id =
-               MLX5_CAP_GEN(esw_attr->dests[attr_idx].mdev, vhca_id);
-       if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
+       if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
+               dest[dest_idx].vport.vhca_id =
+                       MLX5_CAP_GEN(esw_attr->dests[attr_idx].mdev, vhca_id);
                dest[dest_idx].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
+       }
        if (esw_attr->dests[attr_idx].flags & MLX5_ESW_DEST_ENCAP) {
                if (pkt_reformat) {
                        flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
@@ -2367,6 +2369,9 @@ static int mlx5_esw_offloads_devcom_event(int event,
 
        switch (event) {
        case ESW_OFFLOADS_DEVCOM_PAIR:
+               if (mlx5_get_next_phys_dev(esw->dev) != peer_esw->dev)
+                       break;
+
                if (mlx5_eswitch_vport_match_metadata_enabled(esw) !=
                    mlx5_eswitch_vport_match_metadata_enabled(peer_esw))
                        break;
@@ -2997,12 +3002,19 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
        if (cur_mlx5_mode == mlx5_mode)
                goto unlock;
 
-       if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
+       if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV) {
+               if (mlx5_devlink_trap_get_num_active(esw->dev)) {
+                       NL_SET_ERR_MSG_MOD(extack,
+                                          "Can't change mode while devlink traps are active");
+                       err = -EOPNOTSUPP;
+                       goto unlock;
+               }
                err = esw_offloads_start(esw, extack);
-       else if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
+       } else if (mode == DEVLINK_ESWITCH_MODE_LEGACY) {
                err = esw_offloads_stop(esw, extack);
-       else
+       } else {
                err = -EINVAL;
+       }
 
 unlock:
        mlx5_esw_unlock(esw);
index bd66ab2..d5da4ab 100644 (file)
@@ -417,7 +417,6 @@ static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size)
        struct mlx5_wq_param wqp;
        struct mlx5_cqe64 *cqe;
        int inlen, err, eqn;
-       unsigned int irqn;
        void *cqc, *in;
        __be64 *pas;
        u32 i;
@@ -446,7 +445,7 @@ static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size)
                goto err_cqwq;
        }
 
-       err = mlx5_vector2eqn(mdev, smp_processor_id(), &eqn, &irqn);
+       err = mlx5_vector2eqn(mdev, smp_processor_id(), &eqn);
        if (err) {
                kvfree(in);
                goto err_cqwq;
@@ -476,7 +475,6 @@ static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size)
        *conn->cq.mcq.arm_db    = 0;
        conn->cq.mcq.vector     = 0;
        conn->cq.mcq.comp       = mlx5_fpga_conn_cq_complete;
-       conn->cq.mcq.irqn       = irqn;
        conn->cq.mcq.uar        = fdev->conn_res.uar;
        tasklet_setup(&conn->cq.tasklet, mlx5_fpga_conn_cq_tasklet);
 
index d7bf0a3..c0697e1 100644 (file)
@@ -1024,17 +1024,19 @@ static int connect_fwd_rules(struct mlx5_core_dev *dev,
 static int connect_flow_table(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft,
                              struct fs_prio *prio)
 {
-       struct mlx5_flow_table *next_ft;
+       struct mlx5_flow_table *next_ft, *first_ft;
        int err = 0;
 
        /* Connect_prev_fts and update_root_ft_create are mutually exclusive */
 
-       if (list_empty(&prio->node.children)) {
+       first_ft = list_first_entry_or_null(&prio->node.children,
+                                           struct mlx5_flow_table, node.list);
+       if (!first_ft || first_ft->level > ft->level) {
                err = connect_prev_fts(dev, ft, prio);
                if (err)
                        return err;
 
-               next_ft = find_next_chained_ft(prio);
+               next_ft = first_ft ? first_ft : find_next_chained_ft(prio);
                err = connect_fwd_rules(dev, ft, next_ft);
                if (err)
                        return err;
@@ -2120,7 +2122,7 @@ static int disconnect_flow_table(struct mlx5_flow_table *ft)
                                node.list) == ft))
                return 0;
 
-       next_ft = find_next_chained_ft(prio);
+       next_ft = find_next_ft(ft);
        err = connect_fwd_rules(dev, next_ft, ft);
        if (err)
                return err;
index 9ff163c..9abeb80 100644 (file)
@@ -626,8 +626,16 @@ static void mlx5_fw_fatal_reporter_err_work(struct work_struct *work)
        }
        fw_reporter_ctx.err_synd = health->synd;
        fw_reporter_ctx.miss_counter = health->miss_counter;
-       devlink_health_report(health->fw_fatal_reporter,
-                             "FW fatal error reported", &fw_reporter_ctx);
+       if (devlink_health_report(health->fw_fatal_reporter,
+                                 "FW fatal error reported", &fw_reporter_ctx) == -ECANCELED) {
+               /* If recovery wasn't performed, due to grace period,
+                * unload the driver. This ensures that the driver
+                * closes all its resources and it is not subjected to
+                * requests from the kernel.
+                */
+               mlx5_core_err(dev, "Driver is in error state. Unloading\n");
+               mlx5_unload_one(dev);
+       }
 }
 
 static const struct devlink_health_reporter_ops mlx5_fw_fatal_reporter_ops = {
index 624cede..d3d628b 100644 (file)
@@ -104,4 +104,6 @@ void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev);
 struct cpu_rmap *mlx5_eq_table_get_rmap(struct mlx5_core_dev *dev);
 #endif
 
+int mlx5_vector2irqn(struct mlx5_core_dev *dev, int vector, unsigned int *irqn);
+
 #endif
index eb1b316..c84ad87 100644 (file)
@@ -1784,16 +1784,14 @@ static int __init init(void)
        if (err)
                goto err_sf;
 
-#ifdef CONFIG_MLX5_CORE_EN
        err = mlx5e_init();
-       if (err) {
-               pci_unregister_driver(&mlx5_core_driver);
-               goto err_debug;
-       }
-#endif
+       if (err)
+               goto err_en;
 
        return 0;
 
+err_en:
+       mlx5_sf_driver_unregister();
 err_sf:
        pci_unregister_driver(&mlx5_core_driver);
 err_debug:
@@ -1803,9 +1801,7 @@ err_debug:
 
 static void __exit cleanup(void)
 {
-#ifdef CONFIG_MLX5_CORE_EN
        mlx5e_cleanup();
-#endif
        mlx5_sf_driver_unregister();
        pci_unregister_driver(&mlx5_core_driver);
        mlx5_unregister_debugfs();
index 343807a..da365b8 100644 (file)
@@ -206,8 +206,13 @@ int mlx5_firmware_flash(struct mlx5_core_dev *dev, const struct firmware *fw,
 int mlx5_fw_version_query(struct mlx5_core_dev *dev,
                          u32 *running_ver, u32 *stored_ver);
 
+#ifdef CONFIG_MLX5_CORE_EN
 int mlx5e_init(void);
 void mlx5e_cleanup(void);
+#else
+static inline int mlx5e_init(void){ return 0; }
+static inline void mlx5e_cleanup(void){}
+#endif
 
 static inline bool mlx5_sriov_is_enabled(struct mlx5_core_dev *dev)
 {
index b25f764..3465b36 100644 (file)
@@ -214,6 +214,7 @@ static struct mlx5_irq *irq_request(struct mlx5_irq_pool *pool, int i)
                err = -ENOMEM;
                goto err_cpumask;
        }
+       irq->pool = pool;
        kref_init(&irq->kref);
        irq->index = i;
        err = xa_err(xa_store(&pool->irqs, irq->index, irq, GFP_KERNEL));
@@ -222,7 +223,6 @@ static struct mlx5_irq *irq_request(struct mlx5_irq_pool *pool, int i)
                              irq->index, err);
                goto err_xa;
        }
-       irq->pool = pool;
        return irq;
 err_xa:
        free_cpumask_var(irq->mask);
@@ -251,8 +251,11 @@ int mlx5_irq_attach_nb(struct mlx5_irq *irq, struct notifier_block *nb)
 
 int mlx5_irq_detach_nb(struct mlx5_irq *irq, struct notifier_block *nb)
 {
+       int err = 0;
+
+       err = atomic_notifier_chain_unregister(&irq->nh, nb);
        irq_put(irq);
-       return atomic_notifier_chain_unregister(&irq->nh, nb);
+       return err;
 }
 
 struct cpumask *mlx5_irq_get_affinity_mask(struct mlx5_irq *irq)
@@ -437,6 +440,7 @@ irq_pool_alloc(struct mlx5_core_dev *dev, int start, int size, char *name,
        if (!pool)
                return ERR_PTR(-ENOMEM);
        pool->dev = dev;
+       mutex_init(&pool->lock);
        xa_init_flags(&pool->irqs, XA_FLAGS_ALLOC);
        pool->xa_num_irqs.min = start;
        pool->xa_num_irqs.max = start + size - 1;
@@ -445,7 +449,6 @@ irq_pool_alloc(struct mlx5_core_dev *dev, int start, int size, char *name,
                         name);
        pool->min_threshold = min_threshold * MLX5_EQ_REFS_PER_IRQ;
        pool->max_threshold = max_threshold * MLX5_EQ_REFS_PER_IRQ;
-       mutex_init(&pool->lock);
        mlx5_core_dbg(dev, "pool->name = %s, pool->size = %d, pool->start = %d",
                      name, size, start);
        return pool;
@@ -459,6 +462,7 @@ static void irq_pool_free(struct mlx5_irq_pool *pool)
        xa_for_each(&pool->irqs, index, irq)
                irq_release(&irq->kref);
        xa_destroy(&pool->irqs);
+       mutex_destroy(&pool->lock);
        kvfree(pool);
 }
 
index 12cf323..9df0e73 100644 (file)
@@ -749,7 +749,6 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
        struct mlx5_cqe64 *cqe;
        struct mlx5dr_cq *cq;
        int inlen, err, eqn;
-       unsigned int irqn;
        void *cqc, *in;
        __be64 *pas;
        int vector;
@@ -782,7 +781,7 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
                goto err_cqwq;
 
        vector = raw_smp_processor_id() % mlx5_comp_vectors_count(mdev);
-       err = mlx5_vector2eqn(mdev, vector, &eqn, &irqn);
+       err = mlx5_vector2eqn(mdev, vector, &eqn);
        if (err) {
                kvfree(in);
                goto err_cqwq;
@@ -818,7 +817,6 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
        *cq->mcq.arm_db = cpu_to_be32(2 << 28);
 
        cq->mcq.vector = 0;
-       cq->mcq.irqn = irqn;
        cq->mcq.uar = uar;
 
        return cq;
index f1950e4..e4dd4ee 100644 (file)
@@ -352,6 +352,7 @@ static void dr_ste_v0_set_rx_decap(u8 *hw_ste_p)
 {
        MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action,
                 DR_STE_TUNL_ACTION_DECAP);
+       MLX5_SET(ste_rx_steering_mult, hw_ste_p, fail_on_error, 1);
 }
 
 static void dr_ste_v0_set_rx_pop_vlan(u8 *hw_ste_p)
@@ -365,6 +366,7 @@ static void dr_ste_v0_set_rx_decap_l3(u8 *hw_ste_p, bool vlan)
        MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action,
                 DR_STE_TUNL_ACTION_L3_DECAP);
        MLX5_SET(ste_modify_packet, hw_ste_p, action_description, vlan ? 1 : 0);
+       MLX5_SET(ste_rx_steering_mult, hw_ste_p, fail_on_error, 1);
 }
 
 static void dr_ste_v0_set_rewrite_actions(u8 *hw_ste_p, u16 num_of_actions,
index 7e221ef..f69cbb3 100644 (file)
@@ -9079,7 +9079,7 @@ mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif,
 
 static void mlxsw_sp_rif_fid_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
 {
-       struct switchdev_notifier_fdb_info info;
+       struct switchdev_notifier_fdb_info info = {};
        struct net_device *dev;
 
        dev = br_fdb_find_port(rif->dev, mac, 0);
@@ -9127,8 +9127,8 @@ mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif,
 
 static void mlxsw_sp_rif_vlan_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
 {
+       struct switchdev_notifier_fdb_info info = {};
        u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
-       struct switchdev_notifier_fdb_info info;
        struct net_device *br_dev;
        struct net_device *dev;
 
index c5ef9aa..8f90cd3 100644 (file)
@@ -2508,7 +2508,7 @@ mlxsw_sp_fdb_call_notifiers(enum switchdev_notifier_type type,
                            const char *mac, u16 vid,
                            struct net_device *dev, bool offloaded)
 {
-       struct switchdev_notifier_fdb_info info;
+       struct switchdev_notifier_fdb_info info = {};
 
        info.addr = mac;
        info.vid = vid;
index 0443f66..9a8e4f2 100644 (file)
@@ -277,7 +277,7 @@ static void sparx5_fdb_call_notifiers(enum switchdev_notifier_type type,
                                      const char *mac, u16 vid,
                                      struct net_device *dev, bool offloaded)
 {
-       struct switchdev_notifier_fdb_info info;
+       struct switchdev_notifier_fdb_info info = {};
 
        info.addr = mac;
        info.vid = vid;
index 9d485a9..cb68eaa 100644 (file)
  */
 #define VSTAX 73
 
-static void ifh_encode_bitfield(void *ifh, u64 value, u32 pos, u32 width)
+#define ifh_encode_bitfield(ifh, value, pos, _width)                   \
+       ({                                                              \
+               u32 width = (_width);                                   \
+                                                                       \
+               /* Max width is 5 bytes - 40 bits. In worst case this will
+                * spread over 6 bytes - 48 bits
+                */                                                     \
+               compiletime_assert(width <= 40,                         \
+                                  "Unsupported width, must be <= 40"); \
+               __ifh_encode_bitfield((ifh), (value), (pos), width);    \
+       })
+
+static void __ifh_encode_bitfield(void *ifh, u64 value, u32 pos, u32 width)
 {
        u8 *ifh_hdr = ifh;
        /* Calculate the Start IFH byte position of this IFH bit position */
        u32 byte = (35 - (pos / 8));
        /* Calculate the Start bit position in the Start IFH byte */
        u32 bit  = (pos % 8);
-       u64 encode = GENMASK(bit + width - 1, bit) & (value << bit);
-
-       /* Max width is 5 bytes - 40 bits. In worst case this will
-        * spread over 6 bytes - 48 bits
-        */
-       compiletime_assert(width <= 40, "Unsupported width, must be <= 40");
+       u64 encode = GENMASK_ULL(bit + width - 1, bit) & (value << bit);
 
        /* The b0-b7 goes into the start IFH byte */
        if (encode & 0xFF)
index adfb978..2948d73 100644 (file)
@@ -1334,6 +1334,7 @@ void ocelot_apply_bridge_fwd_mask(struct ocelot *ocelot)
                        struct net_device *bond = ocelot_port->bond;
 
                        mask = ocelot_get_bridge_fwd_mask(ocelot, bridge);
+                       mask |= cpu_fwd_mask;
                        mask &= ~BIT(port);
                        if (bond) {
                                mask &= ~ocelot_get_bond_mask(ocelot, bond,
index ea4e834..7390fa3 100644 (file)
@@ -21,7 +21,7 @@ u32 __ocelot_read_ix(struct ocelot *ocelot, u32 reg, u32 offset)
                    ocelot->map[target][reg & REG_MASK] + offset, &val);
        return val;
 }
-EXPORT_SYMBOL(__ocelot_read_ix);
+EXPORT_SYMBOL_GPL(__ocelot_read_ix);
 
 void __ocelot_write_ix(struct ocelot *ocelot, u32 val, u32 reg, u32 offset)
 {
@@ -32,7 +32,7 @@ void __ocelot_write_ix(struct ocelot *ocelot, u32 val, u32 reg, u32 offset)
        regmap_write(ocelot->targets[target],
                     ocelot->map[target][reg & REG_MASK] + offset, val);
 }
-EXPORT_SYMBOL(__ocelot_write_ix);
+EXPORT_SYMBOL_GPL(__ocelot_write_ix);
 
 void __ocelot_rmw_ix(struct ocelot *ocelot, u32 val, u32 mask, u32 reg,
                     u32 offset)
@@ -45,7 +45,7 @@ void __ocelot_rmw_ix(struct ocelot *ocelot, u32 val, u32 mask, u32 reg,
                           ocelot->map[target][reg & REG_MASK] + offset,
                           mask, val);
 }
-EXPORT_SYMBOL(__ocelot_rmw_ix);
+EXPORT_SYMBOL_GPL(__ocelot_rmw_ix);
 
 u32 ocelot_port_readl(struct ocelot_port *port, u32 reg)
 {
@@ -58,7 +58,7 @@ u32 ocelot_port_readl(struct ocelot_port *port, u32 reg)
        regmap_read(port->target, ocelot->map[target][reg & REG_MASK], &val);
        return val;
 }
-EXPORT_SYMBOL(ocelot_port_readl);
+EXPORT_SYMBOL_GPL(ocelot_port_readl);
 
 void ocelot_port_writel(struct ocelot_port *port, u32 val, u32 reg)
 {
@@ -69,7 +69,7 @@ void ocelot_port_writel(struct ocelot_port *port, u32 val, u32 reg)
 
        regmap_write(port->target, ocelot->map[target][reg & REG_MASK], val);
 }
-EXPORT_SYMBOL(ocelot_port_writel);
+EXPORT_SYMBOL_GPL(ocelot_port_writel);
 
 void ocelot_port_rmwl(struct ocelot_port *port, u32 val, u32 mask, u32 reg)
 {
@@ -77,7 +77,7 @@ void ocelot_port_rmwl(struct ocelot_port *port, u32 val, u32 mask, u32 reg)
 
        ocelot_port_writel(port, (cur & (~mask)) | val, reg);
 }
-EXPORT_SYMBOL(ocelot_port_rmwl);
+EXPORT_SYMBOL_GPL(ocelot_port_rmwl);
 
 u32 __ocelot_target_read_ix(struct ocelot *ocelot, enum ocelot_target target,
                            u32 reg, u32 offset)
@@ -128,7 +128,7 @@ int ocelot_regfields_init(struct ocelot *ocelot,
 
        return 0;
 }
-EXPORT_SYMBOL(ocelot_regfields_init);
+EXPORT_SYMBOL_GPL(ocelot_regfields_init);
 
 static struct regmap_config ocelot_regmap_config = {
        .reg_bits       = 32,
@@ -148,4 +148,4 @@ struct regmap *ocelot_regmap_init(struct ocelot *ocelot, struct resource *res)
 
        return devm_regmap_init_mmio(ocelot->dev, regs, &ocelot_regmap_config);
 }
-EXPORT_SYMBOL(ocelot_regmap_init);
+EXPORT_SYMBOL_GPL(ocelot_regmap_init);
index 51b4b25..84f7dbe 100644 (file)
@@ -819,7 +819,7 @@ static int natsemi_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
                printk(version);
 #endif
 
-       i = pci_enable_device(pdev);
+       i = pcim_enable_device(pdev);
        if (i) return i;
 
        /* natsemi has a non-standard PM control register
@@ -852,7 +852,7 @@ static int natsemi_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
        ioaddr = ioremap(iostart, iosize);
        if (!ioaddr) {
                i = -ENOMEM;
-               goto err_ioremap;
+               goto err_pci_request_regions;
        }
 
        /* Work around the dropped serial bit. */
@@ -974,9 +974,6 @@ static int natsemi_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
  err_register_netdev:
        iounmap(ioaddr);
 
- err_ioremap:
-       pci_release_regions(pdev);
-
  err_pci_request_regions:
        free_netdev(dev);
        return i;
@@ -3241,7 +3238,6 @@ static void natsemi_remove1(struct pci_dev *pdev)
 
        NATSEMI_REMOVE_FILE(pdev, dspcfg_workaround);
        unregister_netdev (dev);
-       pci_release_regions (pdev);
        iounmap(ioaddr);
        free_netdev (dev);
 }
index 82eef4c..7abd13e 100644 (file)
@@ -3512,13 +3512,13 @@ static void vxge_device_unregister(struct __vxge_hw_device *hldev)
 
        kfree(vdev->vpaths);
 
-       /* we are safe to free it now */
-       free_netdev(dev);
-
        vxge_debug_init(vdev->level_trace, "%s: ethernet device unregistered",
                        buf);
        vxge_debug_entryexit(vdev->level_trace, "%s: %s:%d  Exiting...", buf,
                             __func__, __LINE__);
+
+       /* we are safe to free it now */
+       free_netdev(dev);
 }
 
 /*
index 1b48244..8803faa 100644 (file)
@@ -286,6 +286,8 @@ nfp_net_get_link_ksettings(struct net_device *netdev,
 
        /* Init to unknowns */
        ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
+       ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
+       ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause);
        cmd->base.port = PORT_OTHER;
        cmd->base.speed = SPEED_UNKNOWN;
        cmd->base.duplex = DUPLEX_UNKNOWN;
index af3a536..e795fa6 100644 (file)
@@ -29,7 +29,7 @@ static const u8 ionic_qtype_versions[IONIC_QTYPE_MAX] = {
                                      */
 };
 
-static void ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode);
+static void ionic_lif_rx_mode(struct ionic_lif *lif);
 static int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr);
 static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr);
 static void ionic_link_status_check(struct ionic_lif *lif);
@@ -53,7 +53,19 @@ static void ionic_dim_work(struct work_struct *work)
        cur_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
        qcq = container_of(dim, struct ionic_qcq, dim);
        new_coal = ionic_coal_usec_to_hw(qcq->q.lif->ionic, cur_moder.usec);
-       qcq->intr.dim_coal_hw = new_coal ? new_coal : 1;
+       new_coal = new_coal ? new_coal : 1;
+
+       if (qcq->intr.dim_coal_hw != new_coal) {
+               unsigned int qi = qcq->cq.bound_q->index;
+               struct ionic_lif *lif = qcq->q.lif;
+
+               qcq->intr.dim_coal_hw = new_coal;
+
+               ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
+                                    lif->rxqcqs[qi]->intr.index,
+                                    qcq->intr.dim_coal_hw);
+       }
+
        dim->state = DIM_START_MEASURE;
 }
 
@@ -77,7 +89,7 @@ static void ionic_lif_deferred_work(struct work_struct *work)
 
                switch (w->type) {
                case IONIC_DW_TYPE_RX_MODE:
-                       ionic_lif_rx_mode(lif, w->rx_mode);
+                       ionic_lif_rx_mode(lif);
                        break;
                case IONIC_DW_TYPE_RX_ADDR_ADD:
                        ionic_lif_addr_add(lif, w->addr);
@@ -1301,10 +1313,8 @@ static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr)
        return 0;
 }
 
-static int ionic_lif_addr(struct ionic_lif *lif, const u8 *addr, bool add,
-                         bool can_sleep)
+static int ionic_lif_addr(struct ionic_lif *lif, const u8 *addr, bool add)
 {
-       struct ionic_deferred_work *work;
        unsigned int nmfilters;
        unsigned int nufilters;
 
@@ -1330,97 +1340,46 @@ static int ionic_lif_addr(struct ionic_lif *lif, const u8 *addr, bool add,
                        lif->nucast--;
        }
 
-       if (!can_sleep) {
-               work = kzalloc(sizeof(*work), GFP_ATOMIC);
-               if (!work)
-                       return -ENOMEM;
-               work->type = add ? IONIC_DW_TYPE_RX_ADDR_ADD :
-                                  IONIC_DW_TYPE_RX_ADDR_DEL;
-               memcpy(work->addr, addr, ETH_ALEN);
-               netdev_dbg(lif->netdev, "deferred: rx_filter %s %pM\n",
-                          add ? "add" : "del", addr);
-               ionic_lif_deferred_enqueue(&lif->deferred, work);
-       } else {
-               netdev_dbg(lif->netdev, "rx_filter %s %pM\n",
-                          add ? "add" : "del", addr);
-               if (add)
-                       return ionic_lif_addr_add(lif, addr);
-               else
-                       return ionic_lif_addr_del(lif, addr);
-       }
+       netdev_dbg(lif->netdev, "rx_filter %s %pM\n",
+                  add ? "add" : "del", addr);
+       if (add)
+               return ionic_lif_addr_add(lif, addr);
+       else
+               return ionic_lif_addr_del(lif, addr);
 
        return 0;
 }
 
 static int ionic_addr_add(struct net_device *netdev, const u8 *addr)
 {
-       return ionic_lif_addr(netdev_priv(netdev), addr, ADD_ADDR, CAN_SLEEP);
-}
-
-static int ionic_ndo_addr_add(struct net_device *netdev, const u8 *addr)
-{
-       return ionic_lif_addr(netdev_priv(netdev), addr, ADD_ADDR, CAN_NOT_SLEEP);
+       return ionic_lif_addr(netdev_priv(netdev), addr, ADD_ADDR);
 }
 
 static int ionic_addr_del(struct net_device *netdev, const u8 *addr)
 {
-       return ionic_lif_addr(netdev_priv(netdev), addr, DEL_ADDR, CAN_SLEEP);
+       return ionic_lif_addr(netdev_priv(netdev), addr, DEL_ADDR);
 }
 
-static int ionic_ndo_addr_del(struct net_device *netdev, const u8 *addr)
+static void ionic_lif_rx_mode(struct ionic_lif *lif)
 {
-       return ionic_lif_addr(netdev_priv(netdev), addr, DEL_ADDR, CAN_NOT_SLEEP);
-}
-
-static void ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode)
-{
-       struct ionic_admin_ctx ctx = {
-               .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
-               .cmd.rx_mode_set = {
-                       .opcode = IONIC_CMD_RX_MODE_SET,
-                       .lif_index = cpu_to_le16(lif->index),
-                       .rx_mode = cpu_to_le16(rx_mode),
-               },
-       };
+       struct net_device *netdev = lif->netdev;
+       unsigned int nfilters;
+       unsigned int nd_flags;
        char buf[128];
-       int err;
+       u16 rx_mode;
        int i;
 #define REMAIN(__x) (sizeof(buf) - (__x))
 
-       i = scnprintf(buf, sizeof(buf), "rx_mode 0x%04x -> 0x%04x:",
-                     lif->rx_mode, rx_mode);
-       if (rx_mode & IONIC_RX_MODE_F_UNICAST)
-               i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_UNICAST");
-       if (rx_mode & IONIC_RX_MODE_F_MULTICAST)
-               i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_MULTICAST");
-       if (rx_mode & IONIC_RX_MODE_F_BROADCAST)
-               i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_BROADCAST");
-       if (rx_mode & IONIC_RX_MODE_F_PROMISC)
-               i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_PROMISC");
-       if (rx_mode & IONIC_RX_MODE_F_ALLMULTI)
-               i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_ALLMULTI");
-       netdev_dbg(lif->netdev, "lif%d %s\n", lif->index, buf);
-
-       err = ionic_adminq_post_wait(lif, &ctx);
-       if (err)
-               netdev_warn(lif->netdev, "set rx_mode 0x%04x failed: %d\n",
-                           rx_mode, err);
-       else
-               lif->rx_mode = rx_mode;
-}
+       mutex_lock(&lif->config_lock);
 
-static void ionic_set_rx_mode(struct net_device *netdev, bool can_sleep)
-{
-       struct ionic_lif *lif = netdev_priv(netdev);
-       struct ionic_deferred_work *work;
-       unsigned int nfilters;
-       unsigned int rx_mode;
+       /* grab the flags once for local use */
+       nd_flags = netdev->flags;
 
        rx_mode = IONIC_RX_MODE_F_UNICAST;
-       rx_mode |= (netdev->flags & IFF_MULTICAST) ? IONIC_RX_MODE_F_MULTICAST : 0;
-       rx_mode |= (netdev->flags & IFF_BROADCAST) ? IONIC_RX_MODE_F_BROADCAST : 0;
-       rx_mode |= (netdev->flags & IFF_PROMISC) ? IONIC_RX_MODE_F_PROMISC : 0;
-       rx_mode |= (netdev->flags & IFF_ALLMULTI) ? IONIC_RX_MODE_F_ALLMULTI : 0;
+       rx_mode |= (nd_flags & IFF_MULTICAST) ? IONIC_RX_MODE_F_MULTICAST : 0;
+       rx_mode |= (nd_flags & IFF_BROADCAST) ? IONIC_RX_MODE_F_BROADCAST : 0;
+       rx_mode |= (nd_flags & IFF_PROMISC) ? IONIC_RX_MODE_F_PROMISC : 0;
+       rx_mode |= (nd_flags & IFF_ALLMULTI) ? IONIC_RX_MODE_F_ALLMULTI : 0;
 
        /* sync unicast addresses
         * next check to see if we're in an overflow state
@@ -1429,49 +1388,83 @@ static void ionic_set_rx_mode(struct net_device *netdev, bool can_sleep)
         *       we remove our overflow flag and check the netdev flags
         *       to see if we can disable NIC PROMISC
         */
-       if (can_sleep)
-               __dev_uc_sync(netdev, ionic_addr_add, ionic_addr_del);
-       else
-               __dev_uc_sync(netdev, ionic_ndo_addr_add, ionic_ndo_addr_del);
+       __dev_uc_sync(netdev, ionic_addr_add, ionic_addr_del);
        nfilters = le32_to_cpu(lif->identity->eth.max_ucast_filters);
        if (netdev_uc_count(netdev) + 1 > nfilters) {
                rx_mode |= IONIC_RX_MODE_F_PROMISC;
                lif->uc_overflow = true;
        } else if (lif->uc_overflow) {
                lif->uc_overflow = false;
-               if (!(netdev->flags & IFF_PROMISC))
+               if (!(nd_flags & IFF_PROMISC))
                        rx_mode &= ~IONIC_RX_MODE_F_PROMISC;
        }
 
        /* same for multicast */
-       if (can_sleep)
-               __dev_mc_sync(netdev, ionic_addr_add, ionic_addr_del);
-       else
-               __dev_mc_sync(netdev, ionic_ndo_addr_add, ionic_ndo_addr_del);
+       __dev_mc_sync(netdev, ionic_addr_add, ionic_addr_del);
        nfilters = le32_to_cpu(lif->identity->eth.max_mcast_filters);
        if (netdev_mc_count(netdev) > nfilters) {
                rx_mode |= IONIC_RX_MODE_F_ALLMULTI;
                lif->mc_overflow = true;
        } else if (lif->mc_overflow) {
                lif->mc_overflow = false;
-               if (!(netdev->flags & IFF_ALLMULTI))
+               if (!(nd_flags & IFF_ALLMULTI))
                        rx_mode &= ~IONIC_RX_MODE_F_ALLMULTI;
        }
 
+       i = scnprintf(buf, sizeof(buf), "rx_mode 0x%04x -> 0x%04x:",
+                     lif->rx_mode, rx_mode);
+       if (rx_mode & IONIC_RX_MODE_F_UNICAST)
+               i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_UNICAST");
+       if (rx_mode & IONIC_RX_MODE_F_MULTICAST)
+               i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_MULTICAST");
+       if (rx_mode & IONIC_RX_MODE_F_BROADCAST)
+               i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_BROADCAST");
+       if (rx_mode & IONIC_RX_MODE_F_PROMISC)
+               i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_PROMISC");
+       if (rx_mode & IONIC_RX_MODE_F_ALLMULTI)
+               i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_ALLMULTI");
+       if (rx_mode & IONIC_RX_MODE_F_RDMA_SNIFFER)
+               i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_RDMA_SNIFFER");
+       netdev_dbg(netdev, "lif%d %s\n", lif->index, buf);
+
        if (lif->rx_mode != rx_mode) {
-               if (!can_sleep) {
-                       work = kzalloc(sizeof(*work), GFP_ATOMIC);
-                       if (!work) {
-                               netdev_err(lif->netdev, "rxmode change dropped\n");
-                               return;
-                       }
-                       work->type = IONIC_DW_TYPE_RX_MODE;
-                       work->rx_mode = rx_mode;
-                       netdev_dbg(lif->netdev, "deferred: rx_mode\n");
-                       ionic_lif_deferred_enqueue(&lif->deferred, work);
-               } else {
-                       ionic_lif_rx_mode(lif, rx_mode);
+               struct ionic_admin_ctx ctx = {
+                       .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
+                       .cmd.rx_mode_set = {
+                               .opcode = IONIC_CMD_RX_MODE_SET,
+                               .lif_index = cpu_to_le16(lif->index),
+                       },
+               };
+               int err;
+
+               ctx.cmd.rx_mode_set.rx_mode = cpu_to_le16(rx_mode);
+               err = ionic_adminq_post_wait(lif, &ctx);
+               if (err)
+                       netdev_warn(netdev, "set rx_mode 0x%04x failed: %d\n",
+                                   rx_mode, err);
+               else
+                       lif->rx_mode = rx_mode;
+       }
+
+       mutex_unlock(&lif->config_lock);
+}
+
+static void ionic_set_rx_mode(struct net_device *netdev, bool can_sleep)
+{
+       struct ionic_lif *lif = netdev_priv(netdev);
+       struct ionic_deferred_work *work;
+
+       if (!can_sleep) {
+               work = kzalloc(sizeof(*work), GFP_ATOMIC);
+               if (!work) {
+                       netdev_err(lif->netdev, "rxmode change dropped\n");
+                       return;
                }
+               work->type = IONIC_DW_TYPE_RX_MODE;
+               netdev_dbg(lif->netdev, "deferred: rx_mode\n");
+               ionic_lif_deferred_enqueue(&lif->deferred, work);
+       } else {
+               ionic_lif_rx_mode(lif);
        }
 }
 
@@ -3058,6 +3051,7 @@ void ionic_lif_deinit(struct ionic_lif *lif)
        ionic_lif_qcq_deinit(lif, lif->notifyqcq);
        ionic_lif_qcq_deinit(lif, lif->adminqcq);
 
+       mutex_destroy(&lif->config_lock);
        mutex_destroy(&lif->queue_lock);
        ionic_lif_reset(lif);
 }
@@ -3185,7 +3179,7 @@ static int ionic_station_set(struct ionic_lif *lif)
                 */
                if (!ether_addr_equal(ctx.comp.lif_getattr.mac,
                                      netdev->dev_addr))
-                       ionic_lif_addr(lif, netdev->dev_addr, ADD_ADDR, CAN_SLEEP);
+                       ionic_lif_addr(lif, netdev->dev_addr, ADD_ADDR);
        } else {
                /* Update the netdev mac with the device's mac */
                memcpy(addr.sa_data, ctx.comp.lif_getattr.mac, netdev->addr_len);
@@ -3202,7 +3196,7 @@ static int ionic_station_set(struct ionic_lif *lif)
 
        netdev_dbg(lif->netdev, "adding station MAC addr %pM\n",
                   netdev->dev_addr);
-       ionic_lif_addr(lif, netdev->dev_addr, ADD_ADDR, CAN_SLEEP);
+       ionic_lif_addr(lif, netdev->dev_addr, ADD_ADDR);
 
        return 0;
 }
@@ -3225,6 +3219,7 @@ int ionic_lif_init(struct ionic_lif *lif)
 
        lif->hw_index = le16_to_cpu(comp.hw_index);
        mutex_init(&lif->queue_lock);
+       mutex_init(&lif->config_lock);
 
        /* now that we have the hw_index we can figure out our doorbell page */
        lif->dbid_count = le32_to_cpu(lif->ionic->ident.dev.ndbpgs_per_lif);
index 346506f..69ab59f 100644 (file)
@@ -108,7 +108,6 @@ struct ionic_deferred_work {
        struct list_head list;
        enum ionic_deferred_work_type type;
        union {
-               unsigned int rx_mode;
                u8 addr[ETH_ALEN];
                u8 fw_status;
        };
@@ -179,6 +178,7 @@ struct ionic_lif {
        unsigned int index;
        unsigned int hw_index;
        struct mutex queue_lock;        /* lock for queue structures */
+       struct mutex config_lock;       /* lock for config actions */
        spinlock_t adminq_lock;         /* lock for AdminQ operations */
        struct ionic_qcq *adminqcq;
        struct ionic_qcq *notifyqcq;
@@ -199,7 +199,7 @@ struct ionic_lif {
        unsigned int nrxq_descs;
        u32 rx_copybreak;
        u64 rxq_features;
-       unsigned int rx_mode;
+       u16 rx_mode;
        u64 hw_features;
        bool registered;
        bool mc_overflow;
@@ -302,7 +302,7 @@ int ionic_lif_identify(struct ionic *ionic, u8 lif_type,
 int ionic_lif_size(struct ionic *ionic);
 
 #if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
-int ionic_lif_hwstamp_replay(struct ionic_lif *lif);
+void ionic_lif_hwstamp_replay(struct ionic_lif *lif);
 int ionic_lif_hwstamp_set(struct ionic_lif *lif, struct ifreq *ifr);
 int ionic_lif_hwstamp_get(struct ionic_lif *lif, struct ifreq *ifr);
 ktime_t ionic_lif_phc_ktime(struct ionic_lif *lif, u64 counter);
@@ -311,10 +311,7 @@ void ionic_lif_unregister_phc(struct ionic_lif *lif);
 void ionic_lif_alloc_phc(struct ionic_lif *lif);
 void ionic_lif_free_phc(struct ionic_lif *lif);
 #else
-static inline int ionic_lif_hwstamp_replay(struct ionic_lif *lif)
-{
-       return -EOPNOTSUPP;
-}
+static inline void ionic_lif_hwstamp_replay(struct ionic_lif *lif) {}
 
 static inline int ionic_lif_hwstamp_set(struct ionic_lif *lif, struct ifreq *ifr)
 {
index a87c87e..6e2403c 100644 (file)
@@ -188,6 +188,9 @@ int ionic_lif_hwstamp_set(struct ionic_lif *lif, struct ifreq *ifr)
        struct hwtstamp_config config;
        int err;
 
+       if (!lif->phc || !lif->phc->ptp)
+               return -EOPNOTSUPP;
+
        if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
                return -EFAULT;
 
@@ -203,15 +206,16 @@ int ionic_lif_hwstamp_set(struct ionic_lif *lif, struct ifreq *ifr)
        return 0;
 }
 
-int ionic_lif_hwstamp_replay(struct ionic_lif *lif)
+void ionic_lif_hwstamp_replay(struct ionic_lif *lif)
 {
        int err;
 
+       if (!lif->phc || !lif->phc->ptp)
+               return;
+
        err = ionic_lif_hwstamp_set_ts_config(lif, NULL);
        if (err)
                netdev_info(lif->netdev, "hwstamp replay failed: %d\n", err);
-
-       return err;
 }
 
 int ionic_lif_hwstamp_get(struct ionic_lif *lif, struct ifreq *ifr)
index 0893488..0887019 100644 (file)
@@ -274,12 +274,11 @@ static void ionic_rx_clean(struct ionic_queue *q,
                }
        }
 
-       if (likely(netdev->features & NETIF_F_RXCSUM)) {
-               if (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC) {
-                       skb->ip_summed = CHECKSUM_COMPLETE;
-                       skb->csum = (__force __wsum)le16_to_cpu(comp->csum);
-                       stats->csum_complete++;
-               }
+       if (likely(netdev->features & NETIF_F_RXCSUM) &&
+           (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC)) {
+               skb->ip_summed = CHECKSUM_COMPLETE;
+               skb->csum = (__force __wsum)le16_to_cpu(comp->csum);
+               stats->csum_complete++;
        } else {
                stats->csum_none++;
        }
@@ -451,11 +450,12 @@ void ionic_rx_empty(struct ionic_queue *q)
        q->tail_idx = 0;
 }
 
-static void ionic_dim_update(struct ionic_qcq *qcq)
+static void ionic_dim_update(struct ionic_qcq *qcq, int napi_mode)
 {
        struct dim_sample dim_sample;
        struct ionic_lif *lif;
        unsigned int qi;
+       u64 pkts, bytes;
 
        if (!qcq->intr.dim_coal_hw)
                return;
@@ -463,14 +463,23 @@ static void ionic_dim_update(struct ionic_qcq *qcq)
        lif = qcq->q.lif;
        qi = qcq->cq.bound_q->index;
 
-       ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
-                            lif->rxqcqs[qi]->intr.index,
-                            qcq->intr.dim_coal_hw);
+       switch (napi_mode) {
+       case IONIC_LIF_F_TX_DIM_INTR:
+               pkts = lif->txqstats[qi].pkts;
+               bytes = lif->txqstats[qi].bytes;
+               break;
+       case IONIC_LIF_F_RX_DIM_INTR:
+               pkts = lif->rxqstats[qi].pkts;
+               bytes = lif->rxqstats[qi].bytes;
+               break;
+       default:
+               pkts = lif->txqstats[qi].pkts + lif->rxqstats[qi].pkts;
+               bytes = lif->txqstats[qi].bytes + lif->rxqstats[qi].bytes;
+               break;
+       }
 
        dim_update_sample(qcq->cq.bound_intr->rearm_count,
-                         lif->txqstats[qi].pkts,
-                         lif->txqstats[qi].bytes,
-                         &dim_sample);
+                         pkts, bytes, &dim_sample);
 
        net_dim(&qcq->dim, dim_sample);
 }
@@ -491,7 +500,7 @@ int ionic_tx_napi(struct napi_struct *napi, int budget)
                                     ionic_tx_service, NULL, NULL);
 
        if (work_done < budget && napi_complete_done(napi, work_done)) {
-               ionic_dim_update(qcq);
+               ionic_dim_update(qcq, IONIC_LIF_F_TX_DIM_INTR);
                flags |= IONIC_INTR_CRED_UNMASK;
                cq->bound_intr->rearm_count++;
        }
@@ -530,7 +539,7 @@ int ionic_rx_napi(struct napi_struct *napi, int budget)
                ionic_rx_fill(cq->bound_q);
 
        if (work_done < budget && napi_complete_done(napi, work_done)) {
-               ionic_dim_update(qcq);
+               ionic_dim_update(qcq, IONIC_LIF_F_RX_DIM_INTR);
                flags |= IONIC_INTR_CRED_UNMASK;
                cq->bound_intr->rearm_count++;
        }
@@ -576,7 +585,7 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget)
                ionic_rx_fill(rxcq->bound_q);
 
        if (rx_work_done < budget && napi_complete_done(napi, rx_work_done)) {
-               ionic_dim_update(qcq);
+               ionic_dim_update(qcq, 0);
                flags |= IONIC_INTR_CRED_UNMASK;
                rxcq->bound_intr->rearm_count++;
        }
index 02a4610..c46a7f7 100644 (file)
@@ -327,6 +327,9 @@ static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
        unsigned long flags;
        int rc = -EINVAL;
 
+       if (!p_ll2_conn)
+               return rc;
+
        spin_lock_irqsave(&p_tx->lock, flags);
        if (p_tx->b_completing_packet) {
                rc = -EBUSY;
@@ -500,7 +503,16 @@ static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie)
        unsigned long flags = 0;
        int rc = 0;
 
+       if (!p_ll2_conn)
+               return rc;
+
        spin_lock_irqsave(&p_rx->lock, flags);
+
+       if (!QED_LL2_RX_REGISTERED(p_ll2_conn)) {
+               spin_unlock_irqrestore(&p_rx->lock, flags);
+               return 0;
+       }
+
        cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons);
        cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
 
@@ -821,6 +833,9 @@ static int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
        struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie;
        int rc;
 
+       if (!p_ll2_conn)
+               return 0;
+
        if (!QED_LL2_RX_REGISTERED(p_ll2_conn))
                return 0;
 
@@ -844,6 +859,9 @@ static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
        u16 new_idx = 0, num_bds = 0;
        int rc;
 
+       if (!p_ll2_conn)
+               return 0;
+
        if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
                return 0;
 
@@ -1728,6 +1746,8 @@ int qed_ll2_post_rx_buffer(void *cxt,
        if (!p_ll2_conn)
                return -EINVAL;
        p_rx = &p_ll2_conn->rx_queue;
+       if (!p_rx->set_prod_addr)
+               return -EIO;
 
        spin_lock_irqsave(&p_rx->lock, flags);
        if (!list_empty(&p_rx->free_descq))
index 5bd58c6..6bb9ec9 100644 (file)
@@ -616,7 +616,12 @@ static int qed_enable_msix(struct qed_dev *cdev,
                        rc = cnt;
        }
 
-       if (rc > 0) {
+       /* For VFs, we should return with an error in case we didn't get the
+        * exact number of msix vectors as we requested.
+        * Not doing that will lead to a crash when starting queues for
+        * this VF.
+        */
+       if ((IS_PF(cdev) && rc > 0) || (IS_VF(cdev) && rc == cnt)) {
                /* MSI-x configuration was achieved */
                int_params->out.int_mode = QED_INT_MODE_MSIX;
                int_params->out.num_vectors = rc;
index da864d1..4f4b792 100644 (file)
@@ -1285,8 +1285,7 @@ qed_rdma_create_qp(void *rdma_cxt,
 
        if (!rdma_cxt || !in_params || !out_params ||
            !p_hwfn->p_rdma_info->active) {
-               DP_ERR(p_hwfn->cdev,
-                      "qed roce create qp failed due to NULL entry (rdma_cxt=%p, in=%p, out=%p, roce_info=?\n",
+               pr_err("qed roce create qp failed due to NULL entry (rdma_cxt=%p, in=%p, out=%p, roce_info=?\n",
                       rdma_cxt, in_params, out_params);
                return NULL;
        }
index 2e62a2c..5630008 100644 (file)
@@ -501,6 +501,7 @@ struct qede_fastpath {
 #define QEDE_SP_HW_ERR                  4
 #define QEDE_SP_ARFS_CONFIG             5
 #define QEDE_SP_AER                    7
+#define QEDE_SP_DISABLE                        8
 
 #ifdef CONFIG_RFS_ACCEL
 int qede_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
index c59b72c..a2e4dfb 100644 (file)
@@ -831,7 +831,7 @@ int qede_configure_vlan_filters(struct qede_dev *edev)
 int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
 {
        struct qede_dev *edev = netdev_priv(dev);
-       struct qede_vlan *vlan = NULL;
+       struct qede_vlan *vlan;
        int rc = 0;
 
        DP_VERBOSE(edev, NETIF_MSG_IFDOWN, "Removing vlan 0x%04x\n", vid);
@@ -842,7 +842,7 @@ int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
                if (vlan->vid == vid)
                        break;
 
-       if (!vlan || (vlan->vid != vid)) {
+       if (list_entry_is_head(vlan, &edev->vlan_list, list)) {
                DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
                           "Vlan isn't configured\n");
                goto out;
index 01ac1e9..1c7f9ed 100644 (file)
@@ -1009,6 +1009,13 @@ static void qede_sp_task(struct work_struct *work)
        struct qede_dev *edev = container_of(work, struct qede_dev,
                                             sp_task.work);
 
+       /* Disable execution of this deferred work once
+        * qede removal is in progress; this stops any future
+        * scheduling of sp_task.
+        */
+       if (test_bit(QEDE_SP_DISABLE, &edev->sp_flags))
+               return;
+
        /* The locking scheme depends on the specific flag:
         * In case of QEDE_SP_RECOVERY, acquiring the RTNL lock is required to
         * ensure that ongoing flows are ended and new ones are not started.
@@ -1300,6 +1307,7 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
        qede_rdma_dev_remove(edev, (mode == QEDE_REMOVE_RECOVERY));
 
        if (mode != QEDE_REMOVE_RECOVERY) {
+               set_bit(QEDE_SP_DISABLE, &edev->sp_flags);
                unregister_netdev(ndev);
 
                cancel_delayed_work_sync(&edev->sp_task);
@@ -1866,6 +1874,7 @@ static void qede_sync_free_irqs(struct qede_dev *edev)
        }
 
        edev->int_info.used_cnt = 0;
+       edev->int_info.msix_cnt = 0;
 }
 
 static int qede_req_msix_irqs(struct qede_dev *edev)
@@ -2419,7 +2428,6 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode,
        goto out;
 err4:
        qede_sync_free_irqs(edev);
-       memset(&edev->int_info.msix_cnt, 0, sizeof(struct qed_int_info));
 err3:
        qede_napi_disable_remove(edev);
 err2:
index 2376b27..c00ad57 100644 (file)
@@ -154,7 +154,7 @@ static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev)
                                      "driver lock acquired\n");
                        return 1;
                }
-               ssleep(1);
+               mdelay(1000);
        } while (++i < 10);
 
        netdev_err(qdev->ndev, "Timed out waiting for driver lock...\n");
@@ -3274,7 +3274,7 @@ static int ql_adapter_reset(struct ql3_adapter *qdev)
                if ((value & ISP_CONTROL_SR) == 0)
                        break;
 
-               ssleep(1);
+               mdelay(1000);
        } while ((--max_wait_time));
 
        /*
@@ -3310,7 +3310,7 @@ static int ql_adapter_reset(struct ql3_adapter *qdev)
                                                   ispControlStatus);
                        if ((value & ISP_CONTROL_FSR) == 0)
                                break;
-                       ssleep(1);
+                       mdelay(1000);
                } while ((--max_wait_time));
        }
        if (max_wait_time == 0)
index d8882d0..d51bac7 100644 (file)
@@ -3156,8 +3156,10 @@ int qlcnic_83xx_flash_read32(struct qlcnic_adapter *adapter, u32 flash_addr,
 
                indirect_addr = QLC_83XX_FLASH_DIRECT_DATA(addr);
                ret = QLCRD32(adapter, indirect_addr, &err);
-               if (err == -EIO)
+               if (err == -EIO) {
+                       qlcnic_83xx_unlock_flash(adapter);
                        return err;
+               }
 
                word = ret;
                *(u32 *)p_data  = word;
index c7af5bc..4d8e337 100644 (file)
@@ -3502,12 +3502,16 @@ static void rtl_hw_start_8106(struct rtl8169_private *tp)
        RTL_W8(tp, MCU, RTL_R8(tp, MCU) | EN_NDP | EN_OOB_RESET);
        RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN);
 
+       /* The default value is 0x13. Change it to 0x2f */
+       rtl_csi_access_enable(tp, 0x2f);
+
        rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000);
 
        /* disable EEE */
        rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000);
 
        rtl_pcie_state_l2l3_disable(tp);
+       rtl_hw_aspm_clkreq_enable(tp, true);
 }
 
 DECLARE_RTL_COND(rtl_mac_ocp_e00e_cond)
index a466336..1f06b92 100644 (file)
@@ -2715,7 +2715,7 @@ static void
 rocker_fdb_offload_notify(struct rocker_port *rocker_port,
                          struct switchdev_notifier_fdb_info *recv_info)
 {
-       struct switchdev_notifier_fdb_info info;
+       struct switchdev_notifier_fdb_info info = {};
 
        info.addr = recv_info->addr;
        info.vid = recv_info->vid;
index 967a634..e33a9d2 100644 (file)
@@ -1822,7 +1822,7 @@ static void ofdpa_port_fdb_learn_work(struct work_struct *work)
                container_of(work, struct ofdpa_fdb_learn_work, work);
        bool removing = (lw->flags & OFDPA_OP_FLAG_REMOVE);
        bool learned = (lw->flags & OFDPA_OP_FLAG_LEARNED);
-       struct switchdev_notifier_fdb_info info;
+       struct switchdev_notifier_fdb_info info = {};
 
        info.addr = lw->addr;
        info.vid = lw->vid;
index ca9c00b..cff87de 100644 (file)
@@ -443,7 +443,7 @@ static int sis900_probe(struct pci_dev *pci_dev,
 #endif
 
        /* setup various bits in PCI command register */
-       ret = pci_enable_device(pci_dev);
+       ret = pcim_enable_device(pci_dev);
        if(ret) return ret;
 
        i = dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(32));
@@ -469,7 +469,7 @@ static int sis900_probe(struct pci_dev *pci_dev,
        ioaddr = pci_iomap(pci_dev, 0, 0);
        if (!ioaddr) {
                ret = -ENOMEM;
-               goto err_out_cleardev;
+               goto err_out;
        }
 
        sis_priv = netdev_priv(net_dev);
@@ -581,8 +581,6 @@ err_unmap_tx:
                          sis_priv->tx_ring_dma);
 err_out_unmap:
        pci_iounmap(pci_dev, ioaddr);
-err_out_cleardev:
-       pci_release_regions(pci_dev);
  err_out:
        free_netdev(net_dev);
        return ret;
@@ -2499,7 +2497,6 @@ static void sis900_remove(struct pci_dev *pci_dev)
                          sis_priv->tx_ring_dma);
        pci_iounmap(pci_dev, sis_priv->ioaddr);
        free_netdev(net_dev);
-       pci_release_regions(pci_dev);
 }
 
 static int __maybe_unused sis900_suspend(struct device *dev)
index 280ac01..ed81701 100644 (file)
@@ -21,7 +21,6 @@
 #include <linux/delay.h>
 #include <linux/mfd/syscon.h>
 #include <linux/regmap.h>
-#include <linux/pm_runtime.h>
 
 #include "stmmac_platform.h"
 
@@ -1529,9 +1528,6 @@ static int rk_gmac_powerup(struct rk_priv_data *bsp_priv)
                return ret;
        }
 
-       pm_runtime_enable(dev);
-       pm_runtime_get_sync(dev);
-
        if (bsp_priv->integrated_phy)
                rk_gmac_integrated_phy_powerup(bsp_priv);
 
@@ -1540,14 +1536,9 @@ static int rk_gmac_powerup(struct rk_priv_data *bsp_priv)
 
 static void rk_gmac_powerdown(struct rk_priv_data *gmac)
 {
-       struct device *dev = &gmac->pdev->dev;
-
        if (gmac->integrated_phy)
                rk_gmac_integrated_phy_powerdown(gmac);
 
-       pm_runtime_put_sync(dev);
-       pm_runtime_disable(dev);
-
        phy_power_on(gmac, false);
        gmac_clk_enable(gmac, false);
 }
index 67ba083..b217453 100644 (file)
@@ -1249,6 +1249,7 @@ const struct stmmac_ops dwmac410_ops = {
        .config_l3_filter = dwmac4_config_l3_filter,
        .config_l4_filter = dwmac4_config_l4_filter,
        .est_configure = dwmac5_est_configure,
+       .est_irq_status = dwmac5_est_irq_status,
        .fpe_configure = dwmac5_fpe_configure,
        .fpe_send_mpacket = dwmac5_fpe_send_mpacket,
        .fpe_irq_status = dwmac5_fpe_irq_status,
@@ -1300,6 +1301,7 @@ const struct stmmac_ops dwmac510_ops = {
        .config_l3_filter = dwmac4_config_l3_filter,
        .config_l4_filter = dwmac4_config_l4_filter,
        .est_configure = dwmac5_est_configure,
+       .est_irq_status = dwmac5_est_irq_status,
        .fpe_configure = dwmac5_fpe_configure,
        .fpe_send_mpacket = dwmac5_fpe_send_mpacket,
        .fpe_irq_status = dwmac5_fpe_irq_status,
index fcdb1d2..43eead7 100644 (file)
@@ -339,9 +339,9 @@ static inline bool stmmac_xdp_is_enabled(struct stmmac_priv *priv)
 static inline unsigned int stmmac_rx_offset(struct stmmac_priv *priv)
 {
        if (stmmac_xdp_is_enabled(priv))
-               return XDP_PACKET_HEADROOM + NET_IP_ALIGN;
+               return XDP_PACKET_HEADROOM;
 
-       return NET_SKB_PAD + NET_IP_ALIGN;
+       return 0;
 }
 
 void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue);
index 7b8404a..fa90bcd 100644 (file)
@@ -4914,6 +4914,10 @@ read_again:
 
                prefetch(np);
 
+               /* Ensure a valid XSK buffer before proceeding */
+               if (!buf->xdp)
+                       break;
+
                if (priv->extend_desc)
                        stmmac_rx_extended_status(priv, &priv->dev->stats,
                                                  &priv->xstats,
@@ -4934,10 +4938,6 @@ read_again:
                        continue;
                }
 
-               /* Ensure a valid XSK buffer before proceed */
-               if (!buf->xdp)
-                       break;
-
                /* XSK pool expects RX frame 1:1 mapped to XSK buffer */
                if (likely(status & rx_not_ls)) {
                        xsk_buff_free(buf->xdp);
index 4f3b643..8160087 100644 (file)
@@ -884,11 +884,13 @@ static int tc_setup_taprio(struct stmmac_priv *priv,
        return 0;
 
 disable:
-       mutex_lock(&priv->plat->est->lock);
-       priv->plat->est->enable = false;
-       stmmac_est_configure(priv, priv->ioaddr, priv->plat->est,
-                            priv->plat->clk_ptp_rate);
-       mutex_unlock(&priv->plat->est->lock);
+       if (priv->plat->est) {
+               mutex_lock(&priv->plat->est->lock);
+               priv->plat->est->enable = false;
+               stmmac_est_configure(priv, priv->ioaddr, priv->plat->est,
+                                    priv->plat->clk_ptp_rate);
+               mutex_unlock(&priv->plat->est->lock);
+       }
 
        priv->plat->fpe_cfg->enable = false;
        stmmac_fpe_configure(priv, priv->ioaddr,
index 105821b..2a616c6 100644 (file)
@@ -34,18 +34,18 @@ static int stmmac_xdp_enable_pool(struct stmmac_priv *priv,
        need_update = netif_running(priv->dev) && stmmac_xdp_is_enabled(priv);
 
        if (need_update) {
-               stmmac_disable_rx_queue(priv, queue);
-               stmmac_disable_tx_queue(priv, queue);
                napi_disable(&ch->rx_napi);
                napi_disable(&ch->tx_napi);
+               stmmac_disable_rx_queue(priv, queue);
+               stmmac_disable_tx_queue(priv, queue);
        }
 
        set_bit(queue, priv->af_xdp_zc_qps);
 
        if (need_update) {
-               napi_enable(&ch->rxtx_napi);
                stmmac_enable_rx_queue(priv, queue);
                stmmac_enable_tx_queue(priv, queue);
+               napi_enable(&ch->rxtx_napi);
 
                err = stmmac_xsk_wakeup(priv->dev, queue, XDP_WAKEUP_RX);
                if (err)
@@ -72,10 +72,10 @@ static int stmmac_xdp_disable_pool(struct stmmac_priv *priv, u16 queue)
        need_update = netif_running(priv->dev) && stmmac_xdp_is_enabled(priv);
 
        if (need_update) {
+               napi_disable(&ch->rxtx_napi);
                stmmac_disable_rx_queue(priv, queue);
                stmmac_disable_tx_queue(priv, queue);
                synchronize_rcu();
-               napi_disable(&ch->rxtx_napi);
        }
 
        xsk_pool_dma_unmap(pool, STMMAC_RX_DMA_ATTR);
@@ -83,10 +83,10 @@ static int stmmac_xdp_disable_pool(struct stmmac_priv *priv, u16 queue)
        clear_bit(queue, priv->af_xdp_zc_qps);
 
        if (need_update) {
-               napi_enable(&ch->rx_napi);
-               napi_enable(&ch->tx_napi);
                stmmac_enable_rx_queue(priv, queue);
                stmmac_enable_tx_queue(priv, queue);
+               napi_enable(&ch->rx_napi);
+               napi_enable(&ch->tx_napi);
        }
 
        return 0;
index 74e7486..860644d 100644 (file)
@@ -8191,8 +8191,9 @@ static int niu_pci_vpd_fetch(struct niu *np, u32 start)
                err = niu_pci_vpd_scan_props(np, here, end);
                if (err < 0)
                        return err;
+               /* ret == 1 is not an error */
                if (err == 1)
-                       return -EINVAL;
+                       return 0;
        }
        return 0;
 }
index 718539c..67a08cb 100644 (file)
@@ -2060,8 +2060,12 @@ static void am65_cpsw_port_offload_fwd_mark_update(struct am65_cpsw_common *comm
 
        for (i = 1; i <= common->port_num; i++) {
                struct am65_cpsw_port *port = am65_common_get_port(common, i);
-               struct am65_cpsw_ndev_priv *priv = am65_ndev_to_priv(port->ndev);
+               struct am65_cpsw_ndev_priv *priv;
 
+               if (!port->ndev)
+                       continue;
+
+               priv = am65_ndev_to_priv(port->ndev);
                priv->offload_fwd_mark = set_val;
        }
 }
index 9c29b36..599708a 100644 (file)
@@ -358,7 +358,7 @@ static int am65_cpsw_port_obj_del(struct net_device *ndev, const void *ctx,
 static void am65_cpsw_fdb_offload_notify(struct net_device *ndev,
                                         struct switchdev_notifier_fdb_info *rcv)
 {
-       struct switchdev_notifier_fdb_info info;
+       struct switchdev_notifier_fdb_info info = {};
 
        info.addr = rcv->addr;
        info.vid = rcv->vid;
index 57d279f..d1d0200 100644 (file)
@@ -920,7 +920,7 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
        struct cpdma_chan *txch;
        int ret, q_idx;
 
-       if (skb_padto(skb, CPSW_MIN_PACKET_SIZE)) {
+       if (skb_put_padto(skb, READ_ONCE(priv->tx_packet_min))) {
                cpsw_err(priv, tx_err, "packet pad failed\n");
                ndev->stats.tx_dropped++;
                return NET_XMIT_DROP;
@@ -1100,7 +1100,7 @@ static int cpsw_ndo_xdp_xmit(struct net_device *ndev, int n,
 
        for (i = 0; i < n; i++) {
                xdpf = frames[i];
-               if (xdpf->len < CPSW_MIN_PACKET_SIZE)
+               if (xdpf->len < READ_ONCE(priv->tx_packet_min))
                        break;
 
                if (cpsw_xdp_tx_frame(priv, xdpf, NULL, priv->emac_port))
@@ -1389,6 +1389,7 @@ static int cpsw_create_ports(struct cpsw_common *cpsw)
                priv->dev  = dev;
                priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
                priv->emac_port = i + 1;
+               priv->tx_packet_min = CPSW_MIN_PACKET_SIZE;
 
                if (is_valid_ether_addr(slave_data->mac_addr)) {
                        ether_addr_copy(priv->mac_addr, slave_data->mac_addr);
@@ -1686,6 +1687,7 @@ static int cpsw_dl_switch_mode_set(struct devlink *dl, u32 id,
 
                        priv = netdev_priv(sl_ndev);
                        slave->port_vlan = vlan;
+                       WRITE_ONCE(priv->tx_packet_min, CPSW_MIN_PACKET_SIZE_VLAN);
                        if (netif_running(sl_ndev))
                                cpsw_port_add_switch_def_ale_entries(priv,
                                                                     slave);
@@ -1714,6 +1716,7 @@ static int cpsw_dl_switch_mode_set(struct devlink *dl, u32 id,
 
                        priv = netdev_priv(slave->ndev);
                        slave->port_vlan = slave->data->dual_emac_res_vlan;
+                       WRITE_ONCE(priv->tx_packet_min, CPSW_MIN_PACKET_SIZE);
                        cpsw_port_add_dual_emac_def_ale_entries(priv, slave);
                }
 
index a323bea..2951fb7 100644 (file)
@@ -89,7 +89,8 @@ do {                                                          \
 
 #define CPSW_POLL_WEIGHT       64
 #define CPSW_RX_VLAN_ENCAP_HDR_SIZE            4
-#define CPSW_MIN_PACKET_SIZE   (VLAN_ETH_ZLEN)
+#define CPSW_MIN_PACKET_SIZE_VLAN      (VLAN_ETH_ZLEN)
+#define CPSW_MIN_PACKET_SIZE   (ETH_ZLEN)
 #define CPSW_MAX_PACKET_SIZE   (VLAN_ETH_FRAME_LEN +\
                                 ETH_FCS_LEN +\
                                 CPSW_RX_VLAN_ENCAP_HDR_SIZE)
@@ -380,6 +381,7 @@ struct cpsw_priv {
        u32 emac_port;
        struct cpsw_common *cpsw;
        int offload_fwd_mark;
+       u32 tx_packet_min;
 };
 
 #define ndev_to_cpsw(ndev) (((struct cpsw_priv *)netdev_priv(ndev))->cpsw)
index f7fb6e1..a7d97d4 100644 (file)
@@ -368,7 +368,7 @@ static int cpsw_port_obj_del(struct net_device *ndev, const void *ctx,
 static void cpsw_fdb_offload_notify(struct net_device *ndev,
                                    struct switchdev_notifier_fdb_info *rcv)
 {
-       struct switchdev_notifier_fdb_info info;
+       struct switchdev_notifier_fdb_info info = {};
 
        info.addr = rcv->addr;
        info.vid = rcv->vid;
index fcf3af7..8fe8887 100644 (file)
@@ -827,6 +827,12 @@ static void decode_data(struct sixpack *sp, unsigned char inbyte)
                return;
        }
 
+       if (sp->rx_count_cooked + 2 >= sizeof(sp->cooked_buf)) {
+               pr_err("6pack: cooked buffer overrun, data loss\n");
+               sp->rx_count = 0;
+               return;
+       }
+
        buf = sp->raw_buf;
        sp->cooked_buf[sp->rx_count_cooked++] =
                buf[0] | ((buf[1] << 2) & 0xc0);
index ebc976b..8caa61e 100644 (file)
@@ -418,7 +418,7 @@ static int hwsim_new_edge_nl(struct sk_buff *msg, struct genl_info *info)
        struct hwsim_edge *e;
        u32 v0, v1;
 
-       if (!info->attrs[MAC802154_HWSIM_ATTR_RADIO_ID] &&
+       if (!info->attrs[MAC802154_HWSIM_ATTR_RADIO_ID] ||
            !info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE])
                return -EINVAL;
 
@@ -528,14 +528,14 @@ static int hwsim_set_edge_lqi(struct sk_buff *msg, struct genl_info *info)
        u32 v0, v1;
        u8 lqi;
 
-       if (!info->attrs[MAC802154_HWSIM_ATTR_RADIO_ID] &&
+       if (!info->attrs[MAC802154_HWSIM_ATTR_RADIO_ID] ||
            !info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE])
                return -EINVAL;
 
        if (nla_parse_nested_deprecated(edge_attrs, MAC802154_HWSIM_EDGE_ATTR_MAX, info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE], hwsim_edge_policy, NULL))
                return -EINVAL;
 
-       if (!edge_attrs[MAC802154_HWSIM_EDGE_ATTR_ENDPOINT_ID] &&
+       if (!edge_attrs[MAC802154_HWSIM_EDGE_ATTR_ENDPOINT_ID] ||
            !edge_attrs[MAC802154_HWSIM_EDGE_ATTR_LQI])
                return -EINVAL;
 
index 110e4ee..ebd001f 100644 (file)
@@ -82,6 +82,17 @@ out:
 
 static int parent_count;
 
+static void mdio_mux_uninit_children(struct mdio_mux_parent_bus *pb)
+{
+       struct mdio_mux_child_bus *cb = pb->children;
+
+       while (cb) {
+               mdiobus_unregister(cb->mii_bus);
+               mdiobus_free(cb->mii_bus);
+               cb = cb->next;
+       }
+}
+
 int mdio_mux_init(struct device *dev,
                  struct device_node *mux_node,
                  int (*switch_fn)(int cur, int desired, void *data),
@@ -144,7 +155,7 @@ int mdio_mux_init(struct device *dev,
                cb = devm_kzalloc(dev, sizeof(*cb), GFP_KERNEL);
                if (!cb) {
                        ret_val = -ENOMEM;
-                       continue;
+                       goto err_loop;
                }
                cb->bus_number = v;
                cb->parent = pb;
@@ -152,8 +163,7 @@ int mdio_mux_init(struct device *dev,
                cb->mii_bus = mdiobus_alloc();
                if (!cb->mii_bus) {
                        ret_val = -ENOMEM;
-                       devm_kfree(dev, cb);
-                       continue;
+                       goto err_loop;
                }
                cb->mii_bus->priv = cb;
 
@@ -165,11 +175,15 @@ int mdio_mux_init(struct device *dev,
                cb->mii_bus->write = mdio_mux_write;
                r = of_mdiobus_register(cb->mii_bus, child_bus_node);
                if (r) {
+                       mdiobus_free(cb->mii_bus);
+                       if (r == -EPROBE_DEFER) {
+                               ret_val = r;
+                               goto err_loop;
+                       }
+                       devm_kfree(dev, cb);
                        dev_err(dev,
                                "Error: Failed to register MDIO bus for child %pOF\n",
                                child_bus_node);
-                       mdiobus_free(cb->mii_bus);
-                       devm_kfree(dev, cb);
                } else {
                        cb->next = pb->children;
                        pb->children = cb;
@@ -181,7 +195,10 @@ int mdio_mux_init(struct device *dev,
        }
 
        dev_err(dev, "Error: No acceptable child buses found\n");
-       devm_kfree(dev, pb);
+
+err_loop:
+       mdio_mux_uninit_children(pb);
+       of_node_put(child_bus_node);
 err_pb_kz:
        put_device(&parent_bus->dev);
 err_parent_bus:
@@ -193,14 +210,8 @@ EXPORT_SYMBOL_GPL(mdio_mux_init);
 void mdio_mux_uninit(void *mux_handle)
 {
        struct mdio_mux_parent_bus *pb = mux_handle;
-       struct mdio_mux_child_bus *cb = pb->children;
-
-       while (cb) {
-               mdiobus_unregister(cb->mii_bus);
-               mdiobus_free(cb->mii_bus);
-               cb = cb->next;
-       }
 
+       mdio_mux_uninit_children(pb);
        put_device(&pb->mii_bus->dev);
 }
 EXPORT_SYMBOL_GPL(mdio_mux_uninit);
index 63fda3f..4bd6133 100644 (file)
@@ -1089,7 +1089,7 @@ struct dw_xpcs *xpcs_create(struct mdio_device *mdiodev,
 
        xpcs = kzalloc(sizeof(*xpcs), GFP_KERNEL);
        if (!xpcs)
-               return NULL;
+               return ERR_PTR(-ENOMEM);
 
        xpcs->mdiodev = mdiodev;
 
index 7bf3011..83aea5c 100644 (file)
@@ -288,7 +288,7 @@ static void bcm54xx_adjust_rxrefclk(struct phy_device *phydev)
        if (phydev->dev_flags & PHY_BRCM_DIS_TXCRXC_NOENRGY) {
                if (BRCM_PHY_MODEL(phydev) == PHY_ID_BCM54210E ||
                    BRCM_PHY_MODEL(phydev) == PHY_ID_BCM54810 ||
-                   BRCM_PHY_MODEL(phydev) == PHY_ID_BCM54210E)
+                   BRCM_PHY_MODEL(phydev) == PHY_ID_BCM54811)
                        val |= BCM54XX_SHD_SCR3_RXCTXC_DIS;
                else
                        val |= BCM54XX_SHD_SCR3_TRDDAPD;
index 11ff335..b7a5ae2 100644 (file)
@@ -81,6 +81,8 @@ static struct phy_driver mtk_gephy_driver[] = {
                 */
                .config_intr    = genphy_no_config_intr,
                .handle_interrupt = genphy_handle_interrupt_no_ack,
+               .suspend        = genphy_suspend,
+               .resume         = genphy_resume,
                .read_page      = mtk_gephy_read_page,
                .write_page     = mtk_gephy_write_page,
        },
@@ -93,6 +95,8 @@ static struct phy_driver mtk_gephy_driver[] = {
                 */
                .config_intr    = genphy_no_config_intr,
                .handle_interrupt = genphy_handle_interrupt_no_ack,
+               .suspend        = genphy_suspend,
+               .resume         = genphy_resume,
                .read_page      = mtk_gephy_read_page,
                .write_page     = mtk_gephy_write_page,
        },
index 4d53886..5c928f8 100644 (file)
@@ -401,11 +401,11 @@ static int ksz8041_config_aneg(struct phy_device *phydev)
 }
 
 static int ksz8051_ksz8795_match_phy_device(struct phy_device *phydev,
-                                           const u32 ksz_phy_id)
+                                           const bool ksz_8051)
 {
        int ret;
 
-       if ((phydev->phy_id & MICREL_PHY_ID_MASK) != ksz_phy_id)
+       if ((phydev->phy_id & MICREL_PHY_ID_MASK) != PHY_ID_KSZ8051)
                return 0;
 
        ret = phy_read(phydev, MII_BMSR);
@@ -418,7 +418,7 @@ static int ksz8051_ksz8795_match_phy_device(struct phy_device *phydev,
         * the switch does not.
         */
        ret &= BMSR_ERCAP;
-       if (ksz_phy_id == PHY_ID_KSZ8051)
+       if (ksz_8051)
                return ret;
        else
                return !ret;
@@ -426,7 +426,7 @@ static int ksz8051_ksz8795_match_phy_device(struct phy_device *phydev,
 
 static int ksz8051_match_phy_device(struct phy_device *phydev)
 {
-       return ksz8051_ksz8795_match_phy_device(phydev, PHY_ID_KSZ8051);
+       return ksz8051_ksz8795_match_phy_device(phydev, true);
 }
 
 static int ksz8081_config_init(struct phy_device *phydev)
@@ -535,7 +535,7 @@ static int ksz8061_config_init(struct phy_device *phydev)
 
 static int ksz8795_match_phy_device(struct phy_device *phydev)
 {
-       return ksz8051_ksz8795_match_phy_device(phydev, PHY_ID_KSZ87XX);
+       return ksz8051_ksz8795_match_phy_device(phydev, false);
 }
 
 static int ksz9021_load_values_from_of(struct phy_device *phydev,
@@ -1760,8 +1760,6 @@ static struct phy_driver ksphy_driver[] = {
        .name           = "Micrel KSZ87XX Switch",
        /* PHY_BASIC_FEATURES */
        .config_init    = kszphy_config_init,
-       .config_aneg    = ksz8873mll_config_aneg,
-       .read_status    = ksz8873mll_read_status,
        .match_phy_device = ksz8795_match_phy_device,
        .suspend        = genphy_suspend,
        .resume         = genphy_resume,
index 930e49e..7a099c3 100644 (file)
@@ -284,7 +284,7 @@ static struct channel *ppp_find_channel(struct ppp_net *pn, int unit);
 static int ppp_connect_channel(struct channel *pch, int unit);
 static int ppp_disconnect_channel(struct channel *pch);
 static void ppp_destroy_channel(struct channel *pch);
-static int unit_get(struct idr *p, void *ptr);
+static int unit_get(struct idr *p, void *ptr, int min);
 static int unit_set(struct idr *p, void *ptr, int n);
 static void unit_put(struct idr *p, int n);
 static void *unit_find(struct idr *p, int n);
@@ -1155,9 +1155,20 @@ static int ppp_unit_register(struct ppp *ppp, int unit, bool ifname_is_set)
        mutex_lock(&pn->all_ppp_mutex);
 
        if (unit < 0) {
-               ret = unit_get(&pn->units_idr, ppp);
+               ret = unit_get(&pn->units_idr, ppp, 0);
                if (ret < 0)
                        goto err;
+               if (!ifname_is_set) {
+                       while (1) {
+                               snprintf(ppp->dev->name, IFNAMSIZ, "ppp%i", ret);
+                               if (!__dev_get_by_name(ppp->ppp_net, ppp->dev->name))
+                                       break;
+                               unit_put(&pn->units_idr, ret);
+                               ret = unit_get(&pn->units_idr, ppp, ret + 1);
+                               if (ret < 0)
+                                       goto err;
+                       }
+               }
        } else {
                /* Caller asked for a specific unit number. Fail with -EEXIST
                 * if unavailable. For backward compatibility, return -EEXIST
@@ -1306,7 +1317,7 @@ static int ppp_nl_newlink(struct net *src_net, struct net_device *dev,
         * the PPP unit identifer as suffix (i.e. ppp<unit_id>). This allows
         * userspace to infer the device name using to the PPPIOCGUNIT ioctl.
         */
-       if (!tb[IFLA_IFNAME])
+       if (!tb[IFLA_IFNAME] || !nla_len(tb[IFLA_IFNAME]) || !*(char *)nla_data(tb[IFLA_IFNAME]))
                conf.ifname_is_set = false;
 
        err = ppp_dev_configure(src_net, dev, &conf);
@@ -3552,9 +3563,9 @@ static int unit_set(struct idr *p, void *ptr, int n)
 }
 
 /* get new free unit number and associate pointer with it */
-static int unit_get(struct idr *p, void *ptr)
+static int unit_get(struct idr *p, void *ptr, int min)
 {
-       return idr_alloc(p, ptr, 0, 0, GFP_KERNEL);
+       return idr_alloc(p, ptr, min, 0, GFP_KERNEL);
 }
 
 /* put unit number back to a pool */
index e1994a2..2a1e31d 100644 (file)
@@ -184,6 +184,7 @@ struct asix_common_private {
        struct phy_device *phydev;
        u16 phy_addr;
        char phy_name[20];
+       bool embd_phy;
 };
 
 extern const struct driver_info ax88172a_info;
index ac92bc5..38cda59 100644 (file)
@@ -63,6 +63,29 @@ void asix_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value, u16 index,
                               value, index, data, size);
 }
 
+static int asix_check_host_enable(struct usbnet *dev, int in_pm)
+{
+       int i, ret;
+       u8 smsr;
+
+       for (i = 0; i < 30; ++i) {
+               ret = asix_set_sw_mii(dev, in_pm);
+               if (ret == -ENODEV || ret == -ETIMEDOUT)
+                       break;
+               usleep_range(1000, 1100);
+               ret = asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG,
+                                   0, 0, 1, &smsr, in_pm);
+               if (ret == -ENODEV)
+                       break;
+               else if (ret < 0)
+                       continue;
+               else if (smsr & AX_HOST_EN)
+                       break;
+       }
+
+       return ret;
+}
+
 static void reset_asix_rx_fixup_info(struct asix_rx_fixup_info *rx)
 {
        /* Reset the variables that have a lifetime outside of
@@ -467,19 +490,11 @@ int asix_mdio_read(struct net_device *netdev, int phy_id, int loc)
 {
        struct usbnet *dev = netdev_priv(netdev);
        __le16 res;
-       u8 smsr;
-       int i = 0;
        int ret;
 
        mutex_lock(&dev->phy_mutex);
-       do {
-               ret = asix_set_sw_mii(dev, 0);
-               if (ret == -ENODEV || ret == -ETIMEDOUT)
-                       break;
-               usleep_range(1000, 1100);
-               ret = asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG,
-                                   0, 0, 1, &smsr, 0);
-       } while (!(smsr & AX_HOST_EN) && (i++ < 30) && (ret != -ENODEV));
+
+       ret = asix_check_host_enable(dev, 0);
        if (ret == -ENODEV || ret == -ETIMEDOUT) {
                mutex_unlock(&dev->phy_mutex);
                return ret;
@@ -505,23 +520,14 @@ static int __asix_mdio_write(struct net_device *netdev, int phy_id, int loc,
 {
        struct usbnet *dev = netdev_priv(netdev);
        __le16 res = cpu_to_le16(val);
-       u8 smsr;
-       int i = 0;
        int ret;
 
        netdev_dbg(dev->net, "asix_mdio_write() phy_id=0x%02x, loc=0x%02x, val=0x%04x\n",
                        phy_id, loc, val);
 
        mutex_lock(&dev->phy_mutex);
-       do {
-               ret = asix_set_sw_mii(dev, 0);
-               if (ret == -ENODEV)
-                       break;
-               usleep_range(1000, 1100);
-               ret = asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG,
-                                   0, 0, 1, &smsr, 0);
-       } while (!(smsr & AX_HOST_EN) && (i++ < 30) && (ret != -ENODEV));
 
+       ret = asix_check_host_enable(dev, 0);
        if (ret == -ENODEV)
                goto out;
 
@@ -561,19 +567,11 @@ int asix_mdio_read_nopm(struct net_device *netdev, int phy_id, int loc)
 {
        struct usbnet *dev = netdev_priv(netdev);
        __le16 res;
-       u8 smsr;
-       int i = 0;
        int ret;
 
        mutex_lock(&dev->phy_mutex);
-       do {
-               ret = asix_set_sw_mii(dev, 1);
-               if (ret == -ENODEV || ret == -ETIMEDOUT)
-                       break;
-               usleep_range(1000, 1100);
-               ret = asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG,
-                                   0, 0, 1, &smsr, 1);
-       } while (!(smsr & AX_HOST_EN) && (i++ < 30) && (ret != -ENODEV));
+
+       ret = asix_check_host_enable(dev, 1);
        if (ret == -ENODEV || ret == -ETIMEDOUT) {
                mutex_unlock(&dev->phy_mutex);
                return ret;
@@ -595,22 +593,14 @@ asix_mdio_write_nopm(struct net_device *netdev, int phy_id, int loc, int val)
 {
        struct usbnet *dev = netdev_priv(netdev);
        __le16 res = cpu_to_le16(val);
-       u8 smsr;
-       int i = 0;
        int ret;
 
        netdev_dbg(dev->net, "asix_mdio_write() phy_id=0x%02x, loc=0x%02x, val=0x%04x\n",
                        phy_id, loc, val);
 
        mutex_lock(&dev->phy_mutex);
-       do {
-               ret = asix_set_sw_mii(dev, 1);
-               if (ret == -ENODEV)
-                       break;
-               usleep_range(1000, 1100);
-               ret = asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG,
-                                   0, 0, 1, &smsr, 1);
-       } while (!(smsr & AX_HOST_EN) && (i++ < 30) && (ret != -ENODEV));
+
+       ret = asix_check_host_enable(dev, 1);
        if (ret == -ENODEV) {
                mutex_unlock(&dev->phy_mutex);
                return;
index 2c11521..dc87e8c 100644 (file)
@@ -354,24 +354,23 @@ out:
 static int ax88772_hw_reset(struct usbnet *dev, int in_pm)
 {
        struct asix_data *data = (struct asix_data *)&dev->data;
-       int ret, embd_phy;
+       struct asix_common_private *priv = dev->driver_priv;
        u16 rx_ctl;
+       int ret;
 
        ret = asix_write_gpio(dev, AX_GPIO_RSE | AX_GPIO_GPO_2 |
                              AX_GPIO_GPO2EN, 5, in_pm);
        if (ret < 0)
                goto out;
 
-       embd_phy = ((dev->mii.phy_id & 0x1f) == 0x10 ? 1 : 0);
-
-       ret = asix_write_cmd(dev, AX_CMD_SW_PHY_SELECT, embd_phy,
+       ret = asix_write_cmd(dev, AX_CMD_SW_PHY_SELECT, priv->embd_phy,
                             0, 0, NULL, in_pm);
        if (ret < 0) {
                netdev_dbg(dev->net, "Select PHY #1 failed: %d\n", ret);
                goto out;
        }
 
-       if (embd_phy) {
+       if (priv->embd_phy) {
                ret = asix_sw_reset(dev, AX_SWRESET_IPPD, in_pm);
                if (ret < 0)
                        goto out;
@@ -449,17 +448,16 @@ out:
 static int ax88772a_hw_reset(struct usbnet *dev, int in_pm)
 {
        struct asix_data *data = (struct asix_data *)&dev->data;
-       int ret, embd_phy;
+       struct asix_common_private *priv = dev->driver_priv;
        u16 rx_ctl, phy14h, phy15h, phy16h;
        u8 chipcode = 0;
+       int ret;
 
        ret = asix_write_gpio(dev, AX_GPIO_RSE, 5, in_pm);
        if (ret < 0)
                goto out;
 
-       embd_phy = ((dev->mii.phy_id & 0x1f) == 0x10 ? 1 : 0);
-
-       ret = asix_write_cmd(dev, AX_CMD_SW_PHY_SELECT, embd_phy |
+       ret = asix_write_cmd(dev, AX_CMD_SW_PHY_SELECT, priv->embd_phy |
                             AX_PHYSEL_SSEN, 0, 0, NULL, in_pm);
        if (ret < 0) {
                netdev_dbg(dev->net, "Select PHY #1 failed: %d\n", ret);
@@ -683,12 +681,6 @@ static int ax88772_init_phy(struct usbnet *dev)
        struct asix_common_private *priv = dev->driver_priv;
        int ret;
 
-       ret = asix_read_phy_addr(dev, true);
-       if (ret < 0)
-               return ret;
-
-       priv->phy_addr = ret;
-
        snprintf(priv->phy_name, sizeof(priv->phy_name), PHY_ID_FMT,
                 priv->mdio->id, priv->phy_addr);
 
@@ -716,6 +708,12 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
        int ret, i;
        u32 phyid;
 
+       priv = devm_kzalloc(&dev->udev->dev, sizeof(*priv), GFP_KERNEL);
+       if (!priv)
+               return -ENOMEM;
+
+       dev->driver_priv = priv;
+
        usbnet_get_endpoints(dev, intf);
 
        /* Maybe the boot loader passed the MAC address via device tree */
@@ -751,6 +749,13 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
        dev->net->needed_headroom = 4; /* cf asix_tx_fixup() */
        dev->net->needed_tailroom = 4; /* cf asix_tx_fixup() */
 
+       ret = asix_read_phy_addr(dev, true);
+       if (ret < 0)
+               return ret;
+
+       priv->phy_addr = ret;
+       priv->embd_phy = ((priv->phy_addr & 0x1f) == 0x10);
+
        asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG, 0, 0, 1, &chipcode, 0);
        chipcode &= AX_CHIPCODE_MASK;
 
@@ -773,12 +778,6 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
                dev->rx_urb_size = 2048;
        }
 
-       priv = devm_kzalloc(&dev->udev->dev, sizeof(*priv), GFP_KERNEL);
-       if (!priv)
-               return -ENOMEM;
-
-       dev->driver_priv = priv;
-
        priv->presvd_phy_bmcr = 0;
        priv->presvd_phy_advertise = 0;
        if (chipcode == AX_AX88772_CHIPCODE) {
@@ -817,6 +816,12 @@ static void ax88772_unbind(struct usbnet *dev, struct usb_interface *intf)
        asix_rx_fixup_common_free(dev->driver_priv);
 }
 
+static void ax88178_unbind(struct usbnet *dev, struct usb_interface *intf)
+{
+       asix_rx_fixup_common_free(dev->driver_priv);
+       kfree(dev->driver_priv);
+}
+
 static const struct ethtool_ops ax88178_ethtool_ops = {
        .get_drvinfo            = asix_get_drvinfo,
        .get_link               = asix_get_link,
@@ -1225,7 +1230,7 @@ static const struct driver_info ax88772b_info = {
 static const struct driver_info ax88178_info = {
        .description = "ASIX AX88178 USB 2.0 Ethernet",
        .bind = ax88178_bind,
-       .unbind = ax88772_unbind,
+       .unbind = ax88178_unbind,
        .status = asix_status,
        .link_reset = ax88178_link_reset,
        .reset = ax88178_reset,
index 2548938..6d092d7 100644 (file)
@@ -1154,7 +1154,7 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
 {
        struct phy_device *phydev = dev->net->phydev;
        struct ethtool_link_ksettings ecmd;
-       int ladv, radv, ret;
+       int ladv, radv, ret, link;
        u32 buf;
 
        /* clear LAN78xx interrupt status */
@@ -1162,9 +1162,12 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
        if (unlikely(ret < 0))
                return -EIO;
 
+       mutex_lock(&phydev->lock);
        phy_read_status(phydev);
+       link = phydev->link;
+       mutex_unlock(&phydev->lock);
 
-       if (!phydev->link && dev->link_on) {
+       if (!link && dev->link_on) {
                dev->link_on = false;
 
                /* reset MAC */
@@ -1177,7 +1180,7 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
                        return -EIO;
 
                del_timer(&dev->stat_monitor);
-       } else if (phydev->link && !dev->link_on) {
+       } else if (link && !dev->link_on) {
                dev->link_on = true;
 
                phy_ethtool_ksettings_get(phydev, &ecmd);
@@ -1466,9 +1469,14 @@ static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
 
 static u32 lan78xx_get_link(struct net_device *net)
 {
+       u32 link;
+
+       mutex_lock(&net->phydev->lock);
        phy_read_status(net->phydev);
+       link = net->phydev->link;
+       mutex_unlock(&net->phydev->lock);
 
-       return net->phydev->link;
+       return link;
 }
 
 static void lan78xx_get_drvinfo(struct net_device *net,
index 9a90718..9f9dd0d 100644 (file)
@@ -1,31 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- *  Copyright (c) 1999-2013 Petko Manolov (petkan@nucleusys.com)
+ *  Copyright (c) 1999-2021 Petko Manolov (petkan@nucleusys.com)
  *
- *     ChangeLog:
- *             ....    Most of the time spent on reading sources & docs.
- *             v0.2.x  First official release for the Linux kernel.
- *             v0.3.0  Beutified and structured, some bugs fixed.
- *             v0.3.x  URBifying bulk requests and bugfixing. First relatively
- *                     stable release. Still can touch device's registers only
- *                     from top-halves.
- *             v0.4.0  Control messages remained unurbified are now URBs.
- *                     Now we can touch the HW at any time.
- *             v0.4.9  Control urbs again use process context to wait. Argh...
- *                     Some long standing bugs (enable_net_traffic) fixed.
- *                     Also nasty trick about resubmiting control urb from
- *                     interrupt context used. Please let me know how it
- *                     behaves. Pegasus II support added since this version.
- *                     TODO: suppressing HCD warnings spewage on disconnect.
- *             v0.4.13 Ethernet address is now set at probe(), not at open()
- *                     time as this seems to break dhcpd.
- *             v0.5.0  branch to 2.5.x kernels
- *             v0.5.1  ethtool support added
- *             v0.5.5  rx socket buffers are in a pool and the their allocation
- *                     is out of the interrupt routine.
- *             ...
- *             v0.9.3  simplified [get|set]_register(s), async update registers
- *                     logic revisited, receive skb_pool removed.
  */
 
 #include <linux/sched.h>
@@ -45,7 +21,6 @@
 /*
  * Version Information
  */
-#define DRIVER_VERSION "v0.9.3 (2013/04/25)"
 #define DRIVER_AUTHOR "Petko Manolov <petkan@nucleusys.com>"
 #define DRIVER_DESC "Pegasus/Pegasus II USB Ethernet driver"
 
@@ -132,9 +107,15 @@ static int get_registers(pegasus_t *pegasus, __u16 indx, __u16 size, void *data)
 static int set_registers(pegasus_t *pegasus, __u16 indx, __u16 size,
                         const void *data)
 {
-       return usb_control_msg_send(pegasus->usb, 0, PEGASUS_REQ_SET_REGS,
+       int ret;
+
+       ret = usb_control_msg_send(pegasus->usb, 0, PEGASUS_REQ_SET_REGS,
                                    PEGASUS_REQT_WRITE, 0, indx, data, size,
                                    1000, GFP_NOIO);
+       if (ret < 0)
+               netif_dbg(pegasus, drv, pegasus->net, "%s failed with %d\n", __func__, ret);
+
+       return ret;
 }
 
 /*
@@ -145,10 +126,15 @@ static int set_registers(pegasus_t *pegasus, __u16 indx, __u16 size,
 static int set_register(pegasus_t *pegasus, __u16 indx, __u8 data)
 {
        void *buf = &data;
+       int ret;
 
-       return usb_control_msg_send(pegasus->usb, 0, PEGASUS_REQ_SET_REG,
+       ret = usb_control_msg_send(pegasus->usb, 0, PEGASUS_REQ_SET_REG,
                                    PEGASUS_REQT_WRITE, data, indx, buf, 1,
                                    1000, GFP_NOIO);
+       if (ret < 0)
+               netif_dbg(pegasus, drv, pegasus->net, "%s failed with %d\n", __func__, ret);
+
+       return ret;
 }
 
 static int update_eth_regs_async(pegasus_t *pegasus)
@@ -188,10 +174,9 @@ static int update_eth_regs_async(pegasus_t *pegasus)
 
 static int __mii_op(pegasus_t *p, __u8 phy, __u8 indx, __u16 *regd, __u8 cmd)
 {
-       int i;
-       __u8 data[4] = { phy, 0, 0, indx };
+       int i, ret;
        __le16 regdi;
-       int ret = -ETIMEDOUT;
+       __u8 data[4] = { phy, 0, 0, indx };
 
        if (cmd & PHY_WRITE) {
                __le16 *t = (__le16 *) & data[1];
@@ -207,12 +192,15 @@ static int __mii_op(pegasus_t *p, __u8 phy, __u8 indx, __u16 *regd, __u8 cmd)
                if (data[0] & PHY_DONE)
                        break;
        }
-       if (i >= REG_TIMEOUT)
+       if (i >= REG_TIMEOUT) {
+               ret = -ETIMEDOUT;
                goto fail;
+       }
        if (cmd & PHY_READ) {
                ret = get_registers(p, PhyData, 2, &regdi);
+               if (ret < 0)
+                       goto fail;
                *regd = le16_to_cpu(regdi);
-               return ret;
        }
        return 0;
 fail:
@@ -235,9 +223,13 @@ static int write_mii_word(pegasus_t *pegasus, __u8 phy, __u8 indx, __u16 *regd)
 static int mdio_read(struct net_device *dev, int phy_id, int loc)
 {
        pegasus_t *pegasus = netdev_priv(dev);
+       int ret;
        u16 res;
 
-       read_mii_word(pegasus, phy_id, loc, &res);
+       ret = read_mii_word(pegasus, phy_id, loc, &res);
+       if (ret < 0)
+               return ret;
+
        return (int)res;
 }
 
@@ -251,10 +243,9 @@ static void mdio_write(struct net_device *dev, int phy_id, int loc, int val)
 
 static int read_eprom_word(pegasus_t *pegasus, __u8 index, __u16 *retdata)
 {
-       int i;
-       __u8 tmp = 0;
+       int ret, i;
        __le16 retdatai;
-       int ret;
+       __u8 tmp = 0;
 
        set_register(pegasus, EpromCtrl, 0);
        set_register(pegasus, EpromOffset, index);
@@ -262,21 +253,25 @@ static int read_eprom_word(pegasus_t *pegasus, __u8 index, __u16 *retdata)
 
        for (i = 0; i < REG_TIMEOUT; i++) {
                ret = get_registers(pegasus, EpromCtrl, 1, &tmp);
+               if (ret < 0)
+                       goto fail;
                if (tmp & EPROM_DONE)
                        break;
-               if (ret == -ESHUTDOWN)
-                       goto fail;
        }
-       if (i >= REG_TIMEOUT)
+       if (i >= REG_TIMEOUT) {
+               ret = -ETIMEDOUT;
                goto fail;
+       }
 
        ret = get_registers(pegasus, EpromData, 2, &retdatai);
+       if (ret < 0)
+               goto fail;
        *retdata = le16_to_cpu(retdatai);
        return ret;
 
 fail:
-       netif_warn(pegasus, drv, pegasus->net, "%s failed\n", __func__);
-       return -ETIMEDOUT;
+       netif_dbg(pegasus, drv, pegasus->net, "%s failed\n", __func__);
+       return ret;
 }
 
 #ifdef PEGASUS_WRITE_EEPROM
@@ -324,10 +319,10 @@ static int write_eprom_word(pegasus_t *pegasus, __u8 index, __u16 data)
        return ret;
 
 fail:
-       netif_warn(pegasus, drv, pegasus->net, "%s failed\n", __func__);
+       netif_dbg(pegasus, drv, pegasus->net, "%s failed\n", __func__);
        return -ETIMEDOUT;
 }
-#endif                         /* PEGASUS_WRITE_EEPROM */
+#endif /* PEGASUS_WRITE_EEPROM */
 
 static inline int get_node_id(pegasus_t *pegasus, u8 *id)
 {
@@ -367,19 +362,21 @@ static void set_ethernet_addr(pegasus_t *pegasus)
        return;
 err:
        eth_hw_addr_random(pegasus->net);
-       dev_info(&pegasus->intf->dev, "software assigned MAC address.\n");
+       netif_dbg(pegasus, drv, pegasus->net, "software assigned MAC address.\n");
 
        return;
 }
 
 static inline int reset_mac(pegasus_t *pegasus)
 {
+       int ret, i;
        __u8 data = 0x8;
-       int i;
 
        set_register(pegasus, EthCtrl1, data);
        for (i = 0; i < REG_TIMEOUT; i++) {
-               get_registers(pegasus, EthCtrl1, 1, &data);
+               ret = get_registers(pegasus, EthCtrl1, 1, &data);
+               if (ret < 0)
+                       goto fail;
                if (~data & 0x08) {
                        if (loopback)
                                break;
@@ -402,22 +399,29 @@ static inline int reset_mac(pegasus_t *pegasus)
        }
        if (usb_dev_id[pegasus->dev_index].vendor == VENDOR_ELCON) {
                __u16 auxmode;
-               read_mii_word(pegasus, 3, 0x1b, &auxmode);
+               ret = read_mii_word(pegasus, 3, 0x1b, &auxmode);
+               if (ret < 0)
+                       goto fail;
                auxmode |= 4;
                write_mii_word(pegasus, 3, 0x1b, &auxmode);
        }
 
        return 0;
+fail:
+       netif_dbg(pegasus, drv, pegasus->net, "%s failed\n", __func__);
+       return ret;
 }
 
 static int enable_net_traffic(struct net_device *dev, struct usb_device *usb)
 {
-       __u16 linkpart;
-       __u8 data[4];
        pegasus_t *pegasus = netdev_priv(dev);
        int ret;
+       __u16 linkpart;
+       __u8 data[4];
 
-       read_mii_word(pegasus, pegasus->phy, MII_LPA, &linkpart);
+       ret = read_mii_word(pegasus, pegasus->phy, MII_LPA, &linkpart);
+       if (ret < 0)
+               goto fail;
        data[0] = 0xc8; /* TX & RX enable, append status, no CRC */
        data[1] = 0;
        if (linkpart & (ADVERTISE_100FULL | ADVERTISE_10FULL))
@@ -435,11 +439,16 @@ static int enable_net_traffic(struct net_device *dev, struct usb_device *usb)
            usb_dev_id[pegasus->dev_index].vendor == VENDOR_LINKSYS2 ||
            usb_dev_id[pegasus->dev_index].vendor == VENDOR_DLINK) {
                u16 auxmode;
-               read_mii_word(pegasus, 0, 0x1b, &auxmode);
+               ret = read_mii_word(pegasus, 0, 0x1b, &auxmode);
+               if (ret < 0)
+                       goto fail;
                auxmode |= 4;
                write_mii_word(pegasus, 0, 0x1b, &auxmode);
        }
 
+       return ret;
+fail:
+       netif_dbg(pegasus, drv, pegasus->net, "%s failed\n", __func__);
        return ret;
 }
 
@@ -447,9 +456,9 @@ static void read_bulk_callback(struct urb *urb)
 {
        pegasus_t *pegasus = urb->context;
        struct net_device *net;
+       u8 *buf = urb->transfer_buffer;
        int rx_status, count = urb->actual_length;
        int status = urb->status;
-       u8 *buf = urb->transfer_buffer;
        __u16 pkt_len;
 
        if (!pegasus)
@@ -735,12 +744,16 @@ static inline void disable_net_traffic(pegasus_t *pegasus)
        set_registers(pegasus, EthCtrl0, sizeof(tmp), &tmp);
 }
 
-static inline void get_interrupt_interval(pegasus_t *pegasus)
+static inline int get_interrupt_interval(pegasus_t *pegasus)
 {
        u16 data;
        u8 interval;
+       int ret;
+
+       ret = read_eprom_word(pegasus, 4, &data);
+       if (ret < 0)
+               return ret;
 
-       read_eprom_word(pegasus, 4, &data);
        interval = data >> 8;
        if (pegasus->usb->speed != USB_SPEED_HIGH) {
                if (interval < 0x80) {
@@ -755,6 +768,8 @@ static inline void get_interrupt_interval(pegasus_t *pegasus)
                }
        }
        pegasus->intr_interval = interval;
+
+       return 0;
 }
 
 static void set_carrier(struct net_device *net)
@@ -820,7 +835,7 @@ static int pegasus_open(struct net_device *net)
        if (!pegasus->rx_skb)
                goto exit;
 
-       res = set_registers(pegasus, EthID, 6, net->dev_addr);
+       set_registers(pegasus, EthID, 6, net->dev_addr);
 
        usb_fill_bulk_urb(pegasus->rx_urb, pegasus->usb,
                          usb_rcvbulkpipe(pegasus->usb, 1),
@@ -880,7 +895,6 @@ static void pegasus_get_drvinfo(struct net_device *dev,
        pegasus_t *pegasus = netdev_priv(dev);
 
        strlcpy(info->driver, driver_name, sizeof(info->driver));
-       strlcpy(info->version, DRIVER_VERSION, sizeof(info->version));
        usb_make_path(pegasus->usb, info->bus_info, sizeof(info->bus_info));
 }
 
@@ -998,8 +1012,7 @@ static int pegasus_ioctl(struct net_device *net, struct ifreq *rq, int cmd)
                data[0] = pegasus->phy;
                fallthrough;
        case SIOCDEVPRIVATE + 1:
-               read_mii_word(pegasus, data[0], data[1] & 0x1f, &data[3]);
-               res = 0;
+               res = read_mii_word(pegasus, data[0], data[1] & 0x1f, &data[3]);
                break;
        case SIOCDEVPRIVATE + 2:
                if (!capable(CAP_NET_ADMIN))
@@ -1033,22 +1046,25 @@ static void pegasus_set_multicast(struct net_device *net)
 
 static __u8 mii_phy_probe(pegasus_t *pegasus)
 {
-       int i;
+       int i, ret;
        __u16 tmp;
 
        for (i = 0; i < 32; i++) {
-               read_mii_word(pegasus, i, MII_BMSR, &tmp);
+               ret = read_mii_word(pegasus, i, MII_BMSR, &tmp);
+               if (ret < 0)
+                       goto fail;
                if (tmp == 0 || tmp == 0xffff || (tmp & BMSR_MEDIA) == 0)
                        continue;
                else
                        return i;
        }
-
+fail:
        return 0xff;
 }
 
 static inline void setup_pegasus_II(pegasus_t *pegasus)
 {
+       int ret;
        __u8 data = 0xa5;
 
        set_register(pegasus, Reg1d, 0);
@@ -1060,7 +1076,9 @@ static inline void setup_pegasus_II(pegasus_t *pegasus)
                set_register(pegasus, Reg7b, 2);
 
        set_register(pegasus, 0x83, data);
-       get_registers(pegasus, 0x83, 1, &data);
+       ret = get_registers(pegasus, 0x83, 1, &data);
+       if (ret < 0)
+               goto fail;
 
        if (data == 0xa5)
                pegasus->chip = 0x8513;
@@ -1075,6 +1093,10 @@ static inline void setup_pegasus_II(pegasus_t *pegasus)
                set_register(pegasus, Reg81, 6);
        else
                set_register(pegasus, Reg81, 2);
+
+       return;
+fail:
+       netif_dbg(pegasus, drv, pegasus->net, "%s failed\n", __func__);
 }
 
 static void check_carrier(struct work_struct *work)
@@ -1149,7 +1171,9 @@ static int pegasus_probe(struct usb_interface *intf,
                                | NETIF_MSG_PROBE | NETIF_MSG_LINK);
 
        pegasus->features = usb_dev_id[dev_index].private;
-       get_interrupt_interval(pegasus);
+       res = get_interrupt_interval(pegasus);
+       if (res)
+               goto out2;
        if (reset_mac(pegasus)) {
                dev_err(&intf->dev, "can't reset MAC\n");
                res = -EIO;
@@ -1296,7 +1320,7 @@ static void __init parse_id(char *id)
 
 static int __init pegasus_init(void)
 {
-       pr_info("%s: %s, " DRIVER_DESC "\n", driver_name, DRIVER_VERSION);
+       pr_info("%s: " DRIVER_DESC "\n", driver_name);
        if (devid)
                parse_id(devid);
        return usb_register(&pegasus_driver);
index e09b107..7983237 100644 (file)
@@ -3955,17 +3955,28 @@ static void rtl_clear_bp(struct r8152 *tp, u16 type)
        case RTL_VER_06:
                ocp_write_byte(tp, type, PLA_BP_EN, 0);
                break;
+       case RTL_VER_14:
+               ocp_write_word(tp, type, USB_BP2_EN, 0);
+
+               ocp_write_word(tp, type, USB_BP_8, 0);
+               ocp_write_word(tp, type, USB_BP_9, 0);
+               ocp_write_word(tp, type, USB_BP_10, 0);
+               ocp_write_word(tp, type, USB_BP_11, 0);
+               ocp_write_word(tp, type, USB_BP_12, 0);
+               ocp_write_word(tp, type, USB_BP_13, 0);
+               ocp_write_word(tp, type, USB_BP_14, 0);
+               ocp_write_word(tp, type, USB_BP_15, 0);
+               break;
        case RTL_VER_08:
        case RTL_VER_09:
        case RTL_VER_10:
        case RTL_VER_11:
        case RTL_VER_12:
        case RTL_VER_13:
-       case RTL_VER_14:
        case RTL_VER_15:
        default:
                if (type == MCU_TYPE_USB) {
-                       ocp_write_byte(tp, MCU_TYPE_USB, USB_BP2_EN, 0);
+                       ocp_write_word(tp, MCU_TYPE_USB, USB_BP2_EN, 0);
 
                        ocp_write_word(tp, MCU_TYPE_USB, USB_BP_8, 0);
                        ocp_write_word(tp, MCU_TYPE_USB, USB_BP_9, 0);
@@ -4331,7 +4342,6 @@ static bool rtl8152_is_fw_mac_ok(struct r8152 *tp, struct fw_mac *mac)
                case RTL_VER_11:
                case RTL_VER_12:
                case RTL_VER_13:
-               case RTL_VER_14:
                case RTL_VER_15:
                        fw_reg = 0xf800;
                        bp_ba_addr = PLA_BP_BA;
@@ -4339,6 +4349,13 @@ static bool rtl8152_is_fw_mac_ok(struct r8152 *tp, struct fw_mac *mac)
                        bp_start = PLA_BP_0;
                        max_bp = 8;
                        break;
+               case RTL_VER_14:
+                       fw_reg = 0xf800;
+                       bp_ba_addr = PLA_BP_BA;
+                       bp_en_addr = USB_BP2_EN;
+                       bp_start = PLA_BP_0;
+                       max_bp = 16;
+                       break;
                default:
                        goto out;
                }
index 56c3f85..eee4936 100644 (file)
@@ -63,7 +63,7 @@ static const unsigned long guest_offloads[] = {
        VIRTIO_NET_F_GUEST_CSUM
 };
 
-#define GUEST_OFFLOAD_LRO_MASK ((1ULL << VIRTIO_NET_F_GUEST_TSO4) | \
+#define GUEST_OFFLOAD_GRO_HW_MASK ((1ULL << VIRTIO_NET_F_GUEST_TSO4) | \
                                (1ULL << VIRTIO_NET_F_GUEST_TSO6) | \
                                (1ULL << VIRTIO_NET_F_GUEST_ECN)  | \
                                (1ULL << VIRTIO_NET_F_GUEST_UFO))
@@ -2515,7 +2515,7 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
                virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
                virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) ||
                virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM))) {
-               NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing LRO/CSUM, disable LRO/CSUM first");
+               NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing GRO_HW/CSUM, disable GRO_HW/CSUM first");
                return -EOPNOTSUPP;
        }
 
@@ -2646,15 +2646,15 @@ static int virtnet_set_features(struct net_device *dev,
        u64 offloads;
        int err;
 
-       if ((dev->features ^ features) & NETIF_F_LRO) {
+       if ((dev->features ^ features) & NETIF_F_GRO_HW) {
                if (vi->xdp_enabled)
                        return -EBUSY;
 
-               if (features & NETIF_F_LRO)
+               if (features & NETIF_F_GRO_HW)
                        offloads = vi->guest_offloads_capable;
                else
                        offloads = vi->guest_offloads_capable &
-                                  ~GUEST_OFFLOAD_LRO_MASK;
+                                  ~GUEST_OFFLOAD_GRO_HW_MASK;
 
                err = virtnet_set_guest_offloads(vi, offloads);
                if (err)
@@ -3134,9 +3134,9 @@ static int virtnet_probe(struct virtio_device *vdev)
                dev->features |= NETIF_F_RXCSUM;
        if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
            virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6))
-               dev->features |= NETIF_F_LRO;
+               dev->features |= NETIF_F_GRO_HW;
        if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS))
-               dev->hw_features |= NETIF_F_LRO;
+               dev->hw_features |= NETIF_F_GRO_HW;
 
        dev->vlan_features = dev->features;
 
index 2b1b944..8bbe2a7 100644 (file)
@@ -1367,6 +1367,8 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
        bool need_strict = rt6_need_strict(&ipv6_hdr(skb)->daddr);
        bool is_ndisc = ipv6_ndisc_frame(skb);
 
+       nf_reset_ct(skb);
+
        /* loopback, multicast & non-ND link-local traffic; do not push through
         * packet taps again. Reset pkt_type for upper layers to process skb.
         * For strict packets with a source LLA, determine the dst using the
@@ -1429,6 +1431,8 @@ static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev,
        skb->skb_iif = vrf_dev->ifindex;
        IPCB(skb)->flags |= IPSKB_L3SLAVE;
 
+       nf_reset_ct(skb);
+
        if (ipv4_is_multicast(ip_hdr(skb)->daddr))
                goto out;
 
index 2403490..b4b1f75 100644 (file)
@@ -37,6 +37,7 @@ static int iwl_pnvm_handle_section(struct iwl_trans *trans, const u8 *data,
        u32 sha1 = 0;
        u16 mac_type = 0, rf_id = 0;
        u8 *pnvm_data = NULL, *tmp;
+       bool hw_match = false;
        u32 size = 0;
        int ret;
 
@@ -83,6 +84,9 @@ static int iwl_pnvm_handle_section(struct iwl_trans *trans, const u8 *data,
                                break;
                        }
 
+                       if (hw_match)
+                               break;
+
                        mac_type = le16_to_cpup((__le16 *)data);
                        rf_id = le16_to_cpup((__le16 *)(data + sizeof(__le16)));
 
@@ -90,15 +94,9 @@ static int iwl_pnvm_handle_section(struct iwl_trans *trans, const u8 *data,
                                     "Got IWL_UCODE_TLV_HW_TYPE mac_type 0x%0x rf_id 0x%0x\n",
                                     mac_type, rf_id);
 
-                       if (mac_type != CSR_HW_REV_TYPE(trans->hw_rev) ||
-                           rf_id != CSR_HW_RFID_TYPE(trans->hw_rf_id)) {
-                               IWL_DEBUG_FW(trans,
-                                            "HW mismatch, skipping PNVM section, mac_type 0x%0x, rf_id 0x%0x.\n",
-                                            CSR_HW_REV_TYPE(trans->hw_rev), trans->hw_rf_id);
-                               ret = -ENOENT;
-                               goto out;
-                       }
-
+                       if (mac_type == CSR_HW_REV_TYPE(trans->hw_rev) &&
+                           rf_id == CSR_HW_RFID_TYPE(trans->hw_rf_id))
+                               hw_match = true;
                        break;
                case IWL_UCODE_TLV_SEC_RT: {
                        struct iwl_pnvm_section *section = (void *)data;
@@ -149,6 +147,15 @@ static int iwl_pnvm_handle_section(struct iwl_trans *trans, const u8 *data,
        }
 
 done:
+       if (!hw_match) {
+               IWL_DEBUG_FW(trans,
+                            "HW mismatch, skipping PNVM section (need mac_type 0x%x rf_id 0x%x)\n",
+                            CSR_HW_REV_TYPE(trans->hw_rev),
+                            CSR_HW_RFID_TYPE(trans->hw_rf_id));
+               ret = -ENOENT;
+               goto out;
+       }
+
        if (!size) {
                IWL_DEBUG_FW(trans, "Empty PNVM, skipping.\n");
                ret = -ENOENT;
index 16baee3..0b8a0cd 100644 (file)
@@ -1110,12 +1110,80 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
                      IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
                      iwl_cfg_bz_a0_mr_a0, iwl_ax211_name),
 
+/* SoF with JF2 */
+       _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+                     IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
+                     IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
+                     IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+                     iwlax210_2ax_cfg_so_jf_b0, iwl9560_160_name),
+       _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+                     IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
+                     IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
+                     IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+                     iwlax210_2ax_cfg_so_jf_b0, iwl9560_name),
+
+/* SoF with JF */
+       _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+                     IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
+                     IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
+                     IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+                     iwlax210_2ax_cfg_so_jf_b0, iwl9461_160_name),
+       _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+                     IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
+                     IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
+                     IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+                     iwlax210_2ax_cfg_so_jf_b0, iwl9462_160_name),
+       _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+                     IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
+                     IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
+                     IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+                     iwlax210_2ax_cfg_so_jf_b0, iwl9461_name),
+       _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+                     IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
+                     IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
+                     IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+                     iwlax210_2ax_cfg_so_jf_b0, iwl9462_name),
+
 /* So with GF */
        _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
                      IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
                      IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY,
                      IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
-                     iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_name)
+                     iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_name),
+
+/* So with JF2 */
+       _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+                     IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
+                     IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
+                     IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+                     iwlax210_2ax_cfg_so_jf_b0, iwl9560_160_name),
+       _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+                     IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
+                     IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
+                     IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+                     iwlax210_2ax_cfg_so_jf_b0, iwl9560_name),
+
+/* So with JF */
+       _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+                     IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
+                     IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
+                     IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+                     iwlax210_2ax_cfg_so_jf_b0, iwl9461_160_name),
+       _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+                     IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
+                     IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
+                     IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+                     iwlax210_2ax_cfg_so_jf_b0, iwl9462_160_name),
+       _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+                     IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
+                     IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
+                     IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+                     iwlax210_2ax_cfg_so_jf_b0, iwl9461_name),
+       _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+                     IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
+                     IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
+                     IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+                     iwlax210_2ax_cfg_so_jf_b0, iwl9462_name)
 
 #endif /* CONFIG_IWLMVM */
 };
index 863aa18..4396077 100644 (file)
@@ -111,7 +111,7 @@ mt7915_mcu_get_cipher(int cipher)
        case WLAN_CIPHER_SUITE_SMS4:
                return MCU_CIPHER_WAPI;
        default:
-               return MT_CIPHER_NONE;
+               return MCU_CIPHER_NONE;
        }
 }
 
index edd3ba3..e68a562 100644 (file)
@@ -1073,7 +1073,8 @@ enum {
 };
 
 enum mcu_cipher_type {
-       MCU_CIPHER_WEP40 = 1,
+       MCU_CIPHER_NONE = 0,
+       MCU_CIPHER_WEP40,
        MCU_CIPHER_WEP104,
        MCU_CIPHER_WEP128,
        MCU_CIPHER_TKIP,
index cd690c6..9fbaacc 100644 (file)
@@ -111,7 +111,7 @@ mt7921_mcu_get_cipher(int cipher)
        case WLAN_CIPHER_SUITE_SMS4:
                return MCU_CIPHER_WAPI;
        default:
-               return MT_CIPHER_NONE;
+               return MCU_CIPHER_NONE;
        }
 }
 
index d76cf8f..de3c091 100644 (file)
@@ -199,7 +199,8 @@ struct sta_rec_sec {
 } __packed;
 
 enum mcu_cipher_type {
-       MCU_CIPHER_WEP40 = 1,
+       MCU_CIPHER_NONE = 0,
+       MCU_CIPHER_WEP40,
        MCU_CIPHER_WEP104,
        MCU_CIPHER_WEP128,
        MCU_CIPHER_TKIP,
index 1df9595..514f2c1 100644 (file)
@@ -136,6 +136,29 @@ static struct ieee80211_supported_band band_5ghz = {
 /* Assigned at module init. Guaranteed locally-administered and unicast. */
 static u8 fake_router_bssid[ETH_ALEN] __ro_after_init = {};
 
+static void virt_wifi_inform_bss(struct wiphy *wiphy)
+{
+       u64 tsf = div_u64(ktime_get_boottime_ns(), 1000);
+       struct cfg80211_bss *informed_bss;
+       static const struct {
+               u8 tag;
+               u8 len;
+               u8 ssid[8];
+       } __packed ssid = {
+               .tag = WLAN_EID_SSID,
+               .len = 8,
+               .ssid = "VirtWifi",
+       };
+
+       informed_bss = cfg80211_inform_bss(wiphy, &channel_5ghz,
+                                          CFG80211_BSS_FTYPE_PRESP,
+                                          fake_router_bssid, tsf,
+                                          WLAN_CAPABILITY_ESS, 0,
+                                          (void *)&ssid, sizeof(ssid),
+                                          DBM_TO_MBM(-50), GFP_KERNEL);
+       cfg80211_put_bss(wiphy, informed_bss);
+}
+
 /* Called with the rtnl lock held. */
 static int virt_wifi_scan(struct wiphy *wiphy,
                          struct cfg80211_scan_request *request)
@@ -156,28 +179,13 @@ static int virt_wifi_scan(struct wiphy *wiphy,
 /* Acquires and releases the rdev BSS lock. */
 static void virt_wifi_scan_result(struct work_struct *work)
 {
-       struct {
-               u8 tag;
-               u8 len;
-               u8 ssid[8];
-       } __packed ssid = {
-               .tag = WLAN_EID_SSID, .len = 8, .ssid = "VirtWifi",
-       };
-       struct cfg80211_bss *informed_bss;
        struct virt_wifi_wiphy_priv *priv =
                container_of(work, struct virt_wifi_wiphy_priv,
                             scan_result.work);
        struct wiphy *wiphy = priv_to_wiphy(priv);
        struct cfg80211_scan_info scan_info = { .aborted = false };
-       u64 tsf = div_u64(ktime_get_boottime_ns(), 1000);
 
-       informed_bss = cfg80211_inform_bss(wiphy, &channel_5ghz,
-                                          CFG80211_BSS_FTYPE_PRESP,
-                                          fake_router_bssid, tsf,
-                                          WLAN_CAPABILITY_ESS, 0,
-                                          (void *)&ssid, sizeof(ssid),
-                                          DBM_TO_MBM(-50), GFP_KERNEL);
-       cfg80211_put_bss(wiphy, informed_bss);
+       virt_wifi_inform_bss(wiphy);
 
        /* Schedules work which acquires and releases the rtnl lock. */
        cfg80211_scan_done(priv->scan_request, &scan_info);
@@ -225,10 +233,12 @@ static int virt_wifi_connect(struct wiphy *wiphy, struct net_device *netdev,
        if (!could_schedule)
                return -EBUSY;
 
-       if (sme->bssid)
+       if (sme->bssid) {
                ether_addr_copy(priv->connect_requested_bss, sme->bssid);
-       else
+       } else {
+               virt_wifi_inform_bss(wiphy);
                eth_zero_addr(priv->connect_requested_bss);
+       }
 
        wiphy_debug(wiphy, "connect\n");
 
@@ -241,11 +251,13 @@ static void virt_wifi_connect_complete(struct work_struct *work)
        struct virt_wifi_netdev_priv *priv =
                container_of(work, struct virt_wifi_netdev_priv, connect.work);
        u8 *requested_bss = priv->connect_requested_bss;
-       bool has_addr = !is_zero_ether_addr(requested_bss);
        bool right_addr = ether_addr_equal(requested_bss, fake_router_bssid);
        u16 status = WLAN_STATUS_SUCCESS;
 
-       if (!priv->is_up || (has_addr && !right_addr))
+       if (is_zero_ether_addr(requested_bss))
+               requested_bss = NULL;
+
+       if (!priv->is_up || (requested_bss && !right_addr))
                status = WLAN_STATUS_UNSPECIFIED_FAILURE;
        else
                priv->is_connected = true;
index 804e6c4..519361e 100644 (file)
@@ -64,10 +64,9 @@ static struct ipc_chnl_cfg modem_cfg[] = {
 
 int ipc_chnl_cfg_get(struct ipc_chnl_cfg *chnl_cfg, int index)
 {
-       int array_size = ARRAY_SIZE(modem_cfg);
-
-       if (index >= array_size) {
-               pr_err("index: %d and array_size %d", index, array_size);
+       if (index >= ARRAY_SIZE(modem_cfg)) {
+               pr_err("index: %d and array size %zu", index,
+                      ARRAY_SIZE(modem_cfg));
                return -ECHRNG;
        }
 
index 45e6923..f861994 100644 (file)
 #define IOSM_CP_VERSION 0x0100UL
 
 /* DL dir Aggregation support mask */
-#define DL_AGGR BIT(23)
+#define DL_AGGR BIT(9)
 
 /* UL dir Aggregation support mask */
-#define UL_AGGR BIT(22)
+#define UL_AGGR BIT(8)
 
 /* UL flow credit support mask */
 #define UL_FLOW_CREDIT BIT(21)
index 562de27..bdb2d32 100644 (file)
@@ -320,7 +320,7 @@ static void ipc_mux_dl_fcth_decode(struct iosm_mux *ipc_mux,
                return;
        }
 
-       ul_credits = fct->vfl.nr_of_bytes;
+       ul_credits = le32_to_cpu(fct->vfl.nr_of_bytes);
 
        dev_dbg(ipc_mux->dev, "Flow_Credit:: if_id[%d] Old: %d Grants: %d",
                if_id, ipc_mux->session[if_id].ul_flow_credits, ul_credits);
@@ -586,7 +586,7 @@ static bool ipc_mux_lite_send_qlt(struct iosm_mux *ipc_mux)
                qlt->reserved[0] = 0;
                qlt->reserved[1] = 0;
 
-               qlt->vfl.nr_of_bytes = session->ul_list.qlen;
+               qlt->vfl.nr_of_bytes = cpu_to_le32(session->ul_list.qlen);
 
                /* Add QLT to the transfer list. */
                skb_queue_tail(&ipc_mux->channel->ul_list,
index 4a74e3c..aae83db 100644 (file)
@@ -106,7 +106,7 @@ struct mux_lite_cmdh {
  * @nr_of_bytes:       Number of bytes available to transmit in the queue.
  */
 struct mux_lite_vfl {
-       u32 nr_of_bytes;
+       __le32 nr_of_bytes;
 };
 
 /**
index 91109e2..35d5907 100644 (file)
@@ -412,8 +412,8 @@ struct sk_buff *ipc_protocol_dl_td_process(struct iosm_protocol *ipc_protocol,
        }
 
        if (p_td->buffer.address != IPC_CB(skb)->mapping) {
-               dev_err(ipc_protocol->dev, "invalid buf=%p or skb=%p",
-                       (void *)p_td->buffer.address, skb->data);
+               dev_err(ipc_protocol->dev, "invalid buf=%llx or skb=%p",
+                       (unsigned long long)p_td->buffer.address, skb->data);
                ipc_pcie_kfree_skb(ipc_protocol->pcie, skb);
                skb = NULL;
                goto ret;
index b2357ad..b571d9c 100644 (file)
@@ -228,7 +228,7 @@ static void ipc_wwan_dellink(void *ctxt, struct net_device *dev,
 
        RCU_INIT_POINTER(ipc_wwan->sub_netlist[if_id], NULL);
        /* unregistering includes synchronize_net() */
-       unregister_netdevice(dev);
+       unregister_netdevice_queue(dev, head);
 
 unlock:
        mutex_unlock(&ipc_wwan->if_mutex);
index 1bc6b69..e4d0f69 100644 (file)
@@ -41,14 +41,14 @@ struct mhi_wwan_dev {
 /* Increment RX budget and schedule RX refill if necessary */
 static void mhi_wwan_rx_budget_inc(struct mhi_wwan_dev *mhiwwan)
 {
-       spin_lock(&mhiwwan->rx_lock);
+       spin_lock_bh(&mhiwwan->rx_lock);
 
        mhiwwan->rx_budget++;
 
        if (test_bit(MHI_WWAN_RX_REFILL, &mhiwwan->flags))
                schedule_work(&mhiwwan->rx_refill);
 
-       spin_unlock(&mhiwwan->rx_lock);
+       spin_unlock_bh(&mhiwwan->rx_lock);
 }
 
 /* Decrement RX budget if non-zero and return true on success */
@@ -56,7 +56,7 @@ static bool mhi_wwan_rx_budget_dec(struct mhi_wwan_dev *mhiwwan)
 {
        bool ret = false;
 
-       spin_lock(&mhiwwan->rx_lock);
+       spin_lock_bh(&mhiwwan->rx_lock);
 
        if (mhiwwan->rx_budget) {
                mhiwwan->rx_budget--;
@@ -64,7 +64,7 @@ static bool mhi_wwan_rx_budget_dec(struct mhi_wwan_dev *mhiwwan)
                        ret = true;
        }
 
-       spin_unlock(&mhiwwan->rx_lock);
+       spin_unlock_bh(&mhiwwan->rx_lock);
 
        return ret;
 }
@@ -130,9 +130,9 @@ static void mhi_wwan_ctrl_stop(struct wwan_port *port)
 {
        struct mhi_wwan_dev *mhiwwan = wwan_port_get_drvdata(port);
 
-       spin_lock(&mhiwwan->rx_lock);
+       spin_lock_bh(&mhiwwan->rx_lock);
        clear_bit(MHI_WWAN_RX_REFILL, &mhiwwan->flags);
-       spin_unlock(&mhiwwan->rx_lock);
+       spin_unlock_bh(&mhiwwan->rx_lock);
 
        cancel_work_sync(&mhiwwan->rx_refill);
 
index 3e16c31..35ece98 100644 (file)
@@ -164,11 +164,14 @@ static struct wwan_device *wwan_create_dev(struct device *parent)
                goto done_unlock;
 
        id = ida_alloc(&wwan_dev_ids, GFP_KERNEL);
-       if (id < 0)
+       if (id < 0) {
+               wwandev = ERR_PTR(id);
                goto done_unlock;
+       }
 
        wwandev = kzalloc(sizeof(*wwandev), GFP_KERNEL);
        if (!wwandev) {
+               wwandev = ERR_PTR(-ENOMEM);
                ida_free(&wwan_dev_ids, id);
                goto done_unlock;
        }
@@ -182,7 +185,8 @@ static struct wwan_device *wwan_create_dev(struct device *parent)
        err = device_register(&wwandev->dev);
        if (err) {
                put_device(&wwandev->dev);
-               wwandev = NULL;
+               wwandev = ERR_PTR(err);
+               goto done_unlock;
        }
 
 done_unlock:
@@ -984,6 +988,8 @@ static void wwan_create_default_link(struct wwan_device *wwandev,
                goto unlock;
        }
 
+       rtnl_configure_link(dev, NULL); /* Link initialized, notify new link */
+
 unlock:
        rtnl_unlock();
 
@@ -1012,8 +1018,8 @@ int wwan_register_ops(struct device *parent, const struct wwan_ops *ops,
                return -EINVAL;
 
        wwandev = wwan_create_dev(parent);
-       if (!wwandev)
-               return -ENOMEM;
+       if (IS_ERR(wwandev))
+               return PTR_ERR(wwandev);
 
        if (WARN_ON(wwandev->ops)) {
                wwan_remove_dev(wwandev);
index a9864fc..dd27c85 100644 (file)
@@ -192,8 +192,7 @@ static void nfcsim_recv_wq(struct work_struct *work)
 
                if (!IS_ERR(skb))
                        dev_kfree_skb(skb);
-
-               skb = ERR_PTR(-ENODEV);
+               return;
        }
 
        dev->cb(dev->nfc_digital_dev, dev->arg, skb);
index eb5d7a5..e3e72b8 100644 (file)
@@ -423,7 +423,7 @@ int s3fwrn5_fw_download(struct s3fwrn5_fw_info *fw_info)
        if (IS_ERR(tfm)) {
                ret = PTR_ERR(tfm);
                dev_err(&fw_info->ndev->nfc_dev->dev,
-                       "Cannot allocate shash (code=%d)\n", ret);
+                       "Cannot allocate shash (code=%pe)\n", tfm);
                goto out;
        }
 
index 2403b71..7454782 100644 (file)
@@ -2527,7 +2527,7 @@ static void deactivate_labels(void *region)
 
 static int init_active_labels(struct nd_region *nd_region)
 {
-       int i;
+       int i, rc = 0;
 
        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
@@ -2546,13 +2546,14 @@ static int init_active_labels(struct nd_region *nd_region)
                        else if (test_bit(NDD_LABELING, &nvdimm->flags))
                                /* fail, labels needed to disambiguate dpa */;
                        else
-                               return 0;
+                               continue;
 
                        dev_err(&nd_region->dev, "%s: is %s, failing probe\n",
                                        dev_name(&nd_mapping->nvdimm->dev),
                                        test_bit(NDD_LOCKED, &nvdimm->flags)
                                        ? "locked" : "disabled");
-                       return -ENXIO;
+                       rc = -ENXIO;
+                       goto out;
                }
                nd_mapping->ndd = ndd;
                atomic_inc(&nvdimm->busy);
@@ -2586,13 +2587,17 @@ static int init_active_labels(struct nd_region *nd_region)
                        break;
        }
 
-       if (i < nd_region->ndr_mappings) {
+       if (i < nd_region->ndr_mappings)
+               rc = -ENOMEM;
+
+out:
+       if (rc) {
                deactivate_labels(nd_region);
-               return -ENOMEM;
+               return rc;
        }
 
        return devm_add_action_or_reset(&nd_region->dev, deactivate_labels,
-                       nd_region);
+                                       nd_region);
 }
 
 int nd_region_register_namespaces(struct nd_region *nd_region, int *err)
index c3f3d77..dc0450c 100644 (file)
@@ -33,12 +33,12 @@ config NVME_HWMON
          in the system.
 
 config NVME_FABRICS
+       select NVME_CORE
        tristate
 
 config NVME_RDMA
        tristate "NVM Express over Fabrics RDMA host driver"
        depends on INFINIBAND && INFINIBAND_ADDR_TRANS && BLOCK
-       select NVME_CORE
        select NVME_FABRICS
        select SG_POOL
        help
@@ -55,7 +55,6 @@ config NVME_FC
        tristate "NVM Express over Fabrics FC host driver"
        depends on BLOCK
        depends on HAS_DMA
-       select NVME_CORE
        select NVME_FABRICS
        select SG_POOL
        help
@@ -72,7 +71,6 @@ config NVME_TCP
        tristate "NVM Express over Fabrics TCP host driver"
        depends on INET
        depends on BLOCK
-       select NVME_CORE
        select NVME_FABRICS
        select CRYPTO
        select CRYPTO_CRC32C
index cbc5097..dfaacd4 100644 (file)
@@ -12,7 +12,6 @@ obj-$(CONFIG_NVME_TCP)                        += nvme-tcp.o
 nvme-core-y                            := core.o ioctl.o
 nvme-core-$(CONFIG_TRACING)            += trace.o
 nvme-core-$(CONFIG_NVME_MULTIPATH)     += multipath.o
-nvme-core-$(CONFIG_NVM)                        += lightnvm.o
 nvme-core-$(CONFIG_BLK_DEV_ZONED)      += zns.o
 nvme-core-$(CONFIG_FAULT_INJECTION_DEBUG_FS)   += fault_inject.o
 nvme-core-$(CONFIG_NVME_HWMON)         += hwmon.o
index dfd9dec..8679a10 100644 (file)
@@ -587,9 +587,6 @@ static void nvme_free_ns(struct kref *kref)
 {
        struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref);
 
-       if (ns->ndev)
-               nvme_nvm_unregister(ns);
-
        put_disk(ns->disk);
        nvme_put_ns_head(ns->head);
        nvme_put_ctrl(ns->ctrl);
@@ -968,12 +965,11 @@ void nvme_cleanup_cmd(struct request *req)
 {
        if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
                struct nvme_ctrl *ctrl = nvme_req(req)->ctrl;
-               struct page *page = req->special_vec.bv_page;
 
-               if (page == ctrl->discard_page)
+               if (req->special_vec.bv_page == ctrl->discard_page)
                        clear_bit_unlock(0, &ctrl->discard_page_busy);
                else
-                       kfree(page_address(page) + req->special_vec.bv_offset);
+                       kfree(bvec_virt(&req->special_vec));
        }
 }
 EXPORT_SYMBOL_GPL(nvme_cleanup_cmd);
@@ -1029,7 +1025,8 @@ blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req)
                return BLK_STS_IOERR;
        }
 
-       cmd->common.command_id = req->tag;
+       nvme_req(req)->genctr++;
+       cmd->common.command_id = nvme_cid(req);
        trace_nvme_setup_cmd(req, cmd);
        return ret;
 }
@@ -1822,7 +1819,7 @@ static void nvme_update_disk_info(struct gendisk *disk,
 static inline bool nvme_first_scan(struct gendisk *disk)
 {
        /* nvme_alloc_ns() scans the disk prior to adding it */
-       return !(disk->flags & GENHD_FL_UP);
+       return !disk_live(disk);
 }
 
 static void nvme_set_chunk_sectors(struct nvme_ns *ns, struct nvme_id_ns *id)
@@ -1890,7 +1887,7 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_id_ns *id)
                nvme_update_disk_info(ns->head->disk, ns, id);
                blk_stack_limits(&ns->head->disk->queue->limits,
                                 &ns->queue->limits, 0);
-               blk_queue_update_readahead(ns->head->disk->queue);
+               disk_update_readahead(ns->head->disk);
                blk_mq_unfreeze_queue(ns->head->disk->queue);
        }
        return 0;
@@ -3218,9 +3215,6 @@ static const struct attribute_group nvme_ns_id_attr_group = {
 
 const struct attribute_group *nvme_ns_id_attr_groups[] = {
        &nvme_ns_id_attr_group,
-#ifdef CONFIG_NVM
-       &nvme_nvm_attr_group,
-#endif
        NULL,
 };
 
@@ -3729,9 +3723,14 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
        if (!ns)
                goto out_free_id;
 
-       ns->queue = blk_mq_init_queue(ctrl->tagset);
-       if (IS_ERR(ns->queue))
+       disk = blk_mq_alloc_disk(ctrl->tagset, ns);
+       if (IS_ERR(disk))
                goto out_free_ns;
+       disk->fops = &nvme_bdev_ops;
+       disk->private_data = ns;
+
+       ns->disk = disk;
+       ns->queue = disk->queue;
 
        if (ctrl->opts && ctrl->opts->data_digest)
                blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, ns->queue);
@@ -3740,20 +3739,12 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
        if (ctrl->ops->flags & NVME_F_PCI_P2PDMA)
                blk_queue_flag_set(QUEUE_FLAG_PCI_P2PDMA, ns->queue);
 
-       ns->queue->queuedata = ns;
        ns->ctrl = ctrl;
        kref_init(&ns->kref);
 
        if (nvme_init_ns_head(ns, nsid, ids, id->nmic & NVME_NS_NMIC_SHARED))
-               goto out_free_queue;
+               goto out_cleanup_disk;
 
-       disk = alloc_disk_node(0, node);
-       if (!disk)
-               goto out_unlink_ns;
-
-       disk->fops = &nvme_bdev_ops;
-       disk->private_data = ns;
-       disk->queue = ns->queue;
        /*
         * Without the multipath code enabled, multiple controller per
         * subsystems are visible as devices and thus we cannot use the
@@ -3762,17 +3753,9 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
        if (!nvme_mpath_set_disk_name(ns, disk->disk_name, &disk->flags))
                sprintf(disk->disk_name, "nvme%dn%d", ctrl->instance,
                        ns->head->instance);
-       ns->disk = disk;
 
        if (nvme_update_ns_info(ns, id))
-               goto out_put_disk;
-
-       if ((ctrl->quirks & NVME_QUIRK_LIGHTNVM) && id->vs[0] == 0x1) {
-               if (nvme_nvm_register(ns, disk->disk_name, node)) {
-                       dev_warn(ctrl->device, "LightNVM init failure\n");
-                       goto out_put_disk;
-               }
-       }
+               goto out_unlink_ns;
 
        down_write(&ctrl->namespaces_rwsem);
        list_add_tail(&ns->list, &ctrl->namespaces);
@@ -3789,10 +3772,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
        kfree(id);
 
        return;
- out_put_disk:
-       /* prevent double queue cleanup */
-       ns->disk->queue = NULL;
-       put_disk(ns->disk);
+
  out_unlink_ns:
        mutex_lock(&ctrl->subsys->lock);
        list_del_rcu(&ns->siblings);
@@ -3800,8 +3780,8 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
                list_del_init(&ns->head->entry);
        mutex_unlock(&ctrl->subsys->lock);
        nvme_put_ns_head(ns->head);
- out_free_queue:
-       blk_cleanup_queue(ns->queue);
+ out_cleanup_disk:
+       blk_cleanup_disk(disk);
  out_free_ns:
        kfree(ns);
  out_free_id:
@@ -3826,14 +3806,12 @@ static void nvme_ns_remove(struct nvme_ns *ns)
        nvme_mpath_clear_current_path(ns);
        synchronize_srcu(&ns->head->srcu); /* wait for concurrent submissions */
 
-       if (ns->disk->flags & GENHD_FL_UP) {
-               if (!nvme_ns_head_multipath(ns->head))
-                       nvme_cdev_del(&ns->cdev, &ns->cdev_device);
-               del_gendisk(ns->disk);
-               blk_cleanup_queue(ns->queue);
-               if (blk_get_integrity(ns->disk))
-                       blk_integrity_unregister(ns->disk);
-       }
+       if (!nvme_ns_head_multipath(ns->head))
+               nvme_cdev_del(&ns->cdev, &ns->cdev_device);
+       del_gendisk(ns->disk);
+       blk_cleanup_queue(ns->queue);
+       if (blk_get_integrity(ns->disk))
+               blk_integrity_unregister(ns->disk);
 
        down_write(&ns->ctrl->namespaces_rwsem);
        list_del_init(&ns->list);
index a5469fd..668c6bb 100644 (file)
@@ -719,7 +719,6 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
                                ret = -EINVAL;
                                goto out;
                        }
-                       nvmf_host_put(opts->host);
                        opts->host = nvmf_host_add(p);
                        kfree(p);
                        if (!opts->host) {
index 305ddd4..2231496 100644 (file)
@@ -342,9 +342,7 @@ static int nvme_ns_ioctl(struct nvme_ns *ns, unsigned int cmd,
        case NVME_IOCTL_IO64_CMD:
                return nvme_user_cmd64(ns->ctrl, ns, argp);
        default:
-               if (!ns->ndev)
-                       return -ENOTTY;
-               return nvme_nvm_ioctl(ns, cmd, argp);
+               return -ENOTTY;
        }
 }
 
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
deleted file mode 100644 (file)
index e9d9ad4..0000000
+++ /dev/null
@@ -1,1274 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * nvme-lightnvm.c - LightNVM NVMe device
- *
- * Copyright (C) 2014-2015 IT University of Copenhagen
- * Initial release: Matias Bjorling <mb@lightnvm.io>
- */
-
-#include "nvme.h"
-
-#include <linux/nvme.h>
-#include <linux/bitops.h>
-#include <linux/lightnvm.h>
-#include <linux/vmalloc.h>
-#include <linux/sched/sysctl.h>
-#include <uapi/linux/lightnvm.h>
-
-enum nvme_nvm_admin_opcode {
-       nvme_nvm_admin_identity         = 0xe2,
-       nvme_nvm_admin_get_bb_tbl       = 0xf2,
-       nvme_nvm_admin_set_bb_tbl       = 0xf1,
-};
-
-enum nvme_nvm_log_page {
-       NVME_NVM_LOG_REPORT_CHUNK       = 0xca,
-};
-
-struct nvme_nvm_ph_rw {
-       __u8                    opcode;
-       __u8                    flags;
-       __u16                   command_id;
-       __le32                  nsid;
-       __u64                   rsvd2;
-       __le64                  metadata;
-       __le64                  prp1;
-       __le64                  prp2;
-       __le64                  spba;
-       __le16                  length;
-       __le16                  control;
-       __le32                  dsmgmt;
-       __le64                  resv;
-};
-
-struct nvme_nvm_erase_blk {
-       __u8                    opcode;
-       __u8                    flags;
-       __u16                   command_id;
-       __le32                  nsid;
-       __u64                   rsvd[2];
-       __le64                  prp1;
-       __le64                  prp2;
-       __le64                  spba;
-       __le16                  length;
-       __le16                  control;
-       __le32                  dsmgmt;
-       __le64                  resv;
-};
-
-struct nvme_nvm_identity {
-       __u8                    opcode;
-       __u8                    flags;
-       __u16                   command_id;
-       __le32                  nsid;
-       __u64                   rsvd[2];
-       __le64                  prp1;
-       __le64                  prp2;
-       __u32                   rsvd11[6];
-};
-
-struct nvme_nvm_getbbtbl {
-       __u8                    opcode;
-       __u8                    flags;
-       __u16                   command_id;
-       __le32                  nsid;
-       __u64                   rsvd[2];
-       __le64                  prp1;
-       __le64                  prp2;
-       __le64                  spba;
-       __u32                   rsvd4[4];
-};
-
-struct nvme_nvm_setbbtbl {
-       __u8                    opcode;
-       __u8                    flags;
-       __u16                   command_id;
-       __le32                  nsid;
-       __le64                  rsvd[2];
-       __le64                  prp1;
-       __le64                  prp2;
-       __le64                  spba;
-       __le16                  nlb;
-       __u8                    value;
-       __u8                    rsvd3;
-       __u32                   rsvd4[3];
-};
-
-struct nvme_nvm_command {
-       union {
-               struct nvme_common_command common;
-               struct nvme_nvm_ph_rw ph_rw;
-               struct nvme_nvm_erase_blk erase;
-               struct nvme_nvm_identity identity;
-               struct nvme_nvm_getbbtbl get_bb;
-               struct nvme_nvm_setbbtbl set_bb;
-       };
-};
-
-struct nvme_nvm_id12_grp {
-       __u8                    mtype;
-       __u8                    fmtype;
-       __le16                  res16;
-       __u8                    num_ch;
-       __u8                    num_lun;
-       __u8                    num_pln;
-       __u8                    rsvd1;
-       __le16                  num_chk;
-       __le16                  num_pg;
-       __le16                  fpg_sz;
-       __le16                  csecs;
-       __le16                  sos;
-       __le16                  rsvd2;
-       __le32                  trdt;
-       __le32                  trdm;
-       __le32                  tprt;
-       __le32                  tprm;
-       __le32                  tbet;
-       __le32                  tbem;
-       __le32                  mpos;
-       __le32                  mccap;
-       __le16                  cpar;
-       __u8                    reserved[906];
-} __packed;
-
-struct nvme_nvm_id12_addrf {
-       __u8                    ch_offset;
-       __u8                    ch_len;
-       __u8                    lun_offset;
-       __u8                    lun_len;
-       __u8                    pln_offset;
-       __u8                    pln_len;
-       __u8                    blk_offset;
-       __u8                    blk_len;
-       __u8                    pg_offset;
-       __u8                    pg_len;
-       __u8                    sec_offset;
-       __u8                    sec_len;
-       __u8                    res[4];
-} __packed;
-
-struct nvme_nvm_id12 {
-       __u8                    ver_id;
-       __u8                    vmnt;
-       __u8                    cgrps;
-       __u8                    res;
-       __le32                  cap;
-       __le32                  dom;
-       struct nvme_nvm_id12_addrf ppaf;
-       __u8                    resv[228];
-       struct nvme_nvm_id12_grp grp;
-       __u8                    resv2[2880];
-} __packed;
-
-struct nvme_nvm_bb_tbl {
-       __u8    tblid[4];
-       __le16  verid;
-       __le16  revid;
-       __le32  rvsd1;
-       __le32  tblks;
-       __le32  tfact;
-       __le32  tgrown;
-       __le32  tdresv;
-       __le32  thresv;
-       __le32  rsvd2[8];
-       __u8    blk[];
-};
-
-struct nvme_nvm_id20_addrf {
-       __u8                    grp_len;
-       __u8                    pu_len;
-       __u8                    chk_len;
-       __u8                    lba_len;
-       __u8                    resv[4];
-};
-
-struct nvme_nvm_id20 {
-       __u8                    mjr;
-       __u8                    mnr;
-       __u8                    resv[6];
-
-       struct nvme_nvm_id20_addrf lbaf;
-
-       __le32                  mccap;
-       __u8                    resv2[12];
-
-       __u8                    wit;
-       __u8                    resv3[31];
-
-       /* Geometry */
-       __le16                  num_grp;
-       __le16                  num_pu;
-       __le32                  num_chk;
-       __le32                  clba;
-       __u8                    resv4[52];
-
-       /* Write data requirements */
-       __le32                  ws_min;
-       __le32                  ws_opt;
-       __le32                  mw_cunits;
-       __le32                  maxoc;
-       __le32                  maxocpu;
-       __u8                    resv5[44];
-
-       /* Performance related metrics */
-       __le32                  trdt;
-       __le32                  trdm;
-       __le32                  twrt;
-       __le32                  twrm;
-       __le32                  tcrst;
-       __le32                  tcrsm;
-       __u8                    resv6[40];
-
-       /* Reserved area */
-       __u8                    resv7[2816];
-
-       /* Vendor specific */
-       __u8                    vs[1024];
-};
-
-struct nvme_nvm_chk_meta {
-       __u8    state;
-       __u8    type;
-       __u8    wi;
-       __u8    rsvd[5];
-       __le64  slba;
-       __le64  cnlb;
-       __le64  wp;
-};
-
-/*
- * Check we didn't inadvertently grow the command struct
- */
-static inline void _nvme_nvm_check_size(void)
-{
-       BUILD_BUG_ON(sizeof(struct nvme_nvm_identity) != 64);
-       BUILD_BUG_ON(sizeof(struct nvme_nvm_ph_rw) != 64);
-       BUILD_BUG_ON(sizeof(struct nvme_nvm_erase_blk) != 64);
-       BUILD_BUG_ON(sizeof(struct nvme_nvm_getbbtbl) != 64);
-       BUILD_BUG_ON(sizeof(struct nvme_nvm_setbbtbl) != 64);
-       BUILD_BUG_ON(sizeof(struct nvme_nvm_id12_grp) != 960);
-       BUILD_BUG_ON(sizeof(struct nvme_nvm_id12_addrf) != 16);
-       BUILD_BUG_ON(sizeof(struct nvme_nvm_id12) != NVME_IDENTIFY_DATA_SIZE);
-       BUILD_BUG_ON(sizeof(struct nvme_nvm_bb_tbl) != 64);
-       BUILD_BUG_ON(sizeof(struct nvme_nvm_id20_addrf) != 8);
-       BUILD_BUG_ON(sizeof(struct nvme_nvm_id20) != NVME_IDENTIFY_DATA_SIZE);
-       BUILD_BUG_ON(sizeof(struct nvme_nvm_chk_meta) != 32);
-       BUILD_BUG_ON(sizeof(struct nvme_nvm_chk_meta) !=
-                                               sizeof(struct nvm_chk_meta));
-}
-
-static void nvme_nvm_set_addr_12(struct nvm_addrf_12 *dst,
-                                struct nvme_nvm_id12_addrf *src)
-{
-       dst->ch_len = src->ch_len;
-       dst->lun_len = src->lun_len;
-       dst->blk_len = src->blk_len;
-       dst->pg_len = src->pg_len;
-       dst->pln_len = src->pln_len;
-       dst->sec_len = src->sec_len;
-
-       dst->ch_offset = src->ch_offset;
-       dst->lun_offset = src->lun_offset;
-       dst->blk_offset = src->blk_offset;
-       dst->pg_offset = src->pg_offset;
-       dst->pln_offset = src->pln_offset;
-       dst->sec_offset = src->sec_offset;
-
-       dst->ch_mask = ((1ULL << dst->ch_len) - 1) << dst->ch_offset;
-       dst->lun_mask = ((1ULL << dst->lun_len) - 1) << dst->lun_offset;
-       dst->blk_mask = ((1ULL << dst->blk_len) - 1) << dst->blk_offset;
-       dst->pg_mask = ((1ULL << dst->pg_len) - 1) << dst->pg_offset;
-       dst->pln_mask = ((1ULL << dst->pln_len) - 1) << dst->pln_offset;
-       dst->sec_mask = ((1ULL << dst->sec_len) - 1) << dst->sec_offset;
-}
-
-static int nvme_nvm_setup_12(struct nvme_nvm_id12 *id,
-                            struct nvm_geo *geo)
-{
-       struct nvme_nvm_id12_grp *src;
-       int sec_per_pg, sec_per_pl, pg_per_blk;
-
-       if (id->cgrps != 1)
-               return -EINVAL;
-
-       src = &id->grp;
-
-       if (src->mtype != 0) {
-               pr_err("nvm: memory type not supported\n");
-               return -EINVAL;
-       }
-
-       /* 1.2 spec. only reports a single version id - unfold */
-       geo->major_ver_id = id->ver_id;
-       geo->minor_ver_id = 2;
-
-       /* Set compacted version for upper layers */
-       geo->version = NVM_OCSSD_SPEC_12;
-
-       geo->num_ch = src->num_ch;
-       geo->num_lun = src->num_lun;
-       geo->all_luns = geo->num_ch * geo->num_lun;
-
-       geo->num_chk = le16_to_cpu(src->num_chk);
-
-       geo->csecs = le16_to_cpu(src->csecs);
-       geo->sos = le16_to_cpu(src->sos);
-
-       pg_per_blk = le16_to_cpu(src->num_pg);
-       sec_per_pg = le16_to_cpu(src->fpg_sz) / geo->csecs;
-       sec_per_pl = sec_per_pg * src->num_pln;
-       geo->clba = sec_per_pl * pg_per_blk;
-
-       geo->all_chunks = geo->all_luns * geo->num_chk;
-       geo->total_secs = geo->clba * geo->all_chunks;
-
-       geo->ws_min = sec_per_pg;
-       geo->ws_opt = sec_per_pg;
-       geo->mw_cunits = geo->ws_opt << 3;      /* default to MLC safe values */
-
-       /* Do not impose values for maximum number of open blocks as it is
-        * unspecified in 1.2. Users of 1.2 must be aware of this and eventually
-        * specify these values through a quirk if restrictions apply.
-        */
-       geo->maxoc = geo->all_luns * geo->num_chk;
-       geo->maxocpu = geo->num_chk;
-
-       geo->mccap = le32_to_cpu(src->mccap);
-
-       geo->trdt = le32_to_cpu(src->trdt);
-       geo->trdm = le32_to_cpu(src->trdm);
-       geo->tprt = le32_to_cpu(src->tprt);
-       geo->tprm = le32_to_cpu(src->tprm);
-       geo->tbet = le32_to_cpu(src->tbet);
-       geo->tbem = le32_to_cpu(src->tbem);
-
-       /* 1.2 compatibility */
-       geo->vmnt = id->vmnt;
-       geo->cap = le32_to_cpu(id->cap);
-       geo->dom = le32_to_cpu(id->dom);
-
-       geo->mtype = src->mtype;
-       geo->fmtype = src->fmtype;
-
-       geo->cpar = le16_to_cpu(src->cpar);
-       geo->mpos = le32_to_cpu(src->mpos);
-
-       geo->pln_mode = NVM_PLANE_SINGLE;
-
-       if (geo->mpos & 0x020202) {
-               geo->pln_mode = NVM_PLANE_DOUBLE;
-               geo->ws_opt <<= 1;
-       } else if (geo->mpos & 0x040404) {
-               geo->pln_mode = NVM_PLANE_QUAD;
-               geo->ws_opt <<= 2;
-       }
-
-       geo->num_pln = src->num_pln;
-       geo->num_pg = le16_to_cpu(src->num_pg);
-       geo->fpg_sz = le16_to_cpu(src->fpg_sz);
-
-       nvme_nvm_set_addr_12((struct nvm_addrf_12 *)&geo->addrf, &id->ppaf);
-
-       return 0;
-}
-
-static void nvme_nvm_set_addr_20(struct nvm_addrf *dst,
-                                struct nvme_nvm_id20_addrf *src)
-{
-       dst->ch_len = src->grp_len;
-       dst->lun_len = src->pu_len;
-       dst->chk_len = src->chk_len;
-       dst->sec_len = src->lba_len;
-
-       dst->sec_offset = 0;
-       dst->chk_offset = dst->sec_len;
-       dst->lun_offset = dst->chk_offset + dst->chk_len;
-       dst->ch_offset = dst->lun_offset + dst->lun_len;
-
-       dst->ch_mask = ((1ULL << dst->ch_len) - 1) << dst->ch_offset;
-       dst->lun_mask = ((1ULL << dst->lun_len) - 1) << dst->lun_offset;
-       dst->chk_mask = ((1ULL << dst->chk_len) - 1) << dst->chk_offset;
-       dst->sec_mask = ((1ULL << dst->sec_len) - 1) << dst->sec_offset;
-}
-
-static int nvme_nvm_setup_20(struct nvme_nvm_id20 *id,
-                            struct nvm_geo *geo)
-{
-       geo->major_ver_id = id->mjr;
-       geo->minor_ver_id = id->mnr;
-
-       /* Set compacted version for upper layers */
-       geo->version = NVM_OCSSD_SPEC_20;
-
-       geo->num_ch = le16_to_cpu(id->num_grp);
-       geo->num_lun = le16_to_cpu(id->num_pu);
-       geo->all_luns = geo->num_ch * geo->num_lun;
-
-       geo->num_chk = le32_to_cpu(id->num_chk);
-       geo->clba = le32_to_cpu(id->clba);
-
-       geo->all_chunks = geo->all_luns * geo->num_chk;
-       geo->total_secs = geo->clba * geo->all_chunks;
-
-       geo->ws_min = le32_to_cpu(id->ws_min);
-       geo->ws_opt = le32_to_cpu(id->ws_opt);
-       geo->mw_cunits = le32_to_cpu(id->mw_cunits);
-       geo->maxoc = le32_to_cpu(id->maxoc);
-       geo->maxocpu = le32_to_cpu(id->maxocpu);
-
-       geo->trdt = le32_to_cpu(id->trdt);
-       geo->trdm = le32_to_cpu(id->trdm);
-       geo->tprt = le32_to_cpu(id->twrt);
-       geo->tprm = le32_to_cpu(id->twrm);
-       geo->tbet = le32_to_cpu(id->tcrst);
-       geo->tbem = le32_to_cpu(id->tcrsm);
-
-       nvme_nvm_set_addr_20(&geo->addrf, &id->lbaf);
-
-       return 0;
-}
-
-static int nvme_nvm_identity(struct nvm_dev *nvmdev)
-{
-       struct nvme_ns *ns = nvmdev->q->queuedata;
-       struct nvme_nvm_id12 *id;
-       struct nvme_nvm_command c = {};
-       int ret;
-
-       c.identity.opcode = nvme_nvm_admin_identity;
-       c.identity.nsid = cpu_to_le32(ns->head->ns_id);
-
-       id = kmalloc(sizeof(struct nvme_nvm_id12), GFP_KERNEL);
-       if (!id)
-               return -ENOMEM;
-
-       ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, (struct nvme_command *)&c,
-                               id, sizeof(struct nvme_nvm_id12));
-       if (ret) {
-               ret = -EIO;
-               goto out;
-       }
-
-       /*
-        * The 1.2 and 2.0 specifications share the first byte in their geometry
-        * command to make it possible to know what version a device implements.
-        */
-       switch (id->ver_id) {
-       case 1:
-               ret = nvme_nvm_setup_12(id, &nvmdev->geo);
-               break;
-       case 2:
-               ret = nvme_nvm_setup_20((struct nvme_nvm_id20 *)id,
-                                                       &nvmdev->geo);
-               break;
-       default:
-               dev_err(ns->ctrl->device, "OCSSD revision not supported (%d)\n",
-                                                       id->ver_id);
-               ret = -EINVAL;
-       }
-
-out:
-       kfree(id);
-       return ret;
-}
-
-static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
-                                                               u8 *blks)
-{
-       struct request_queue *q = nvmdev->q;
-       struct nvm_geo *geo = &nvmdev->geo;
-       struct nvme_ns *ns = q->queuedata;
-       struct nvme_ctrl *ctrl = ns->ctrl;
-       struct nvme_nvm_command c = {};
-       struct nvme_nvm_bb_tbl *bb_tbl;
-       int nr_blks = geo->num_chk * geo->num_pln;
-       int tblsz = sizeof(struct nvme_nvm_bb_tbl) + nr_blks;
-       int ret = 0;
-
-       c.get_bb.opcode = nvme_nvm_admin_get_bb_tbl;
-       c.get_bb.nsid = cpu_to_le32(ns->head->ns_id);
-       c.get_bb.spba = cpu_to_le64(ppa.ppa);
-
-       bb_tbl = kzalloc(tblsz, GFP_KERNEL);
-       if (!bb_tbl)
-               return -ENOMEM;
-
-       ret = nvme_submit_sync_cmd(ctrl->admin_q, (struct nvme_command *)&c,
-                                                               bb_tbl, tblsz);
-       if (ret) {
-               dev_err(ctrl->device, "get bad block table failed (%d)\n", ret);
-               ret = -EIO;
-               goto out;
-       }
-
-       if (bb_tbl->tblid[0] != 'B' || bb_tbl->tblid[1] != 'B' ||
-               bb_tbl->tblid[2] != 'L' || bb_tbl->tblid[3] != 'T') {
-               dev_err(ctrl->device, "bbt format mismatch\n");
-               ret = -EINVAL;
-               goto out;
-       }
-
-       if (le16_to_cpu(bb_tbl->verid) != 1) {
-               ret = -EINVAL;
-               dev_err(ctrl->device, "bbt version not supported\n");
-               goto out;
-       }
-
-       if (le32_to_cpu(bb_tbl->tblks) != nr_blks) {
-               ret = -EINVAL;
-               dev_err(ctrl->device,
-                               "bbt unsuspected blocks returned (%u!=%u)",
-                               le32_to_cpu(bb_tbl->tblks), nr_blks);
-               goto out;
-       }
-
-       memcpy(blks, bb_tbl->blk, geo->num_chk * geo->num_pln);
-out:
-       kfree(bb_tbl);
-       return ret;
-}
-
-static int nvme_nvm_set_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr *ppas,
-                                                       int nr_ppas, int type)
-{
-       struct nvme_ns *ns = nvmdev->q->queuedata;
-       struct nvme_nvm_command c = {};
-       int ret = 0;
-
-       c.set_bb.opcode = nvme_nvm_admin_set_bb_tbl;
-       c.set_bb.nsid = cpu_to_le32(ns->head->ns_id);
-       c.set_bb.spba = cpu_to_le64(ppas->ppa);
-       c.set_bb.nlb = cpu_to_le16(nr_ppas - 1);
-       c.set_bb.value = type;
-
-       ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, (struct nvme_command *)&c,
-                                                               NULL, 0);
-       if (ret)
-               dev_err(ns->ctrl->device, "set bad block table failed (%d)\n",
-                                                                       ret);
-       return ret;
-}
-
-/*
- * Expect the lba in device format
- */
-static int nvme_nvm_get_chk_meta(struct nvm_dev *ndev,
-                                sector_t slba, int nchks,
-                                struct nvm_chk_meta *meta)
-{
-       struct nvm_geo *geo = &ndev->geo;
-       struct nvme_ns *ns = ndev->q->queuedata;
-       struct nvme_ctrl *ctrl = ns->ctrl;
-       struct nvme_nvm_chk_meta *dev_meta, *dev_meta_off;
-       struct ppa_addr ppa;
-       size_t left = nchks * sizeof(struct nvme_nvm_chk_meta);
-       size_t log_pos, offset, len;
-       int i, max_len;
-       int ret = 0;
-
-       /*
-        * limit requests to maximum 256K to avoid issuing arbitrary large
-        * requests when the device does not specific a maximum transfer size.
-        */
-       max_len = min_t(unsigned int, ctrl->max_hw_sectors << 9, 256 * 1024);
-
-       dev_meta = kmalloc(max_len, GFP_KERNEL);
-       if (!dev_meta)
-               return -ENOMEM;
-
-       /* Normalize lba address space to obtain log offset */
-       ppa.ppa = slba;
-       ppa = dev_to_generic_addr(ndev, ppa);
-
-       log_pos = ppa.m.chk;
-       log_pos += ppa.m.pu * geo->num_chk;
-       log_pos += ppa.m.grp * geo->num_lun * geo->num_chk;
-
-       offset = log_pos * sizeof(struct nvme_nvm_chk_meta);
-
-       while (left) {
-               len = min_t(unsigned int, left, max_len);
-
-               memset(dev_meta, 0, max_len);
-               dev_meta_off = dev_meta;
-
-               ret = nvme_get_log(ctrl, ns->head->ns_id,
-                               NVME_NVM_LOG_REPORT_CHUNK, 0, NVME_CSI_NVM,
-                               dev_meta, len, offset);
-               if (ret) {
-                       dev_err(ctrl->device, "Get REPORT CHUNK log error\n");
-                       break;
-               }
-
-               for (i = 0; i < len; i += sizeof(struct nvme_nvm_chk_meta)) {
-                       meta->state = dev_meta_off->state;
-                       meta->type = dev_meta_off->type;
-                       meta->wi = dev_meta_off->wi;
-                       meta->slba = le64_to_cpu(dev_meta_off->slba);
-                       meta->cnlb = le64_to_cpu(dev_meta_off->cnlb);
-                       meta->wp = le64_to_cpu(dev_meta_off->wp);
-
-                       meta++;
-                       dev_meta_off++;
-               }
-
-               offset += len;
-               left -= len;
-       }
-
-       kfree(dev_meta);
-
-       return ret;
-}
-
-static inline void nvme_nvm_rqtocmd(struct nvm_rq *rqd, struct nvme_ns *ns,
-                                   struct nvme_nvm_command *c)
-{
-       c->ph_rw.opcode = rqd->opcode;
-       c->ph_rw.nsid = cpu_to_le32(ns->head->ns_id);
-       c->ph_rw.spba = cpu_to_le64(rqd->ppa_addr.ppa);
-       c->ph_rw.metadata = cpu_to_le64(rqd->dma_meta_list);
-       c->ph_rw.control = cpu_to_le16(rqd->flags);
-       c->ph_rw.length = cpu_to_le16(rqd->nr_ppas - 1);
-}
-
-static void nvme_nvm_end_io(struct request *rq, blk_status_t status)
-{
-       struct nvm_rq *rqd = rq->end_io_data;
-
-       rqd->ppa_status = le64_to_cpu(nvme_req(rq)->result.u64);
-       rqd->error = nvme_req(rq)->status;
-       nvm_end_io(rqd);
-
-       kfree(nvme_req(rq)->cmd);
-       blk_mq_free_request(rq);
-}
-
-static struct request *nvme_nvm_alloc_request(struct request_queue *q,
-                                             struct nvm_rq *rqd,
-                                             struct nvme_nvm_command *cmd)
-{
-       struct nvme_ns *ns = q->queuedata;
-       struct request *rq;
-
-       nvme_nvm_rqtocmd(rqd, ns, cmd);
-
-       rq = nvme_alloc_request(q, (struct nvme_command *)cmd, 0);
-       if (IS_ERR(rq))
-               return rq;
-
-       rq->cmd_flags &= ~REQ_FAILFAST_DRIVER;
-
-       if (rqd->bio)
-               blk_rq_append_bio(rq, rqd->bio);
-       else
-               rq->ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);
-
-       return rq;
-}
-
-static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd,
-                             void *buf)
-{
-       struct nvm_geo *geo = &dev->geo;
-       struct request_queue *q = dev->q;
-       struct nvme_nvm_command *cmd;
-       struct request *rq;
-       int ret;
-
-       cmd = kzalloc(sizeof(struct nvme_nvm_command), GFP_KERNEL);
-       if (!cmd)
-               return -ENOMEM;
-
-       rq = nvme_nvm_alloc_request(q, rqd, cmd);
-       if (IS_ERR(rq)) {
-               ret = PTR_ERR(rq);
-               goto err_free_cmd;
-       }
-
-       if (buf) {
-               ret = blk_rq_map_kern(q, rq, buf, geo->csecs * rqd->nr_ppas,
-                               GFP_KERNEL);
-               if (ret)
-                       goto err_free_cmd;
-       }
-
-       rq->end_io_data = rqd;
-
-       blk_execute_rq_nowait(NULL, rq, 0, nvme_nvm_end_io);
-
-       return 0;
-
-err_free_cmd:
-       kfree(cmd);
-       return ret;
-}
-
-static void *nvme_nvm_create_dma_pool(struct nvm_dev *nvmdev, char *name,
-                                       int size)
-{
-       struct nvme_ns *ns = nvmdev->q->queuedata;
-
-       return dma_pool_create(name, ns->ctrl->dev, size, PAGE_SIZE, 0);
-}
-
-static void nvme_nvm_destroy_dma_pool(void *pool)
-{
-       struct dma_pool *dma_pool = pool;
-
-       dma_pool_destroy(dma_pool);
-}
-
-static void *nvme_nvm_dev_dma_alloc(struct nvm_dev *dev, void *pool,
-                                   gfp_t mem_flags, dma_addr_t *dma_handler)
-{
-       return dma_pool_alloc(pool, mem_flags, dma_handler);
-}
-
-static void nvme_nvm_dev_dma_free(void *pool, void *addr,
-                                                       dma_addr_t dma_handler)
-{
-       dma_pool_free(pool, addr, dma_handler);
-}
-
-static struct nvm_dev_ops nvme_nvm_dev_ops = {
-       .identity               = nvme_nvm_identity,
-
-       .get_bb_tbl             = nvme_nvm_get_bb_tbl,
-       .set_bb_tbl             = nvme_nvm_set_bb_tbl,
-
-       .get_chk_meta           = nvme_nvm_get_chk_meta,
-
-       .submit_io              = nvme_nvm_submit_io,
-
-       .create_dma_pool        = nvme_nvm_create_dma_pool,
-       .destroy_dma_pool       = nvme_nvm_destroy_dma_pool,
-       .dev_dma_alloc          = nvme_nvm_dev_dma_alloc,
-       .dev_dma_free           = nvme_nvm_dev_dma_free,
-};
-
-static int nvme_nvm_submit_user_cmd(struct request_queue *q,
-                               struct nvme_ns *ns,
-                               struct nvme_nvm_command *vcmd,
-                               void __user *ubuf, unsigned int bufflen,
-                               void __user *meta_buf, unsigned int meta_len,
-                               void __user *ppa_buf, unsigned int ppa_len,
-                               u32 *result, u64 *status, unsigned int timeout)
-{
-       bool write = nvme_is_write((struct nvme_command *)vcmd);
-       struct nvm_dev *dev = ns->ndev;
-       struct request *rq;
-       struct bio *bio = NULL;
-       __le64 *ppa_list = NULL;
-       dma_addr_t ppa_dma;
-       __le64 *metadata = NULL;
-       dma_addr_t metadata_dma;
-       DECLARE_COMPLETION_ONSTACK(wait);
-       int ret = 0;
-
-       rq = nvme_alloc_request(q, (struct nvme_command *)vcmd, 0);
-       if (IS_ERR(rq)) {
-               ret = -ENOMEM;
-               goto err_cmd;
-       }
-
-       if (timeout)
-               rq->timeout = timeout;
-
-       if (ppa_buf && ppa_len) {
-               ppa_list = dma_pool_alloc(dev->dma_pool, GFP_KERNEL, &ppa_dma);
-               if (!ppa_list) {
-                       ret = -ENOMEM;
-                       goto err_rq;
-               }
-               if (copy_from_user(ppa_list, (void __user *)ppa_buf,
-                                               sizeof(u64) * (ppa_len + 1))) {
-                       ret = -EFAULT;
-                       goto err_ppa;
-               }
-               vcmd->ph_rw.spba = cpu_to_le64(ppa_dma);
-       } else {
-               vcmd->ph_rw.spba = cpu_to_le64((uintptr_t)ppa_buf);
-       }
-
-       if (ubuf && bufflen) {
-               ret = blk_rq_map_user(q, rq, NULL, ubuf, bufflen, GFP_KERNEL);
-               if (ret)
-                       goto err_ppa;
-               bio = rq->bio;
-
-               if (meta_buf && meta_len) {
-                       metadata = dma_pool_alloc(dev->dma_pool, GFP_KERNEL,
-                                                               &metadata_dma);
-                       if (!metadata) {
-                               ret = -ENOMEM;
-                               goto err_map;
-                       }
-
-                       if (write) {
-                               if (copy_from_user(metadata,
-                                               (void __user *)meta_buf,
-                                               meta_len)) {
-                                       ret = -EFAULT;
-                                       goto err_meta;
-                               }
-                       }
-                       vcmd->ph_rw.metadata = cpu_to_le64(metadata_dma);
-               }
-
-               bio_set_dev(bio, ns->disk->part0);
-       }
-
-       blk_execute_rq(NULL, rq, 0);
-
-       if (nvme_req(rq)->flags & NVME_REQ_CANCELLED)
-               ret = -EINTR;
-       else if (nvme_req(rq)->status & 0x7ff)
-               ret = -EIO;
-       if (result)
-               *result = nvme_req(rq)->status & 0x7ff;
-       if (status)
-               *status = le64_to_cpu(nvme_req(rq)->result.u64);
-
-       if (metadata && !ret && !write) {
-               if (copy_to_user(meta_buf, (void *)metadata, meta_len))
-                       ret = -EFAULT;
-       }
-err_meta:
-       if (meta_buf && meta_len)
-               dma_pool_free(dev->dma_pool, metadata, metadata_dma);
-err_map:
-       if (bio)
-               blk_rq_unmap_user(bio);
-err_ppa:
-       if (ppa_buf && ppa_len)
-               dma_pool_free(dev->dma_pool, ppa_list, ppa_dma);
-err_rq:
-       blk_mq_free_request(rq);
-err_cmd:
-       return ret;
-}
-
-static int nvme_nvm_submit_vio(struct nvme_ns *ns,
-                                       struct nvm_user_vio __user *uvio)
-{
-       struct nvm_user_vio vio;
-       struct nvme_nvm_command c;
-       unsigned int length;
-       int ret;
-
-       if (copy_from_user(&vio, uvio, sizeof(vio)))
-               return -EFAULT;
-       if (vio.flags)
-               return -EINVAL;
-
-       memset(&c, 0, sizeof(c));
-       c.ph_rw.opcode = vio.opcode;
-       c.ph_rw.nsid = cpu_to_le32(ns->head->ns_id);
-       c.ph_rw.control = cpu_to_le16(vio.control);
-       c.ph_rw.length = cpu_to_le16(vio.nppas);
-
-       length = (vio.nppas + 1) << ns->lba_shift;
-
-       ret = nvme_nvm_submit_user_cmd(ns->queue, ns, &c,
-                       (void __user *)(uintptr_t)vio.addr, length,
-                       (void __user *)(uintptr_t)vio.metadata,
-                                                       vio.metadata_len,
-                       (void __user *)(uintptr_t)vio.ppa_list, vio.nppas,
-                       &vio.result, &vio.status, 0);
-
-       if (ret && copy_to_user(uvio, &vio, sizeof(vio)))
-               return -EFAULT;
-
-       return ret;
-}
-
-static int nvme_nvm_user_vcmd(struct nvme_ns *ns, int admin,
-                                       struct nvm_passthru_vio __user *uvcmd)
-{
-       struct nvm_passthru_vio vcmd;
-       struct nvme_nvm_command c;
-       struct request_queue *q;
-       unsigned int timeout = 0;
-       int ret;
-
-       if (copy_from_user(&vcmd, uvcmd, sizeof(vcmd)))
-               return -EFAULT;
-       if ((vcmd.opcode != 0xF2) && (!capable(CAP_SYS_ADMIN)))
-               return -EACCES;
-       if (vcmd.flags)
-               return -EINVAL;
-
-       memset(&c, 0, sizeof(c));
-       c.common.opcode = vcmd.opcode;
-       c.common.nsid = cpu_to_le32(ns->head->ns_id);
-       c.common.cdw2[0] = cpu_to_le32(vcmd.cdw2);
-       c.common.cdw2[1] = cpu_to_le32(vcmd.cdw3);
-       /* cdw11-12 */
-       c.ph_rw.length = cpu_to_le16(vcmd.nppas);
-       c.ph_rw.control  = cpu_to_le16(vcmd.control);
-       c.common.cdw13 = cpu_to_le32(vcmd.cdw13);
-       c.common.cdw14 = cpu_to_le32(vcmd.cdw14);
-       c.common.cdw15 = cpu_to_le32(vcmd.cdw15);
-
-       if (vcmd.timeout_ms)
-               timeout = msecs_to_jiffies(vcmd.timeout_ms);
-
-       q = admin ? ns->ctrl->admin_q : ns->queue;
-
-       ret = nvme_nvm_submit_user_cmd(q, ns,
-                       (struct nvme_nvm_command *)&c,
-                       (void __user *)(uintptr_t)vcmd.addr, vcmd.data_len,
-                       (void __user *)(uintptr_t)vcmd.metadata,
-                                                       vcmd.metadata_len,
-                       (void __user *)(uintptr_t)vcmd.ppa_list, vcmd.nppas,
-                       &vcmd.result, &vcmd.status, timeout);
-
-       if (ret && copy_to_user(uvcmd, &vcmd, sizeof(vcmd)))
-               return -EFAULT;
-
-       return ret;
-}
-
-int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd, void __user *argp)
-{
-       switch (cmd) {
-       case NVME_NVM_IOCTL_ADMIN_VIO:
-               return nvme_nvm_user_vcmd(ns, 1, argp);
-       case NVME_NVM_IOCTL_IO_VIO:
-               return nvme_nvm_user_vcmd(ns, 0, argp);
-       case NVME_NVM_IOCTL_SUBMIT_VIO:
-               return nvme_nvm_submit_vio(ns, argp);
-       default:
-               return -ENOTTY;
-       }
-}
-
-int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node)
-{
-       struct request_queue *q = ns->queue;
-       struct nvm_dev *dev;
-       struct nvm_geo *geo;
-
-       _nvme_nvm_check_size();
-
-       dev = nvm_alloc_dev(node);
-       if (!dev)
-               return -ENOMEM;
-
-       /* Note that csecs and sos will be overridden if it is a 1.2 drive. */
-       geo = &dev->geo;
-       geo->csecs = 1 << ns->lba_shift;
-       geo->sos = ns->ms;
-       if (ns->features & NVME_NS_EXT_LBAS)
-               geo->ext = true;
-       else
-               geo->ext = false;
-       geo->mdts = ns->ctrl->max_hw_sectors;
-
-       dev->q = q;
-       memcpy(dev->name, disk_name, DISK_NAME_LEN);
-       dev->ops = &nvme_nvm_dev_ops;
-       dev->private_data = ns;
-       ns->ndev = dev;
-
-       return nvm_register(dev);
-}
-
-void nvme_nvm_unregister(struct nvme_ns *ns)
-{
-       nvm_unregister(ns->ndev);
-}
-
-static ssize_t nvm_dev_attr_show(struct device *dev,
-               struct device_attribute *dattr, char *page)
-{
-       struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
-       struct nvm_dev *ndev = ns->ndev;
-       struct nvm_geo *geo = &ndev->geo;
-       struct attribute *attr;
-
-       if (!ndev)
-               return 0;
-
-       attr = &dattr->attr;
-
-       if (strcmp(attr->name, "version") == 0) {
-               if (geo->major_ver_id == 1)
-                       return scnprintf(page, PAGE_SIZE, "%u\n",
-                                               geo->major_ver_id);
-               else
-                       return scnprintf(page, PAGE_SIZE, "%u.%u\n",
-                                               geo->major_ver_id,
-                                               geo->minor_ver_id);
-       } else if (strcmp(attr->name, "capabilities") == 0) {
-               return scnprintf(page, PAGE_SIZE, "%u\n", geo->cap);
-       } else if (strcmp(attr->name, "read_typ") == 0) {
-               return scnprintf(page, PAGE_SIZE, "%u\n", geo->trdt);
-       } else if (strcmp(attr->name, "read_max") == 0) {
-               return scnprintf(page, PAGE_SIZE, "%u\n", geo->trdm);
-       } else {
-               return scnprintf(page,
-                                PAGE_SIZE,
-                                "Unhandled attr(%s) in `%s`\n",
-                                attr->name, __func__);
-       }
-}
-
-static ssize_t nvm_dev_attr_show_ppaf(struct nvm_addrf_12 *ppaf, char *page)
-{
-       return scnprintf(page, PAGE_SIZE,
-               "0x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n",
-                               ppaf->ch_offset, ppaf->ch_len,
-                               ppaf->lun_offset, ppaf->lun_len,
-                               ppaf->pln_offset, ppaf->pln_len,
-                               ppaf->blk_offset, ppaf->blk_len,
-                               ppaf->pg_offset, ppaf->pg_len,
-                               ppaf->sec_offset, ppaf->sec_len);
-}
-
-static ssize_t nvm_dev_attr_show_12(struct device *dev,
-               struct device_attribute *dattr, char *page)
-{
-       struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
-       struct nvm_dev *ndev = ns->ndev;
-       struct nvm_geo *geo = &ndev->geo;
-       struct attribute *attr;
-
-       if (!ndev)
-               return 0;
-
-       attr = &dattr->attr;
-
-       if (strcmp(attr->name, "vendor_opcode") == 0) {
-               return scnprintf(page, PAGE_SIZE, "%u\n", geo->vmnt);
-       } else if (strcmp(attr->name, "device_mode") == 0) {
-               return scnprintf(page, PAGE_SIZE, "%u\n", geo->dom);
-       /* kept for compatibility */
-       } else if (strcmp(attr->name, "media_manager") == 0) {
-               return scnprintf(page, PAGE_SIZE, "%s\n", "gennvm");
-       } else if (strcmp(attr->name, "ppa_format") == 0) {
-               return nvm_dev_attr_show_ppaf((void *)&geo->addrf, page);
-       } else if (strcmp(attr->name, "media_type") == 0) {     /* u8 */
-               return scnprintf(page, PAGE_SIZE, "%u\n", geo->mtype);
-       } else if (strcmp(attr->name, "flash_media_type") == 0) {
-               return scnprintf(page, PAGE_SIZE, "%u\n", geo->fmtype);
-       } else if (strcmp(attr->name, "num_channels") == 0) {
-               return scnprintf(page, PAGE_SIZE, "%u\n", geo->num_ch);
-       } else if (strcmp(attr->name, "num_luns") == 0) {
-               return scnprintf(page, PAGE_SIZE, "%u\n", geo->num_lun);
-       } else if (strcmp(attr->name, "num_planes") == 0) {
-               return scnprintf(page, PAGE_SIZE, "%u\n", geo->num_pln);
-       } else if (strcmp(attr->name, "num_blocks") == 0) {     /* u16 */
-               return scnprintf(page, PAGE_SIZE, "%u\n", geo->num_chk);
-       } else if (strcmp(attr->name, "num_pages") == 0) {
-               return scnprintf(page, PAGE_SIZE, "%u\n", geo->num_pg);
-       } else if (strcmp(attr->name, "page_size") == 0) {
-               return scnprintf(page, PAGE_SIZE, "%u\n", geo->fpg_sz);
-       } else if (strcmp(attr->name, "hw_sector_size") == 0) {
-               return scnprintf(page, PAGE_SIZE, "%u\n", geo->csecs);
-       } else if (strcmp(attr->name, "oob_sector_size") == 0) {/* u32 */
-               return scnprintf(page, PAGE_SIZE, "%u\n", geo->sos);
-       } else if (strcmp(attr->name, "prog_typ") == 0) {
-               return scnprintf(page, PAGE_SIZE, "%u\n", geo->tprt);
-       } else if (strcmp(attr->name, "prog_max") == 0) {
-               return scnprintf(page, PAGE_SIZE, "%u\n", geo->tprm);
-       } else if (strcmp(attr->name, "erase_typ") == 0) {
-               return scnprintf(page, PAGE_SIZE, "%u\n", geo->tbet);
-       } else if (strcmp(attr->name, "erase_max") == 0) {
-               return scnprintf(page, PAGE_SIZE, "%u\n", geo->tbem);
-       } else if (strcmp(attr->name, "multiplane_modes") == 0) {
-               return scnprintf(page, PAGE_SIZE, "0x%08x\n", geo->mpos);
-       } else if (strcmp(attr->name, "media_capabilities") == 0) {
-               return scnprintf(page, PAGE_SIZE, "0x%08x\n", geo->mccap);
-       } else if (strcmp(attr->name, "max_phys_secs") == 0) {
-               return scnprintf(page, PAGE_SIZE, "%u\n", NVM_MAX_VLBA);
-       } else {
-               return scnprintf(page, PAGE_SIZE,
-                       "Unhandled attr(%s) in `%s`\n",
-                       attr->name, __func__);
-       }
-}
-
-static ssize_t nvm_dev_attr_show_20(struct device *dev,
-               struct device_attribute *dattr, char *page)
-{
-       struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
-       struct nvm_dev *ndev = ns->ndev;
-       struct nvm_geo *geo = &ndev->geo;
-       struct attribute *attr;
-
-       if (!ndev)
-               return 0;
-
-       attr = &dattr->attr;
-
-       if (strcmp(attr->name, "groups") == 0) {
-               return scnprintf(page, PAGE_SIZE, "%u\n", geo->num_ch);
-       } else if (strcmp(attr->name, "punits") == 0) {
-               return scnprintf(page, PAGE_SIZE, "%u\n", geo->num_lun);
-       } else if (strcmp(attr->name, "chunks") == 0) {
-               return scnprintf(page, PAGE_SIZE, "%u\n", geo->num_chk);
-       } else if (strcmp(attr->name, "clba") == 0) {
-               return scnprintf(page, PAGE_SIZE, "%u\n", geo->clba);
-       } else if (strcmp(attr->name, "ws_min") == 0) {
-               return scnprintf(page, PAGE_SIZE, "%u\n", geo->ws_min);
-       } else if (strcmp(attr->name, "ws_opt") == 0) {
-               return scnprintf(page, PAGE_SIZE, "%u\n", geo->ws_opt);
-       } else if (strcmp(attr->name, "maxoc") == 0) {
-               return scnprintf(page, PAGE_SIZE, "%u\n", geo->maxoc);
-       } else if (strcmp(attr->name, "maxocpu") == 0) {
-               return scnprintf(page, PAGE_SIZE, "%u\n", geo->maxocpu);
-       } else if (strcmp(attr->name, "mw_cunits") == 0) {
-               return scnprintf(page, PAGE_SIZE, "%u\n", geo->mw_cunits);
-       } else if (strcmp(attr->name, "write_typ") == 0) {
-               return scnprintf(page, PAGE_SIZE, "%u\n", geo->tprt);
-       } else if (strcmp(attr->name, "write_max") == 0) {
-               return scnprintf(page, PAGE_SIZE, "%u\n", geo->tprm);
-       } else if (strcmp(attr->name, "reset_typ") == 0) {
-               return scnprintf(page, PAGE_SIZE, "%u\n", geo->tbet);
-       } else if (strcmp(attr->name, "reset_max") == 0) {
-               return scnprintf(page, PAGE_SIZE, "%u\n", geo->tbem);
-       } else {
-               return scnprintf(page, PAGE_SIZE,
-                       "Unhandled attr(%s) in `%s`\n",
-                       attr->name, __func__);
-       }
-}
-
-#define NVM_DEV_ATTR_RO(_name)                                 \
-       DEVICE_ATTR(_name, S_IRUGO, nvm_dev_attr_show, NULL)
-#define NVM_DEV_ATTR_12_RO(_name)                                      \
-       DEVICE_ATTR(_name, S_IRUGO, nvm_dev_attr_show_12, NULL)
-#define NVM_DEV_ATTR_20_RO(_name)                                      \
-       DEVICE_ATTR(_name, S_IRUGO, nvm_dev_attr_show_20, NULL)
-
-/* general attributes */
-static NVM_DEV_ATTR_RO(version);
-static NVM_DEV_ATTR_RO(capabilities);
-
-static NVM_DEV_ATTR_RO(read_typ);
-static NVM_DEV_ATTR_RO(read_max);
-
-/* 1.2 values */
-static NVM_DEV_ATTR_12_RO(vendor_opcode);
-static NVM_DEV_ATTR_12_RO(device_mode);
-static NVM_DEV_ATTR_12_RO(ppa_format);
-static NVM_DEV_ATTR_12_RO(media_manager);
-static NVM_DEV_ATTR_12_RO(media_type);
-static NVM_DEV_ATTR_12_RO(flash_media_type);
-static NVM_DEV_ATTR_12_RO(num_channels);
-static NVM_DEV_ATTR_12_RO(num_luns);
-static NVM_DEV_ATTR_12_RO(num_planes);
-static NVM_DEV_ATTR_12_RO(num_blocks);
-static NVM_DEV_ATTR_12_RO(num_pages);
-static NVM_DEV_ATTR_12_RO(page_size);
-static NVM_DEV_ATTR_12_RO(hw_sector_size);
-static NVM_DEV_ATTR_12_RO(oob_sector_size);
-static NVM_DEV_ATTR_12_RO(prog_typ);
-static NVM_DEV_ATTR_12_RO(prog_max);
-static NVM_DEV_ATTR_12_RO(erase_typ);
-static NVM_DEV_ATTR_12_RO(erase_max);
-static NVM_DEV_ATTR_12_RO(multiplane_modes);
-static NVM_DEV_ATTR_12_RO(media_capabilities);
-static NVM_DEV_ATTR_12_RO(max_phys_secs);
-
-/* 2.0 values */
-static NVM_DEV_ATTR_20_RO(groups);
-static NVM_DEV_ATTR_20_RO(punits);
-static NVM_DEV_ATTR_20_RO(chunks);
-static NVM_DEV_ATTR_20_RO(clba);
-static NVM_DEV_ATTR_20_RO(ws_min);
-static NVM_DEV_ATTR_20_RO(ws_opt);
-static NVM_DEV_ATTR_20_RO(maxoc);
-static NVM_DEV_ATTR_20_RO(maxocpu);
-static NVM_DEV_ATTR_20_RO(mw_cunits);
-static NVM_DEV_ATTR_20_RO(write_typ);
-static NVM_DEV_ATTR_20_RO(write_max);
-static NVM_DEV_ATTR_20_RO(reset_typ);
-static NVM_DEV_ATTR_20_RO(reset_max);
-
-static struct attribute *nvm_dev_attrs[] = {
-       /* version agnostic attrs */
-       &dev_attr_version.attr,
-       &dev_attr_capabilities.attr,
-       &dev_attr_read_typ.attr,
-       &dev_attr_read_max.attr,
-
-       /* 1.2 attrs */
-       &dev_attr_vendor_opcode.attr,
-       &dev_attr_device_mode.attr,
-       &dev_attr_media_manager.attr,
-       &dev_attr_ppa_format.attr,
-       &dev_attr_media_type.attr,
-       &dev_attr_flash_media_type.attr,
-       &dev_attr_num_channels.attr,
-       &dev_attr_num_luns.attr,
-       &dev_attr_num_planes.attr,
-       &dev_attr_num_blocks.attr,
-       &dev_attr_num_pages.attr,
-       &dev_attr_page_size.attr,
-       &dev_attr_hw_sector_size.attr,
-       &dev_attr_oob_sector_size.attr,
-       &dev_attr_prog_typ.attr,
-       &dev_attr_prog_max.attr,
-       &dev_attr_erase_typ.attr,
-       &dev_attr_erase_max.attr,
-       &dev_attr_multiplane_modes.attr,
-       &dev_attr_media_capabilities.attr,
-       &dev_attr_max_phys_secs.attr,
-
-       /* 2.0 attrs */
-       &dev_attr_groups.attr,
-       &dev_attr_punits.attr,
-       &dev_attr_chunks.attr,
-       &dev_attr_clba.attr,
-       &dev_attr_ws_min.attr,
-       &dev_attr_ws_opt.attr,
-       &dev_attr_maxoc.attr,
-       &dev_attr_maxocpu.attr,
-       &dev_attr_mw_cunits.attr,
-
-       &dev_attr_write_typ.attr,
-       &dev_attr_write_max.attr,
-       &dev_attr_reset_typ.attr,
-       &dev_attr_reset_max.attr,
-
-       NULL,
-};
-
-static umode_t nvm_dev_attrs_visible(struct kobject *kobj,
-                                    struct attribute *attr, int index)
-{
-       struct device *dev = kobj_to_dev(kobj);
-       struct gendisk *disk = dev_to_disk(dev);
-       struct nvme_ns *ns = disk->private_data;
-       struct nvm_dev *ndev = ns->ndev;
-       struct device_attribute *dev_attr =
-               container_of(attr, typeof(*dev_attr), attr);
-
-       if (!ndev)
-               return 0;
-
-       if (dev_attr->show == nvm_dev_attr_show)
-               return attr->mode;
-
-       switch (ndev->geo.major_ver_id) {
-       case 1:
-               if (dev_attr->show == nvm_dev_attr_show_12)
-                       return attr->mode;
-               break;
-       case 2:
-               if (dev_attr->show == nvm_dev_attr_show_20)
-                       return attr->mode;
-               break;
-       }
-
-       return 0;
-}
-
-const struct attribute_group nvme_nvm_attr_group = {
-       .name           = "lightnvm",
-       .attrs          = nvm_dev_attrs,
-       .is_visible     = nvm_dev_attrs_visible,
-};
index 3f32c5e..37ce3e8 100644 (file)
@@ -765,7 +765,7 @@ void nvme_mpath_shutdown_disk(struct nvme_ns_head *head)
        if (!head->disk)
                return;
        kblockd_schedule_work(&head->requeue_work);
-       if (head->disk->flags & GENHD_FL_UP) {
+       if (test_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) {
                nvme_cdev_del(&head->cdev, &head->cdev_device);
                del_gendisk(head->disk);
        }
index 5cd1fa3..a2e1f29 100644 (file)
@@ -11,7 +11,6 @@
 #include <linux/pci.h>
 #include <linux/kref.h>
 #include <linux/blk-mq.h>
-#include <linux/lightnvm.h>
 #include <linux/sed-opal.h>
 #include <linux/fault-inject.h>
 #include <linux/rcupdate.h>
@@ -48,11 +47,6 @@ extern struct workqueue_struct *nvme_wq;
 extern struct workqueue_struct *nvme_reset_wq;
 extern struct workqueue_struct *nvme_delete_wq;
 
-enum {
-       NVME_NS_LBA             = 0,
-       NVME_NS_LIGHTNVM        = 1,
-};
-
 /*
  * List of workarounds for devices that required behavior not specified in
  * the standard.
@@ -92,11 +86,6 @@ enum nvme_quirks {
         */
        NVME_QUIRK_NO_DEEPEST_PS                = (1 << 5),
 
-       /*
-        * Supports the LighNVM command set if indicated in vs[1].
-        */
-       NVME_QUIRK_LIGHTNVM                     = (1 << 6),
-
        /*
         * Set MEDIUM priority on SQ creation
         */
@@ -158,6 +147,7 @@ enum nvme_quirks {
 struct nvme_request {
        struct nvme_command     *cmd;
        union nvme_result       result;
+       u8                      genctr;
        u8                      retries;
        u8                      flags;
        u16                     status;
@@ -449,7 +439,6 @@ struct nvme_ns {
        u32 ana_grpid;
 #endif
        struct list_head siblings;
-       struct nvm_dev *ndev;
        struct kref kref;
        struct nvme_ns_head *head;
 
@@ -497,6 +486,49 @@ struct nvme_ctrl_ops {
        int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size);
 };
 
+/*
+ * nvme command_id is constructed as such:
+ * | xxxx | xxxxxxxxxxxx |
+ *   gen    request tag
+ */
+#define nvme_genctr_mask(gen)                  (gen & 0xf)
+#define nvme_cid_install_genctr(gen)           (nvme_genctr_mask(gen) << 12)
+#define nvme_genctr_from_cid(cid)              ((cid & 0xf000) >> 12)
+#define nvme_tag_from_cid(cid)                 (cid & 0xfff)
+
+static inline u16 nvme_cid(struct request *rq)
+{
+       return nvme_cid_install_genctr(nvme_req(rq)->genctr) | rq->tag;
+}
+
+static inline struct request *nvme_find_rq(struct blk_mq_tags *tags,
+               u16 command_id)
+{
+       u8 genctr = nvme_genctr_from_cid(command_id);
+       u16 tag = nvme_tag_from_cid(command_id);
+       struct request *rq;
+
+       rq = blk_mq_tag_to_rq(tags, tag);
+       if (unlikely(!rq)) {
+               pr_err("could not locate request for tag %#x\n",
+                       tag);
+               return NULL;
+       }
+       if (unlikely(nvme_genctr_mask(nvme_req(rq)->genctr) != genctr)) {
+               dev_err(nvme_req(rq)->ctrl->device,
+                       "request %#x genctr mismatch (got %#x expected %#x)\n",
+                       tag, genctr, nvme_genctr_mask(nvme_req(rq)->genctr));
+               return NULL;
+       }
+       return rq;
+}
+
+static inline struct request *nvme_cid_to_rq(struct blk_mq_tags *tags,
+                u16 command_id)
+{
+       return blk_mq_tag_to_rq(tags, nvme_tag_from_cid(command_id));
+}
+
 #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
 void nvme_fault_inject_init(struct nvme_fault_inject *fault_inj,
                            const char *dev_name);
@@ -594,7 +626,8 @@ static inline void nvme_put_ctrl(struct nvme_ctrl *ctrl)
 
 static inline bool nvme_is_aen_req(u16 qid, __u16 command_id)
 {
-       return !qid && command_id >= NVME_AQ_BLK_MQ_DEPTH;
+       return !qid &&
+               nvme_tag_from_cid(command_id) >= NVME_AQ_BLK_MQ_DEPTH;
 }
 
 void nvme_complete_rq(struct request *req);
@@ -823,26 +856,6 @@ static inline int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf)
 }
 #endif
 
-#ifdef CONFIG_NVM
-int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node);
-void nvme_nvm_unregister(struct nvme_ns *ns);
-extern const struct attribute_group nvme_nvm_attr_group;
-int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd, void __user *argp);
-#else
-static inline int nvme_nvm_register(struct nvme_ns *ns, char *disk_name,
-                                   int node)
-{
-       return 0;
-}
-
-static inline void nvme_nvm_unregister(struct nvme_ns *ns) {};
-static inline int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd,
-               void __user *argp)
-{
-       return -ENOTTY;
-}
-#endif /* CONFIG_NVM */
-
 static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)
 {
        return dev_to_disk(dev)->private_data;
index 5185208..b82492c 100644 (file)
@@ -60,6 +60,8 @@ MODULE_PARM_DESC(sgl_threshold,
                "Use SGLs when average request segment size is larger or equal to "
                "this size. Use 0 to disable SGLs.");
 
+#define NVME_PCI_MIN_QUEUE_SIZE 2
+#define NVME_PCI_MAX_QUEUE_SIZE 4095
 static int io_queue_depth_set(const char *val, const struct kernel_param *kp);
 static const struct kernel_param_ops io_queue_depth_ops = {
        .set = io_queue_depth_set,
@@ -68,7 +70,7 @@ static const struct kernel_param_ops io_queue_depth_ops = {
 
 static unsigned int io_queue_depth = 1024;
 module_param_cb(io_queue_depth, &io_queue_depth_ops, &io_queue_depth, 0644);
-MODULE_PARM_DESC(io_queue_depth, "set io queue depth, should >= 2");
+MODULE_PARM_DESC(io_queue_depth, "set io queue depth, should >= 2 and < 4096");
 
 static int io_queue_count_set(const char *val, const struct kernel_param *kp)
 {
@@ -135,6 +137,7 @@ struct nvme_dev {
        u32 cmbloc;
        struct nvme_ctrl ctrl;
        u32 last_ps;
+       bool hmb;
 
        mempool_t *iod_mempool;
 
@@ -153,18 +156,14 @@ struct nvme_dev {
        unsigned int nr_allocated_queues;
        unsigned int nr_write_queues;
        unsigned int nr_poll_queues;
+
+       bool attrs_added;
 };
 
 static int io_queue_depth_set(const char *val, const struct kernel_param *kp)
 {
-       int ret;
-       u32 n;
-
-       ret = kstrtou32(val, 10, &n);
-       if (ret != 0 || n < 2)
-               return -EINVAL;
-
-       return param_set_uint(val, kp);
+       return param_set_uint_minmax(val, kp, NVME_PCI_MIN_QUEUE_SIZE,
+                       NVME_PCI_MAX_QUEUE_SIZE);
 }
 
 static inline unsigned int sq_idx(unsigned int qid, u32 stride)
@@ -1014,7 +1013,7 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
                return;
        }
 
-       req = blk_mq_tag_to_rq(nvme_queue_tagset(nvmeq), command_id);
+       req = nvme_find_rq(nvme_queue_tagset(nvmeq), command_id);
        if (unlikely(!req)) {
                dev_warn(nvmeq->dev->ctrl.device,
                        "invalid id %d completed on queue %d\n",
@@ -1808,17 +1807,6 @@ static int nvme_create_io_queues(struct nvme_dev *dev)
        return ret >= 0 ? 0 : ret;
 }
 
-static ssize_t nvme_cmb_show(struct device *dev,
-                            struct device_attribute *attr,
-                            char *buf)
-{
-       struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev));
-
-       return scnprintf(buf, PAGE_SIZE, "cmbloc : x%08x\ncmbsz  : x%08x\n",
-                      ndev->cmbloc, ndev->cmbsz);
-}
-static DEVICE_ATTR(cmb, S_IRUGO, nvme_cmb_show, NULL);
-
 static u64 nvme_cmb_size_unit(struct nvme_dev *dev)
 {
        u8 szu = (dev->cmbsz >> NVME_CMBSZ_SZU_SHIFT) & NVME_CMBSZ_SZU_MASK;
@@ -1887,20 +1875,6 @@ static void nvme_map_cmb(struct nvme_dev *dev)
        if ((dev->cmbsz & (NVME_CMBSZ_WDS | NVME_CMBSZ_RDS)) ==
                        (NVME_CMBSZ_WDS | NVME_CMBSZ_RDS))
                pci_p2pmem_publish(pdev, true);
-
-       if (sysfs_add_file_to_group(&dev->ctrl.device->kobj,
-                                   &dev_attr_cmb.attr, NULL))
-               dev_warn(dev->ctrl.device,
-                        "failed to add sysfs attribute for CMB\n");
-}
-
-static inline void nvme_release_cmb(struct nvme_dev *dev)
-{
-       if (dev->cmb_size) {
-               sysfs_remove_file_from_group(&dev->ctrl.device->kobj,
-                                            &dev_attr_cmb.attr, NULL);
-               dev->cmb_size = 0;
-       }
 }
 
 static int nvme_set_host_mem(struct nvme_dev *dev, u32 bits)
@@ -1923,7 +1897,9 @@ static int nvme_set_host_mem(struct nvme_dev *dev, u32 bits)
                dev_warn(dev->ctrl.device,
                         "failed to set host mem (err %d, flags %#x).\n",
                         ret, bits);
-       }
+       } else
+               dev->hmb = bits & NVME_HOST_MEM_ENABLE;
+
        return ret;
 }
 
@@ -2080,6 +2056,102 @@ static int nvme_setup_host_mem(struct nvme_dev *dev)
        return ret;
 }
 
+static ssize_t cmb_show(struct device *dev, struct device_attribute *attr,
+               char *buf)
+{
+       struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev));
+
+       return sysfs_emit(buf, "cmbloc : x%08x\ncmbsz  : x%08x\n",
+                      ndev->cmbloc, ndev->cmbsz);
+}
+static DEVICE_ATTR_RO(cmb);
+
+static ssize_t cmbloc_show(struct device *dev, struct device_attribute *attr,
+               char *buf)
+{
+       struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev));
+
+       return sysfs_emit(buf, "%u\n", ndev->cmbloc);
+}
+static DEVICE_ATTR_RO(cmbloc);
+
+static ssize_t cmbsz_show(struct device *dev, struct device_attribute *attr,
+               char *buf)
+{
+       struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev));
+
+       return sysfs_emit(buf, "%u\n", ndev->cmbsz);
+}
+static DEVICE_ATTR_RO(cmbsz);
+
+static ssize_t hmb_show(struct device *dev, struct device_attribute *attr,
+                       char *buf)
+{
+       struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev));
+
+       return sysfs_emit(buf, "%d\n", ndev->hmb);
+}
+
+static ssize_t hmb_store(struct device *dev, struct device_attribute *attr,
+                        const char *buf, size_t count)
+{
+       struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev));
+       bool new;
+       int ret;
+
+       if (strtobool(buf, &new) < 0)
+               return -EINVAL;
+
+       if (new == ndev->hmb)
+               return count;
+
+       if (new) {
+               ret = nvme_setup_host_mem(ndev);
+       } else {
+               ret = nvme_set_host_mem(ndev, 0);
+               if (!ret)
+                       nvme_free_host_mem(ndev);
+       }
+
+       if (ret < 0)
+               return ret;
+
+       return count;
+}
+static DEVICE_ATTR_RW(hmb);
+
+static umode_t nvme_pci_attrs_are_visible(struct kobject *kobj,
+               struct attribute *a, int n)
+{
+       struct nvme_ctrl *ctrl =
+               dev_get_drvdata(container_of(kobj, struct device, kobj));
+       struct nvme_dev *dev = to_nvme_dev(ctrl);
+
+       if (a == &dev_attr_cmb.attr ||
+           a == &dev_attr_cmbloc.attr ||
+           a == &dev_attr_cmbsz.attr) {
+               if (!dev->cmbsz)
+                       return 0;
+       }
+       if (a == &dev_attr_hmb.attr && !ctrl->hmpre)
+               return 0;
+
+       return a->mode;
+}
+
+static struct attribute *nvme_pci_attrs[] = {
+       &dev_attr_cmb.attr,
+       &dev_attr_cmbloc.attr,
+       &dev_attr_cmbsz.attr,
+       &dev_attr_hmb.attr,
+       NULL,
+};
+
+static const struct attribute_group nvme_pci_attr_group = {
+       .attrs          = nvme_pci_attrs,
+       .is_visible     = nvme_pci_attrs_are_visible,
+};
+
 /*
  * nirqs is the number of interrupts available for write and read
  * queues. The core already reserved an interrupt for the admin queue.
@@ -2751,6 +2823,10 @@ static void nvme_reset_work(struct work_struct *work)
                goto out;
        }
 
+       if (!dev->attrs_added && !sysfs_create_group(&dev->ctrl.device->kobj,
+                       &nvme_pci_attr_group))
+               dev->attrs_added = true;
+
        nvme_start_ctrl(&dev->ctrl);
        return;
 
@@ -2999,6 +3075,13 @@ static void nvme_shutdown(struct pci_dev *pdev)
        nvme_disable_prepare_reset(dev, true);
 }
 
+static void nvme_remove_attrs(struct nvme_dev *dev)
+{
+       if (dev->attrs_added)
+               sysfs_remove_group(&dev->ctrl.device->kobj,
+                                  &nvme_pci_attr_group);
+}
+
 /*
  * The driver's remove may be called on a device in a partially initialized
  * state. This function must not have any dependencies on the device state in
@@ -3020,7 +3103,7 @@ static void nvme_remove(struct pci_dev *pdev)
        nvme_stop_ctrl(&dev->ctrl);
        nvme_remove_namespaces(&dev->ctrl);
        nvme_dev_disable(dev, true);
-       nvme_release_cmb(dev);
+       nvme_remove_attrs(dev);
        nvme_free_host_mem(dev);
        nvme_dev_remove_admin(dev);
        nvme_free_queues(dev, 0);
@@ -3047,8 +3130,13 @@ static int nvme_resume(struct device *dev)
 
        if (ndev->last_ps == U32_MAX ||
            nvme_set_power_state(ctrl, ndev->last_ps) != 0)
-               return nvme_try_sched_reset(&ndev->ctrl);
+               goto reset;
+       if (ctrl->hmpre && nvme_setup_host_mem(ndev))
+               goto reset;
+
        return 0;
+reset:
+       return nvme_try_sched_reset(ctrl);
 }
 
 static int nvme_suspend(struct device *dev)
@@ -3072,15 +3160,9 @@ static int nvme_suspend(struct device *dev)
         * the PCI bus layer to put it into D3 in order to take the PCIe link
         * down, so as to allow the platform to achieve its minimum low-power
         * state (which may not be possible if the link is up).
-        *
-        * If a host memory buffer is enabled, shut down the device as the NVMe
-        * specification allows the device to access the host memory buffer in
-        * host DRAM from all power states, but hosts will fail access to DRAM
-        * during S3.
         */
        if (pm_suspend_via_firmware() || !ctrl->npss ||
            !pcie_aspm_enabled(pdev) ||
-           ndev->nr_host_mem_descs ||
            (ndev->ctrl.quirks & NVME_QUIRK_SIMPLE_SUSPEND))
                return nvme_disable_prepare_reset(ndev, true);
 
@@ -3091,6 +3173,17 @@ static int nvme_suspend(struct device *dev)
        if (ctrl->state != NVME_CTRL_LIVE)
                goto unfreeze;
 
+       /*
+        * Host memory access may not be successful in a system suspend state,
+        * but the specification allows the controller to access memory in a
+        * non-operational power state.
+        */
+       if (ndev->hmb) {
+               ret = nvme_set_host_mem(ndev, 0);
+               if (ret < 0)
+                       goto unfreeze;
+       }
+
        ret = nvme_get_power_state(ctrl, &ndev->last_ps);
        if (ret < 0)
                goto unfreeze;
@@ -3243,12 +3336,6 @@ static const struct pci_device_id nvme_id_table[] = {
        { PCI_DEVICE(0x1b4b, 0x1092),   /* Lexar 256 GB SSD */
                .driver_data = NVME_QUIRK_NO_NS_DESC_LIST |
                                NVME_QUIRK_IGNORE_DEV_SUBNQN, },
-       { PCI_DEVICE(0x1d1d, 0x1f1f),   /* LighNVM qemu device */
-               .driver_data = NVME_QUIRK_LIGHTNVM, },
-       { PCI_DEVICE(0x1d1d, 0x2807),   /* CNEX WL */
-               .driver_data = NVME_QUIRK_LIGHTNVM, },
-       { PCI_DEVICE(0x1d1d, 0x2601),   /* CNEX Granby */
-               .driver_data = NVME_QUIRK_LIGHTNVM, },
        { PCI_DEVICE(0x10ec, 0x5762),   /* ADATA SX6000LNP */
                .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
        { PCI_DEVICE(0x1cc1, 0x8201),   /* ADATA SX8200PNP 512GB */
index 7f6b3a9..a68704e 100644 (file)
@@ -735,13 +735,13 @@ static int nvme_rdma_alloc_io_queues(struct nvme_rdma_ctrl *ctrl)
        if (ret)
                return ret;
 
-       ctrl->ctrl.queue_count = nr_io_queues + 1;
-       if (ctrl->ctrl.queue_count < 2) {
+       if (nr_io_queues == 0) {
                dev_err(ctrl->ctrl.device,
                        "unable to set any I/O queues\n");
                return -ENOMEM;
        }
 
+       ctrl->ctrl.queue_count = nr_io_queues + 1;
        dev_info(ctrl->ctrl.device,
                "creating %d I/O queues.\n", nr_io_queues);
 
@@ -1730,10 +1730,10 @@ static void nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue *queue,
        struct request *rq;
        struct nvme_rdma_request *req;
 
-       rq = blk_mq_tag_to_rq(nvme_rdma_tagset(queue), cqe->command_id);
+       rq = nvme_find_rq(nvme_rdma_tagset(queue), cqe->command_id);
        if (!rq) {
                dev_err(queue->ctrl->ctrl.device,
-                       "tag 0x%x on QP %#x not found\n",
+                       "got bad command_id %#x on QP %#x\n",
                        cqe->command_id, queue->qp->qp_num);
                nvme_rdma_error_recovery(queue->ctrl);
                return;
index 8cb15ee..6450256 100644 (file)
@@ -487,11 +487,11 @@ static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
 {
        struct request *rq;
 
-       rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), cqe->command_id);
+       rq = nvme_find_rq(nvme_tcp_tagset(queue), cqe->command_id);
        if (!rq) {
                dev_err(queue->ctrl->ctrl.device,
-                       "queue %d tag 0x%x not found\n",
-                       nvme_tcp_queue_id(queue), cqe->command_id);
+                       "got bad cqe.command_id %#x on queue %d\n",
+                       cqe->command_id, nvme_tcp_queue_id(queue));
                nvme_tcp_error_recovery(&queue->ctrl->ctrl);
                return -EINVAL;
        }
@@ -508,11 +508,11 @@ static int nvme_tcp_handle_c2h_data(struct nvme_tcp_queue *queue,
 {
        struct request *rq;
 
-       rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
+       rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id);
        if (!rq) {
                dev_err(queue->ctrl->ctrl.device,
-                       "queue %d tag %#x not found\n",
-                       nvme_tcp_queue_id(queue), pdu->command_id);
+                       "got bad c2hdata.command_id %#x on queue %d\n",
+                       pdu->command_id, nvme_tcp_queue_id(queue));
                return -ENOENT;
        }
 
@@ -606,7 +606,7 @@ static int nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req,
        data->hdr.plen =
                cpu_to_le32(data->hdr.hlen + hdgst + req->pdu_len + ddgst);
        data->ttag = pdu->ttag;
-       data->command_id = rq->tag;
+       data->command_id = nvme_cid(rq);
        data->data_offset = cpu_to_le32(req->data_sent);
        data->data_length = cpu_to_le32(req->pdu_len);
        return 0;
@@ -619,11 +619,11 @@ static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue,
        struct request *rq;
        int ret;
 
-       rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
+       rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id);
        if (!rq) {
                dev_err(queue->ctrl->ctrl.device,
-                       "queue %d tag %#x not found\n",
-                       nvme_tcp_queue_id(queue), pdu->command_id);
+                       "got bad r2t.command_id %#x on queue %d\n",
+                       pdu->command_id, nvme_tcp_queue_id(queue));
                return -ENOENT;
        }
        req = blk_mq_rq_to_pdu(rq);
@@ -702,17 +702,9 @@ static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
                              unsigned int *offset, size_t *len)
 {
        struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
-       struct nvme_tcp_request *req;
-       struct request *rq;
-
-       rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
-       if (!rq) {
-               dev_err(queue->ctrl->ctrl.device,
-                       "queue %d tag %#x not found\n",
-                       nvme_tcp_queue_id(queue), pdu->command_id);
-               return -ENOENT;
-       }
-       req = blk_mq_rq_to_pdu(rq);
+       struct request *rq =
+               nvme_cid_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
+       struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
 
        while (true) {
                int recv_len, ret;
@@ -804,8 +796,8 @@ static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue,
        }
 
        if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
-               struct request *rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue),
-                                               pdu->command_id);
+               struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue),
+                                       pdu->command_id);
 
                nvme_tcp_end_request(rq, NVME_SC_SUCCESS);
                queue->nr_cqe++;
@@ -1228,6 +1220,7 @@ static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid)
 
        sock_release(queue->sock);
        kfree(queue->pdu);
+       mutex_destroy(&queue->send_mutex);
        mutex_destroy(&queue->queue_lock);
 }
 
@@ -1533,6 +1526,7 @@ err_sock:
        sock_release(queue->sock);
        queue->sock = NULL;
 err_destroy_mutex:
+       mutex_destroy(&queue->send_mutex);
        mutex_destroy(&queue->queue_lock);
        return ret;
 }
@@ -1769,13 +1763,13 @@ static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
        if (ret)
                return ret;
 
-       ctrl->queue_count = nr_io_queues + 1;
-       if (ctrl->queue_count < 2) {
+       if (nr_io_queues == 0) {
                dev_err(ctrl->device,
                        "unable to set any I/O queues\n");
                return -ENOMEM;
        }
 
+       ctrl->queue_count = nr_io_queues + 1;
        dev_info(ctrl->device,
                "creating %d I/O queues.\n", nr_io_queues);
 
index 6543015..2a89c5a 100644 (file)
@@ -72,6 +72,20 @@ static const char *nvme_trace_admin_identify(struct trace_seq *p, u8 *cdw10)
        return ret;
 }
 
+static const char *nvme_trace_admin_set_features(struct trace_seq *p,
+                                                u8 *cdw10)
+{
+       const char *ret = trace_seq_buffer_ptr(p);
+       u8 fid = cdw10[0];
+       u8 sv = cdw10[3] & 0x8;
+       u32 cdw11 = get_unaligned_le32(cdw10 + 4);
+
+       trace_seq_printf(p, "fid=0x%x, sv=0x%x, cdw11=0x%x", fid, sv, cdw11);
+       trace_seq_putc(p, 0);
+
+       return ret;
+}
+
 static const char *nvme_trace_admin_get_features(struct trace_seq *p,
                                                 u8 *cdw10)
 {
@@ -80,7 +94,7 @@ static const char *nvme_trace_admin_get_features(struct trace_seq *p,
        u8 sel = cdw10[1] & 0x7;
        u32 cdw11 = get_unaligned_le32(cdw10 + 4);
 
-       trace_seq_printf(p, "fid=0x%x sel=0x%x cdw11=0x%x", fid, sel, cdw11);
+       trace_seq_printf(p, "fid=0x%x, sel=0x%x, cdw11=0x%x", fid, sel, cdw11);
        trace_seq_putc(p, 0);
 
        return ret;
@@ -201,6 +215,8 @@ const char *nvme_trace_parse_admin_cmd(struct trace_seq *p,
                return nvme_trace_create_cq(p, cdw10);
        case nvme_admin_identify:
                return nvme_trace_admin_identify(p, cdw10);
+       case nvme_admin_set_features:
+               return nvme_trace_admin_set_features(p, cdw10);
        case nvme_admin_get_features:
                return nvme_trace_admin_get_features(p, cdw10);
        case nvme_admin_get_lba_status:
index 4be2ece..973561c 100644 (file)
@@ -31,7 +31,6 @@ config NVME_TARGET_PASSTHRU
 config NVME_TARGET_LOOP
        tristate "NVMe loopback device support"
        depends on NVME_TARGET
-       select NVME_CORE
        select NVME_FABRICS
        select SG_POOL
        help
@@ -65,7 +64,6 @@ config NVME_TARGET_FC
 config NVME_TARGET_FCLOOP
        tristate "NVMe over Fabrics FC Transport Loopback Test driver"
        depends on NVME_TARGET
-       select NVME_CORE
        select NVME_FABRICS
        select SG_POOL
        depends on NVME_FC
index ac7210a..66d05ee 100644 (file)
@@ -802,6 +802,7 @@ void nvmet_sq_destroy(struct nvmet_sq *sq)
                 * controller teardown as a result of a keep-alive expiration.
                 */
                ctrl->reset_tbkas = true;
+               sq->ctrl->sqs[sq->qid] = NULL;
                nvmet_ctrl_put(ctrl);
                sq->ctrl = NULL; /* allows reusing the queue later */
        }
index 7d0f352..7d0454c 100644 (file)
@@ -109,21 +109,38 @@ static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req)
        u16 qid = le16_to_cpu(c->qid);
        u16 sqsize = le16_to_cpu(c->sqsize);
        struct nvmet_ctrl *old;
+       u16 mqes = NVME_CAP_MQES(ctrl->cap);
        u16 ret;
 
-       old = cmpxchg(&req->sq->ctrl, NULL, ctrl);
-       if (old) {
-               pr_warn("queue already connected!\n");
-               req->error_loc = offsetof(struct nvmf_connect_command, opcode);
-               return NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;
-       }
        if (!sqsize) {
                pr_warn("queue size zero!\n");
                req->error_loc = offsetof(struct nvmf_connect_command, sqsize);
+               req->cqe->result.u32 = IPO_IATTR_CONNECT_SQE(sqsize);
                ret = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
                goto err;
        }
 
+       if (ctrl->sqs[qid] != NULL) {
+               pr_warn("qid %u has already been created\n", qid);
+               req->error_loc = offsetof(struct nvmf_connect_command, qid);
+               return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
+       }
+
+       if (sqsize > mqes) {
+               pr_warn("sqsize %u is larger than MQES supported %u cntlid %d\n",
+                               sqsize, mqes, ctrl->cntlid);
+               req->error_loc = offsetof(struct nvmf_connect_command, sqsize);
+               req->cqe->result.u32 = IPO_IATTR_CONNECT_SQE(sqsize);
+               return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
+       }
+
+       old = cmpxchg(&req->sq->ctrl, NULL, ctrl);
+       if (old) {
+               pr_warn("queue already connected!\n");
+               req->error_loc = offsetof(struct nvmf_connect_command, opcode);
+               return NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;
+       }
+
        /* note: convert queue size from 0's-based value to 1's-based value */
        nvmet_cq_setup(ctrl, req->cq, qid, sqsize + 1);
        nvmet_sq_setup(ctrl, req->sq, qid, sqsize + 1);
@@ -138,6 +155,7 @@ static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req)
                if (ret) {
                        pr_err("failed to install queue %d cntlid %d ret %x\n",
                                qid, ctrl->cntlid, ret);
+                       ctrl->sqs[qid] = NULL;
                        goto err;
                }
        }
@@ -260,11 +278,11 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
        }
 
        status = nvmet_install_queue(ctrl, req);
-       if (status) {
-               /* pass back cntlid that had the issue of installing queue */
-               req->cqe->result.u16 = cpu_to_le16(ctrl->cntlid);
+       if (status)
                goto out_ctrl_put;
-       }
+
+       /* pass back cntlid for successful completion */
+       req->cqe->result.u16 = cpu_to_le16(ctrl->cntlid);
 
        pr_debug("adding queue %d to ctrl %d.\n", qid, ctrl->cntlid);
 
index 3a17a7e..0285ccc 100644 (file)
@@ -107,10 +107,10 @@ static void nvme_loop_queue_response(struct nvmet_req *req)
        } else {
                struct request *rq;
 
-               rq = blk_mq_tag_to_rq(nvme_loop_tagset(queue), cqe->command_id);
+               rq = nvme_find_rq(nvme_loop_tagset(queue), cqe->command_id);
                if (!rq) {
                        dev_err(queue->ctrl->ctrl.device,
-                               "tag 0x%x on queue %d not found\n",
+                               "got bad command_id %#x on queue %d\n",
                                cqe->command_id, nvme_loop_queue_idx(queue));
                        return;
                }
index 1373a3c..bff454d 100644 (file)
@@ -27,7 +27,7 @@ static const char *nvmet_trace_admin_get_features(struct trace_seq *p,
        u8 sel = cdw10[1] & 0x7;
        u32 cdw11 = get_unaligned_le32(cdw10 + 4);
 
-       trace_seq_printf(p, "fid=0x%x sel=0x%x cdw11=0x%x", fid, sel, cdw11);
+       trace_seq_printf(p, "fid=0x%x, sel=0x%x, cdw11=0x%x", fid, sel, cdw11);
        trace_seq_putc(p, 0);
 
        return ret;
@@ -49,6 +49,20 @@ static const char *nvmet_trace_get_lba_status(struct trace_seq *p,
        return ret;
 }
 
+static const char *nvmet_trace_admin_set_features(struct trace_seq *p,
+                                                u8 *cdw10)
+{
+       const char *ret = trace_seq_buffer_ptr(p);
+       u8 fid = cdw10[0];
+       u8 sv = cdw10[3] & 0x8;
+       u32 cdw11 = get_unaligned_le32(cdw10 + 4);
+
+       trace_seq_printf(p, "fid=0x%x, sv=0x%x, cdw11=0x%x", fid, sv, cdw11);
+       trace_seq_putc(p, 0);
+
+       return ret;
+}
+
 static const char *nvmet_trace_read_write(struct trace_seq *p, u8 *cdw10)
 {
        const char *ret = trace_seq_buffer_ptr(p);
@@ -94,6 +108,8 @@ const char *nvmet_trace_parse_admin_cmd(struct trace_seq *p,
        switch (opcode) {
        case nvme_admin_identify:
                return nvmet_trace_admin_identify(p, cdw10);
+       case nvme_admin_set_features:
+               return nvmet_trace_admin_set_features(p, cdw10);
        case nvme_admin_get_features:
                return nvmet_trace_admin_get_features(p, cdw10);
        case nvme_admin_get_lba_status:
index 17f8b7a..46bc30f 100644 (file)
@@ -115,14 +115,11 @@ void nvmet_execute_identify_cns_cs_ns(struct nvmet_req *req)
        }
 
        status = nvmet_req_find_ns(req);
-       if (status) {
-               status = NVME_SC_INTERNAL;
+       if (status)
                goto done;
-       }
 
        if (!bdev_is_zoned(req->ns->bdev)) {
                req->error_loc = offsetof(struct nvme_identify, nsid);
-               status = NVME_SC_INVALID_NS | NVME_SC_DNR;
                goto done;
        }
 
index b335c07..04b4691 100644 (file)
@@ -893,6 +893,10 @@ static int _set_required_opps(struct device *dev,
        if (!required_opp_tables)
                return 0;
 
+       /* required-opps not fully initialized yet */
+       if (lazy_linking_pending(opp_table))
+               return -EBUSY;
+
        /*
         * We only support genpd's OPPs in the "required-opps" for now, as we
         * don't know much about other use cases. Error out if the required OPP
@@ -903,10 +907,6 @@ static int _set_required_opps(struct device *dev,
                return -ENOENT;
        }
 
-       /* required-opps not fully initialized yet */
-       if (lazy_linking_pending(opp_table))
-               return -EBUSY;
-
        /* Single genpd case */
        if (!genpd_virt_devs)
                return _set_required_opp(dev, dev, opp, 0);
@@ -1856,9 +1856,6 @@ void dev_pm_opp_put_supported_hw(struct opp_table *opp_table)
        if (unlikely(!opp_table))
                return;
 
-       /* Make sure there are no concurrent readers while updating opp_table */
-       WARN_ON(!list_empty(&opp_table->opp_list));
-
        kfree(opp_table->supported_hw);
        opp_table->supported_hw = NULL;
        opp_table->supported_hw_count = 0;
@@ -1944,9 +1941,6 @@ void dev_pm_opp_put_prop_name(struct opp_table *opp_table)
        if (unlikely(!opp_table))
                return;
 
-       /* Make sure there are no concurrent readers while updating opp_table */
-       WARN_ON(!list_empty(&opp_table->opp_list));
-
        kfree(opp_table->prop_name);
        opp_table->prop_name = NULL;
 
@@ -2056,9 +2050,6 @@ void dev_pm_opp_put_regulators(struct opp_table *opp_table)
        if (!opp_table->regulators)
                goto put_opp_table;
 
-       /* Make sure there are no concurrent readers while updating opp_table */
-       WARN_ON(!list_empty(&opp_table->opp_list));
-
        if (opp_table->enabled) {
                for (i = opp_table->regulator_count - 1; i >= 0; i--)
                        regulator_disable(opp_table->regulators[i]);
@@ -2178,9 +2169,6 @@ void dev_pm_opp_put_clkname(struct opp_table *opp_table)
        if (unlikely(!opp_table))
                return;
 
-       /* Make sure there are no concurrent readers while updating opp_table */
-       WARN_ON(!list_empty(&opp_table->opp_list));
-
        clk_put(opp_table->clk);
        opp_table->clk = ERR_PTR(-EINVAL);
 
@@ -2279,9 +2267,6 @@ void dev_pm_opp_unregister_set_opp_helper(struct opp_table *opp_table)
        if (unlikely(!opp_table))
                return;
 
-       /* Make sure there are no concurrent readers while updating opp_table */
-       WARN_ON(!list_empty(&opp_table->opp_list));
-
        opp_table->set_opp = NULL;
 
        mutex_lock(&opp_table->lock);
index d298e38..67f2e07 100644 (file)
@@ -964,8 +964,9 @@ static int _of_add_opp_table_v2(struct device *dev, struct opp_table *opp_table)
                }
        }
 
-       /* There should be one of more OPP defined */
-       if (WARN_ON(!count)) {
+       /* There should be one or more OPPs defined */
+       if (!count) {
+               dev_err(dev, "%s: no supported OPPs", __func__);
                ret = -ENOENT;
                goto remove_static_opp;
        }
index 896a45b..654ac4a 100644 (file)
@@ -145,7 +145,7 @@ static int ixp4xx_pci_check_master_abort(struct ixp4xx_pci *p)
        return 0;
 }
 
-static int ixp4xx_pci_read(struct ixp4xx_pci *p, u32 addr, u32 cmd, u32 *data)
+static int ixp4xx_pci_read_indirect(struct ixp4xx_pci *p, u32 addr, u32 cmd, u32 *data)
 {
        ixp4xx_writel(p, IXP4XX_PCI_NP_AD, addr);
 
@@ -170,7 +170,7 @@ static int ixp4xx_pci_read(struct ixp4xx_pci *p, u32 addr, u32 cmd, u32 *data)
        return ixp4xx_pci_check_master_abort(p);
 }
 
-static int ixp4xx_pci_write(struct ixp4xx_pci *p, u32 addr, u32 cmd, u32 data)
+static int ixp4xx_pci_write_indirect(struct ixp4xx_pci *p, u32 addr, u32 cmd, u32 data)
 {
        ixp4xx_writel(p, IXP4XX_PCI_NP_AD, addr);
 
@@ -308,7 +308,7 @@ static int ixp4xx_pci_read_config(struct pci_bus *bus, unsigned int devfn,
        dev_dbg(p->dev, "read_config from %d size %d dev %d:%d:%d address: %08x cmd: %08x\n",
                where, size, bus_num, PCI_SLOT(devfn), PCI_FUNC(devfn), addr, cmd);
 
-       ret = ixp4xx_pci_read(p, addr, cmd, &val);
+       ret = ixp4xx_pci_read_indirect(p, addr, cmd, &val);
        if (ret)
                return PCIBIOS_DEVICE_NOT_FOUND;
 
@@ -356,7 +356,7 @@ static int ixp4xx_pci_write_config(struct pci_bus *bus,  unsigned int devfn,
        dev_dbg(p->dev, "write_config_byte %#x to %d size %d dev %d:%d:%d addr: %08x cmd %08x\n",
                value, where, size, bus_num, PCI_SLOT(devfn), PCI_FUNC(devfn), addr, cmd);
 
-       ret = ixp4xx_pci_write(p, addr, cmd, val);
+       ret = ixp4xx_pci_write_indirect(p, addr, cmd, val);
        if (ret)
                return PCIBIOS_DEVICE_NOT_FOUND;
 
index 9232255..0099a00 100644 (file)
@@ -129,93 +129,95 @@ void __weak arch_restore_msi_irqs(struct pci_dev *dev)
        return default_restore_msi_irqs(dev);
 }
 
-static inline __attribute_const__ u32 msi_mask(unsigned x)
-{
-       /* Don't shift by >= width of type */
-       if (x >= 5)
-               return 0xffffffff;
-       return (1 << (1 << x)) - 1;
-}
-
 /*
  * PCI 2.3 does not specify mask bits for each MSI interrupt.  Attempting to
  * mask all MSI interrupts by clearing the MSI enable bit does not work
  * reliably as devices without an INTx disable bit will then generate a
  * level IRQ which will never be cleared.
  */
-u32 __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
+static inline __attribute_const__ u32 msi_multi_mask(struct msi_desc *desc)
 {
-       u32 mask_bits = desc->masked;
+       /* Don't shift by >= width of type */
+       if (desc->msi_attrib.multi_cap >= 5)
+               return 0xffffffff;
+       return (1 << (1 << desc->msi_attrib.multi_cap)) - 1;
+}
 
-       if (pci_msi_ignore_mask || !desc->msi_attrib.maskbit)
-               return 0;
+static noinline void pci_msi_update_mask(struct msi_desc *desc, u32 clear, u32 set)
+{
+       raw_spinlock_t *lock = &desc->dev->msi_lock;
+       unsigned long flags;
 
-       mask_bits &= ~mask;
-       mask_bits |= flag;
+       raw_spin_lock_irqsave(lock, flags);
+       desc->msi_mask &= ~clear;
+       desc->msi_mask |= set;
        pci_write_config_dword(msi_desc_to_pci_dev(desc), desc->mask_pos,
-                              mask_bits);
-
-       return mask_bits;
+                              desc->msi_mask);
+       raw_spin_unlock_irqrestore(lock, flags);
 }
 
-static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
+static inline void pci_msi_mask(struct msi_desc *desc, u32 mask)
 {
-       desc->masked = __pci_msi_desc_mask_irq(desc, mask, flag);
+       pci_msi_update_mask(desc, 0, mask);
 }
 
-static void __iomem *pci_msix_desc_addr(struct msi_desc *desc)
+static inline void pci_msi_unmask(struct msi_desc *desc, u32 mask)
 {
-       if (desc->msi_attrib.is_virtual)
-               return NULL;
+       pci_msi_update_mask(desc, mask, 0);
+}
 
-       return desc->mask_base +
-               desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
+static inline void __iomem *pci_msix_desc_addr(struct msi_desc *desc)
+{
+       return desc->mask_base + desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
 }
 
 /*
- * This internal function does not flush PCI writes to the device.
- * All users must ensure that they read from the device before either
- * assuming that the device state is up to date, or returning out of this
- * file.  This saves a few milliseconds when initialising devices with lots
- * of MSI-X interrupts.
+ * This internal function does not flush PCI writes to the device.  All
+ * users must ensure that they read from the device before either assuming
+ * that the device state is up to date, or returning out of this file.
+ * It does not affect the msi_desc::msix_ctrl cache either. Use with care!
  */
-u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag)
+static void pci_msix_write_vector_ctrl(struct msi_desc *desc, u32 ctrl)
 {
-       u32 mask_bits = desc->masked;
-       void __iomem *desc_addr;
+       void __iomem *desc_addr = pci_msix_desc_addr(desc);
 
-       if (pci_msi_ignore_mask)
-               return 0;
-
-       desc_addr = pci_msix_desc_addr(desc);
-       if (!desc_addr)
-               return 0;
-
-       mask_bits &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT;
-       if (flag & PCI_MSIX_ENTRY_CTRL_MASKBIT)
-               mask_bits |= PCI_MSIX_ENTRY_CTRL_MASKBIT;
+       writel(ctrl, desc_addr + PCI_MSIX_ENTRY_VECTOR_CTRL);
+}
 
-       writel(mask_bits, desc_addr + PCI_MSIX_ENTRY_VECTOR_CTRL);
+static inline void pci_msix_mask(struct msi_desc *desc)
+{
+       desc->msix_ctrl |= PCI_MSIX_ENTRY_CTRL_MASKBIT;
+       pci_msix_write_vector_ctrl(desc, desc->msix_ctrl);
+       /* Flush write to device */
+       readl(desc->mask_base);
+}
 
-       return mask_bits;
+static inline void pci_msix_unmask(struct msi_desc *desc)
+{
+       desc->msix_ctrl &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT;
+       pci_msix_write_vector_ctrl(desc, desc->msix_ctrl);
 }
 
-static void msix_mask_irq(struct msi_desc *desc, u32 flag)
+static void __pci_msi_mask_desc(struct msi_desc *desc, u32 mask)
 {
-       desc->masked = __pci_msix_desc_mask_irq(desc, flag);
+       if (pci_msi_ignore_mask || desc->msi_attrib.is_virtual)
+               return;
+
+       if (desc->msi_attrib.is_msix)
+               pci_msix_mask(desc);
+       else if (desc->msi_attrib.maskbit)
+               pci_msi_mask(desc, mask);
 }
 
-static void msi_set_mask_bit(struct irq_data *data, u32 flag)
+static void __pci_msi_unmask_desc(struct msi_desc *desc, u32 mask)
 {
-       struct msi_desc *desc = irq_data_get_msi_desc(data);
+       if (pci_msi_ignore_mask || desc->msi_attrib.is_virtual)
+               return;
 
-       if (desc->msi_attrib.is_msix) {
-               msix_mask_irq(desc, flag);
-               readl(desc->mask_base);         /* Flush write to device */
-       } else {
-               unsigned offset = data->irq - desc->irq;
-               msi_mask_irq(desc, 1 << offset, flag << offset);
-       }
+       if (desc->msi_attrib.is_msix)
+               pci_msix_unmask(desc);
+       else if (desc->msi_attrib.maskbit)
+               pci_msi_unmask(desc, mask);
 }
 
 /**
@@ -224,7 +226,9 @@ static void msi_set_mask_bit(struct irq_data *data, u32 flag)
  */
 void pci_msi_mask_irq(struct irq_data *data)
 {
-       msi_set_mask_bit(data, 1);
+       struct msi_desc *desc = irq_data_get_msi_desc(data);
+
+       __pci_msi_mask_desc(desc, BIT(data->irq - desc->irq));
 }
 EXPORT_SYMBOL_GPL(pci_msi_mask_irq);
 
@@ -234,7 +238,9 @@ EXPORT_SYMBOL_GPL(pci_msi_mask_irq);
  */
 void pci_msi_unmask_irq(struct irq_data *data)
 {
-       msi_set_mask_bit(data, 0);
+       struct msi_desc *desc = irq_data_get_msi_desc(data);
+
+       __pci_msi_unmask_desc(desc, BIT(data->irq - desc->irq));
 }
 EXPORT_SYMBOL_GPL(pci_msi_unmask_irq);
 
@@ -255,10 +261,8 @@ void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
        if (entry->msi_attrib.is_msix) {
                void __iomem *base = pci_msix_desc_addr(entry);
 
-               if (!base) {
-                       WARN_ON(1);
+               if (WARN_ON_ONCE(entry->msi_attrib.is_virtual))
                        return;
-               }
 
                msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR);
                msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR);
@@ -289,13 +293,32 @@ void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
                /* Don't touch the hardware now */
        } else if (entry->msi_attrib.is_msix) {
                void __iomem *base = pci_msix_desc_addr(entry);
+               u32 ctrl = entry->msix_ctrl;
+               bool unmasked = !(ctrl & PCI_MSIX_ENTRY_CTRL_MASKBIT);
 
-               if (!base)
+               if (entry->msi_attrib.is_virtual)
                        goto skip;
 
+               /*
+                * The specification mandates that the entry is masked
+                * when the message is modified:
+                *
+                * "If software changes the Address or Data value of an
+                * entry while the entry is unmasked, the result is
+                * undefined."
+                */
+               if (unmasked)
+                       pci_msix_write_vector_ctrl(entry, ctrl | PCI_MSIX_ENTRY_CTRL_MASKBIT);
+
                writel(msg->address_lo, base + PCI_MSIX_ENTRY_LOWER_ADDR);
                writel(msg->address_hi, base + PCI_MSIX_ENTRY_UPPER_ADDR);
                writel(msg->data, base + PCI_MSIX_ENTRY_DATA);
+
+               if (unmasked)
+                       pci_msix_write_vector_ctrl(entry, ctrl);
+
+               /* Ensure that the writes are visible in the device */
+               readl(base + PCI_MSIX_ENTRY_DATA);
        } else {
                int pos = dev->msi_cap;
                u16 msgctl;
@@ -316,6 +339,8 @@ void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
                        pci_write_config_word(dev, pos + PCI_MSI_DATA_32,
                                              msg->data);
                }
+               /* Ensure that the writes are visible in the device */
+               pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &msgctl);
        }
 
 skip:
@@ -338,9 +363,7 @@ static void free_msi_irqs(struct pci_dev *dev)
 {
        struct list_head *msi_list = dev_to_msi_list(&dev->dev);
        struct msi_desc *entry, *tmp;
-       struct attribute **msi_attrs;
-       struct device_attribute *dev_attr;
-       int i, count = 0;
+       int i;
 
        for_each_pci_msi_entry(entry, dev)
                if (entry->irq)
@@ -360,18 +383,7 @@ static void free_msi_irqs(struct pci_dev *dev)
        }
 
        if (dev->msi_irq_groups) {
-               sysfs_remove_groups(&dev->dev.kobj, dev->msi_irq_groups);
-               msi_attrs = dev->msi_irq_groups[0]->attrs;
-               while (msi_attrs[count]) {
-                       dev_attr = container_of(msi_attrs[count],
-                                               struct device_attribute, attr);
-                       kfree(dev_attr->attr.name);
-                       kfree(dev_attr);
-                       ++count;
-               }
-               kfree(msi_attrs);
-               kfree(dev->msi_irq_groups[0]);
-               kfree(dev->msi_irq_groups);
+               msi_destroy_sysfs(&dev->dev, dev->msi_irq_groups);
                dev->msi_irq_groups = NULL;
        }
 }
@@ -408,8 +420,7 @@ static void __pci_restore_msi_state(struct pci_dev *dev)
        arch_restore_msi_irqs(dev);
 
        pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);
-       msi_mask_irq(entry, msi_mask(entry->msi_attrib.multi_cap),
-                    entry->masked);
+       pci_msi_update_mask(entry, 0, 0);
        control &= ~PCI_MSI_FLAGS_QSIZE;
        control |= (entry->msi_attrib.multiple << 4) | PCI_MSI_FLAGS_ENABLE;
        pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control);
@@ -440,7 +451,7 @@ static void __pci_restore_msix_state(struct pci_dev *dev)
 
        arch_restore_msi_irqs(dev);
        for_each_pci_msi_entry(entry, dev)
-               msix_mask_irq(entry, entry->masked);
+               pci_msix_write_vector_ctrl(entry, entry->msix_ctrl);
 
        pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0);
 }
@@ -452,102 +463,6 @@ void pci_restore_msi_state(struct pci_dev *dev)
 }
 EXPORT_SYMBOL_GPL(pci_restore_msi_state);
 
-static ssize_t msi_mode_show(struct device *dev, struct device_attribute *attr,
-                            char *buf)
-{
-       struct msi_desc *entry;
-       unsigned long irq;
-       int retval;
-
-       retval = kstrtoul(attr->attr.name, 10, &irq);
-       if (retval)
-               return retval;
-
-       entry = irq_get_msi_desc(irq);
-       if (!entry)
-               return -ENODEV;
-
-       return sysfs_emit(buf, "%s\n",
-                         entry->msi_attrib.is_msix ? "msix" : "msi");
-}
-
-static int populate_msi_sysfs(struct pci_dev *pdev)
-{
-       struct attribute **msi_attrs;
-       struct attribute *msi_attr;
-       struct device_attribute *msi_dev_attr;
-       struct attribute_group *msi_irq_group;
-       const struct attribute_group **msi_irq_groups;
-       struct msi_desc *entry;
-       int ret = -ENOMEM;
-       int num_msi = 0;
-       int count = 0;
-       int i;
-
-       /* Determine how many msi entries we have */
-       for_each_pci_msi_entry(entry, pdev)
-               num_msi += entry->nvec_used;
-       if (!num_msi)
-               return 0;
-
-       /* Dynamically create the MSI attributes for the PCI device */
-       msi_attrs = kcalloc(num_msi + 1, sizeof(void *), GFP_KERNEL);
-       if (!msi_attrs)
-               return -ENOMEM;
-       for_each_pci_msi_entry(entry, pdev) {
-               for (i = 0; i < entry->nvec_used; i++) {
-                       msi_dev_attr = kzalloc(sizeof(*msi_dev_attr), GFP_KERNEL);
-                       if (!msi_dev_attr)
-                               goto error_attrs;
-                       msi_attrs[count] = &msi_dev_attr->attr;
-
-                       sysfs_attr_init(&msi_dev_attr->attr);
-                       msi_dev_attr->attr.name = kasprintf(GFP_KERNEL, "%d",
-                                                           entry->irq + i);
-                       if (!msi_dev_attr->attr.name)
-                               goto error_attrs;
-                       msi_dev_attr->attr.mode = S_IRUGO;
-                       msi_dev_attr->show = msi_mode_show;
-                       ++count;
-               }
-       }
-
-       msi_irq_group = kzalloc(sizeof(*msi_irq_group), GFP_KERNEL);
-       if (!msi_irq_group)
-               goto error_attrs;
-       msi_irq_group->name = "msi_irqs";
-       msi_irq_group->attrs = msi_attrs;
-
-       msi_irq_groups = kcalloc(2, sizeof(void *), GFP_KERNEL);
-       if (!msi_irq_groups)
-               goto error_irq_group;
-       msi_irq_groups[0] = msi_irq_group;
-
-       ret = sysfs_create_groups(&pdev->dev.kobj, msi_irq_groups);
-       if (ret)
-               goto error_irq_groups;
-       pdev->msi_irq_groups = msi_irq_groups;
-
-       return 0;
-
-error_irq_groups:
-       kfree(msi_irq_groups);
-error_irq_group:
-       kfree(msi_irq_group);
-error_attrs:
-       count = 0;
-       msi_attr = msi_attrs[count];
-       while (msi_attr) {
-               msi_dev_attr = container_of(msi_attr, struct device_attribute, attr);
-               kfree(msi_attr->name);
-               kfree(msi_dev_attr);
-               ++count;
-               msi_attr = msi_attrs[count];
-       }
-       kfree(msi_attrs);
-       return ret;
-}
-
 static struct msi_desc *
 msi_setup_entry(struct pci_dev *dev, int nvec, struct irq_affinity *affd)
 {
@@ -581,7 +496,7 @@ msi_setup_entry(struct pci_dev *dev, int nvec, struct irq_affinity *affd)
 
        /* Save the initial mask status */
        if (entry->msi_attrib.maskbit)
-               pci_read_config_dword(dev, entry->mask_pos, &entry->masked);
+               pci_read_config_dword(dev, entry->mask_pos, &entry->msi_mask);
 
 out:
        kfree(masks);
@@ -592,8 +507,11 @@ static int msi_verify_entries(struct pci_dev *dev)
 {
        struct msi_desc *entry;
 
+       if (!dev->no_64bit_msi)
+               return 0;
+
        for_each_pci_msi_entry(entry, dev) {
-               if (entry->msg.address_hi && dev->no_64bit_msi) {
+               if (entry->msg.address_hi) {
                        pci_err(dev, "arch assigned 64-bit MSI address %#x%08x but device only supports 32 bits\n",
                                entry->msg.address_hi, entry->msg.address_lo);
                        return -EIO;
@@ -619,7 +537,6 @@ static int msi_capability_init(struct pci_dev *dev, int nvec,
 {
        struct msi_desc *entry;
        int ret;
-       unsigned mask;
 
        pci_msi_set_enable(dev, 0);     /* Disable MSI during set up */
 
@@ -628,31 +545,23 @@ static int msi_capability_init(struct pci_dev *dev, int nvec,
                return -ENOMEM;
 
        /* All MSIs are unmasked by default; mask them all */
-       mask = msi_mask(entry->msi_attrib.multi_cap);
-       msi_mask_irq(entry, mask, mask);
+       pci_msi_mask(entry, msi_multi_mask(entry));
 
        list_add_tail(&entry->list, dev_to_msi_list(&dev->dev));
 
        /* Configure MSI capability structure */
        ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI);
-       if (ret) {
-               msi_mask_irq(entry, mask, ~mask);
-               free_msi_irqs(dev);
-               return ret;
-       }
+       if (ret)
+               goto err;
 
        ret = msi_verify_entries(dev);
-       if (ret) {
-               msi_mask_irq(entry, mask, ~mask);
-               free_msi_irqs(dev);
-               return ret;
-       }
+       if (ret)
+               goto err;
 
-       ret = populate_msi_sysfs(dev);
-       if (ret) {
-               msi_mask_irq(entry, mask, ~mask);
-               free_msi_irqs(dev);
-               return ret;
+       dev->msi_irq_groups = msi_populate_sysfs(&dev->dev);
+       if (IS_ERR(dev->msi_irq_groups)) {
+               ret = PTR_ERR(dev->msi_irq_groups);
+               goto err;
        }
 
        /* Set MSI enabled bits */
@@ -663,6 +572,11 @@ static int msi_capability_init(struct pci_dev *dev, int nvec,
        pcibios_free_irq(dev);
        dev->irq = entry->irq;
        return 0;
+
+err:
+       pci_msi_unmask(entry, msi_multi_mask(entry));
+       free_msi_irqs(dev);
+       return ret;
 }
 
 static void __iomem *msix_map_region(struct pci_dev *dev, unsigned nr_entries)
@@ -691,6 +605,7 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
 {
        struct irq_affinity_desc *curmsk, *masks = NULL;
        struct msi_desc *entry;
+       void __iomem *addr;
        int ret, i;
        int vec_count = pci_msix_vec_count(dev);
 
@@ -711,6 +626,7 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
 
                entry->msi_attrib.is_msix       = 1;
                entry->msi_attrib.is_64         = 1;
+
                if (entries)
                        entry->msi_attrib.entry_nr = entries[i].entry;
                else
@@ -722,6 +638,11 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
                entry->msi_attrib.default_irq   = dev->irq;
                entry->mask_base                = base;
 
+               if (!entry->msi_attrib.is_virtual) {
+                       addr = pci_msix_desc_addr(entry);
+                       entry->msix_ctrl = readl(addr + PCI_MSIX_ENTRY_VECTOR_CTRL);
+               }
+
                list_add_tail(&entry->list, dev_to_msi_list(&dev->dev));
                if (masks)
                        curmsk++;
@@ -732,26 +653,28 @@ out:
        return ret;
 }
 
-static void msix_program_entries(struct pci_dev *dev,
-                                struct msix_entry *entries)
+static void msix_update_entries(struct pci_dev *dev, struct msix_entry *entries)
 {
        struct msi_desc *entry;
-       int i = 0;
-       void __iomem *desc_addr;
 
        for_each_pci_msi_entry(entry, dev) {
-               if (entries)
-                       entries[i++].vector = entry->irq;
+               if (entries) {
+                       entries->vector = entry->irq;
+                       entries++;
+               }
+       }
+}
 
-               desc_addr = pci_msix_desc_addr(entry);
-               if (desc_addr)
-                       entry->masked = readl(desc_addr +
-                                             PCI_MSIX_ENTRY_VECTOR_CTRL);
-               else
-                       entry->masked = 0;
+static void msix_mask_all(void __iomem *base, int tsize)
+{
+       u32 ctrl = PCI_MSIX_ENTRY_CTRL_MASKBIT;
+       int i;
 
-               msix_mask_irq(entry, 1);
-       }
+       if (pci_msi_ignore_mask)
+               return;
+
+       for (i = 0; i < tsize; i++, base += PCI_MSIX_ENTRY_SIZE)
+               writel(ctrl, base + PCI_MSIX_ENTRY_VECTOR_CTRL);
 }
 
 /**
@@ -768,22 +691,33 @@ static void msix_program_entries(struct pci_dev *dev,
 static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
                                int nvec, struct irq_affinity *affd)
 {
-       int ret;
-       u16 control;
        void __iomem *base;
+       int ret, tsize;
+       u16 control;
 
-       /* Ensure MSI-X is disabled while it is set up */
-       pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
+       /*
+        * Some devices require MSI-X to be enabled before the MSI-X
+        * registers can be accessed.  Mask all the vectors to prevent
+        * interrupts coming in before they're fully set up.
+        */
+       pci_msix_clear_and_set_ctrl(dev, 0, PCI_MSIX_FLAGS_MASKALL |
+                                   PCI_MSIX_FLAGS_ENABLE);
 
        pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control);
        /* Request & Map MSI-X table region */
-       base = msix_map_region(dev, msix_table_size(control));
-       if (!base)
-               return -ENOMEM;
+       tsize = msix_table_size(control);
+       base = msix_map_region(dev, tsize);
+       if (!base) {
+               ret = -ENOMEM;
+               goto out_disable;
+       }
+
+       /* Ensure that all table entries are masked. */
+       msix_mask_all(base, tsize);
 
        ret = msix_setup_entries(dev, base, entries, nvec, affd);
        if (ret)
-               return ret;
+               goto out_disable;
 
        ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX);
        if (ret)
@@ -794,19 +728,13 @@ static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
        if (ret)
                goto out_free;
 
-       /*
-        * Some devices require MSI-X to be enabled before we can touch the
-        * MSI-X registers.  We need to mask all the vectors to prevent
-        * interrupts coming in before they're fully set up.
-        */
-       pci_msix_clear_and_set_ctrl(dev, 0,
-                               PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE);
-
-       msix_program_entries(dev, entries);
+       msix_update_entries(dev, entries);
 
-       ret = populate_msi_sysfs(dev);
-       if (ret)
+       dev->msi_irq_groups = msi_populate_sysfs(&dev->dev);
+       if (IS_ERR(dev->msi_irq_groups)) {
+               ret = PTR_ERR(dev->msi_irq_groups);
                goto out_free;
+       }
 
        /* Set MSI-X enabled bits and unmask the function */
        pci_intx_for_msi(dev, 0);
@@ -836,6 +764,9 @@ out_avail:
 out_free:
        free_msi_irqs(dev);
 
+out_disable:
+       pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
+
        return ret;
 }
 
@@ -916,7 +847,6 @@ EXPORT_SYMBOL(pci_msi_vec_count);
 static void pci_msi_shutdown(struct pci_dev *dev)
 {
        struct msi_desc *desc;
-       u32 mask;
 
        if (!pci_msi_enable || !dev || !dev->msi_enabled)
                return;
@@ -929,9 +859,7 @@ static void pci_msi_shutdown(struct pci_dev *dev)
        dev->msi_enabled = 0;
 
        /* Return the device with MSI unmasked as initial states */
-       mask = msi_mask(desc->msi_attrib.multi_cap);
-       /* Keep cached state to be restored */
-       __pci_msi_desc_mask_irq(desc, mask, ~mask);
+       pci_msi_unmask(desc, msi_multi_mask(desc));
 
        /* Restore dev->irq to its default pin-assertion IRQ */
        dev->irq = desc->msi_attrib.default_irq;
@@ -1016,10 +944,8 @@ static void pci_msix_shutdown(struct pci_dev *dev)
        }
 
        /* Return the device with MSI-X masked as initial states */
-       for_each_pci_msi_entry(entry, dev) {
-               /* Keep cached states to be restored */
-               __pci_msix_desc_mask_irq(entry, 1);
-       }
+       for_each_pci_msi_entry(entry, dev)
+               pci_msix_mask(entry);
 
        pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
        pci_intx_for_msi(dev, 1);
index 5d63df7..7bbf267 100644 (file)
@@ -978,7 +978,7 @@ void pci_create_legacy_files(struct pci_bus *b)
        b->legacy_mem->size = 1024*1024;
        b->legacy_mem->attr.mode = 0600;
        b->legacy_mem->mmap = pci_mmap_legacy_mem;
-       b->legacy_io->mapping = iomem_get_mapping();
+       b->legacy_mem->mapping = iomem_get_mapping();
        pci_adjust_legacy_attr(b, pci_mmap_mem);
        error = device_create_bin_file(&b->dev, b->legacy_mem);
        if (error)
index 6d74386..ab3de15 100644 (file)
@@ -1900,6 +1900,7 @@ static void quirk_ryzen_xhci_d3hot(struct pci_dev *dev)
 }
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x15e0, quirk_ryzen_xhci_d3hot);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x15e1, quirk_ryzen_xhci_d3hot);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x1639, quirk_ryzen_xhci_d3hot);
 
 #ifdef CONFIG_X86_IO_APIC
 static int dmi_disable_ioapicreroute(const struct dmi_system_id *d)
index 85887d8..192c904 100644 (file)
@@ -112,6 +112,7 @@ static int i82092aa_pci_probe(struct pci_dev *dev,
        for (i = 0; i < socket_count; i++) {
                sockets[i].card_state = 1; /* 1 = present but empty */
                sockets[i].io_base = pci_resource_start(dev, 0);
+               sockets[i].dev = dev;
                sockets[i].socket.features |= SS_CAP_PCCARD;
                sockets[i].socket.map_size = 0x1000;
                sockets[i].socket.irq_mask = 0;
index c8b3e39..781f220 100644 (file)
@@ -833,7 +833,7 @@ static void owl_gpio_irq_handler(struct irq_desc *desc)
        unsigned int parent = irq_desc_get_irq(desc);
        const struct owl_gpio_port *port;
        void __iomem *base;
-       unsigned int pin, irq, offset = 0, i;
+       unsigned int pin, offset = 0, i;
        unsigned long pending_irq;
 
        chained_irq_enter(chip, desc);
@@ -849,8 +849,7 @@ static void owl_gpio_irq_handler(struct irq_desc *desc)
                pending_irq = readl_relaxed(base + port->intc_pd);
 
                for_each_set_bit(pin, &pending_irq, port->pins) {
-                       irq = irq_find_mapping(domain, offset + pin);
-                       generic_handle_irq(irq);
+                       generic_handle_domain_irq(domain, offset + pin);
 
                        /* clear pending interrupt */
                        owl_gpio_update_reg(base + port->intc_pd, pin, true);
index 2c87af1..8b34d2c 100644 (file)
@@ -395,8 +395,8 @@ static void bcm2835_gpio_irq_handle_bank(struct bcm2835_pinctrl *pc,
        events &= pc->enabled_irq_map[bank];
        for_each_set_bit(offset, &events, 32) {
                gpio = (32 * bank) + offset;
-               generic_handle_irq(irq_linear_revmap(pc->gpio_chip.irq.domain,
-                                                    gpio));
+               generic_handle_domain_irq(pc->gpio_chip.irq.domain,
+                                         gpio);
        }
 }
 
index dc511b9..a7a0dd6 100644 (file)
@@ -176,7 +176,6 @@ static void iproc_gpio_irq_handler(struct irq_desc *desc)
 
                for_each_set_bit(bit, &val, NGPIOS_PER_BANK) {
                        unsigned pin = NGPIOS_PER_BANK * i + bit;
-                       int child_irq = irq_find_mapping(gc->irq.domain, pin);
 
                        /*
                         * Clear the interrupt before invoking the
@@ -185,7 +184,7 @@ static void iproc_gpio_irq_handler(struct irq_desc *desc)
                        writel(BIT(bit), chip->base + (i * GPIO_BANK_SIZE) +
                               IPROC_GPIO_INT_CLR_OFFSET);
 
-                       generic_handle_irq(child_irq);
+                       generic_handle_domain_irq(gc->irq.domain, pin);
                }
        }
 
index a00a42a..e031428 100644 (file)
@@ -155,8 +155,7 @@ static irqreturn_t nsp_gpio_irq_handler(int irq, void *data)
                int_bits = level | event;
 
                for_each_set_bit(bit, &int_bits, gc->ngpio)
-                       generic_handle_irq(
-                               irq_linear_revmap(gc->irq.domain, bit));
+                       generic_handle_domain_irq(gc->irq.domain, bit);
        }
 
        return  int_bits ? IRQ_HANDLED : IRQ_NONE;
index 394a421..8f23d12 100644 (file)
@@ -1444,7 +1444,6 @@ static void byt_gpio_irq_handler(struct irq_desc *desc)
        u32 base, pin;
        void __iomem *reg;
        unsigned long pending;
-       unsigned int virq;
 
        /* check from GPIO controller which pin triggered the interrupt */
        for (base = 0; base < vg->chip.ngpio; base += 32) {
@@ -1460,10 +1459,8 @@ static void byt_gpio_irq_handler(struct irq_desc *desc)
                raw_spin_lock(&byt_lock);
                pending = readl(reg);
                raw_spin_unlock(&byt_lock);
-               for_each_set_bit(pin, &pending, 32) {
-                       virq = irq_find_mapping(vg->chip.irq.domain, base + pin);
-                       generic_handle_irq(virq);
-               }
+               for_each_set_bit(pin, &pending, 32)
+                       generic_handle_domain_irq(vg->chip.irq.domain, base + pin);
        }
        chip->irq_eoi(data);
 }
index 2ed17cd..9800990 100644 (file)
@@ -1409,11 +1409,10 @@ static void chv_gpio_irq_handler(struct irq_desc *desc)
        raw_spin_unlock_irqrestore(&chv_lock, flags);
 
        for_each_set_bit(intr_line, &pending, community->nirqs) {
-               unsigned int irq, offset;
+               unsigned int offset;
 
                offset = cctx->intr_lines[intr_line];
-               irq = irq_find_mapping(gc->irq.domain, offset);
-               generic_handle_irq(irq);
+               generic_handle_domain_irq(gc->irq.domain, offset);
        }
 
        chained_irq_exit(chip, desc);
index 0a48ca4..561fa32 100644 (file)
@@ -653,12 +653,8 @@ static void lp_gpio_irq_handler(struct irq_desc *desc)
                /* Only interrupts that are enabled */
                pending = ioread32(reg) & ioread32(ena);
 
-               for_each_set_bit(pin, &pending, 32) {
-                       unsigned int irq;
-
-                       irq = irq_find_mapping(lg->chip.irq.domain, base + pin);
-                       generic_handle_irq(irq);
-               }
+               for_each_set_bit(pin, &pending, 32)
+                       generic_handle_domain_irq(lg->chip.irq.domain, base + pin);
        }
        chip->irq_eoi(data);
 }
index 3e4ef2b..0bcd195 100644 (file)
@@ -701,32 +701,32 @@ static const struct pinctrl_pin_desc tglh_pins[] = {
 
 static const struct intel_padgroup tglh_community0_gpps[] = {
        TGL_GPP(0, 0, 24, 0),                           /* GPP_A */
-       TGL_GPP(1, 25, 44, 128),                        /* GPP_R */
-       TGL_GPP(2, 45, 70, 32),                         /* GPP_B */
-       TGL_GPP(3, 71, 78, INTEL_GPIO_BASE_NOMAP),      /* vGPIO_0 */
+       TGL_GPP(1, 25, 44, 32),                         /* GPP_R */
+       TGL_GPP(2, 45, 70, 64),                         /* GPP_B */
+       TGL_GPP(3, 71, 78, 96),                         /* vGPIO_0 */
 };
 
 static const struct intel_padgroup tglh_community1_gpps[] = {
-       TGL_GPP(0, 79, 104, 96),                        /* GPP_D */
-       TGL_GPP(1, 105, 128, 64),                       /* GPP_C */
-       TGL_GPP(2, 129, 136, 160),                      /* GPP_S */
-       TGL_GPP(3, 137, 153, 192),                      /* GPP_G */
-       TGL_GPP(4, 154, 180, 224),                      /* vGPIO */
+       TGL_GPP(0, 79, 104, 128),                       /* GPP_D */
+       TGL_GPP(1, 105, 128, 160),                      /* GPP_C */
+       TGL_GPP(2, 129, 136, 192),                      /* GPP_S */
+       TGL_GPP(3, 137, 153, 224),                      /* GPP_G */
+       TGL_GPP(4, 154, 180, 256),                      /* vGPIO */
 };
 
 static const struct intel_padgroup tglh_community3_gpps[] = {
-       TGL_GPP(0, 181, 193, 256),                      /* GPP_E */
-       TGL_GPP(1, 194, 217, 288),                      /* GPP_F */
+       TGL_GPP(0, 181, 193, 288),                      /* GPP_E */
+       TGL_GPP(1, 194, 217, 320),                      /* GPP_F */
 };
 
 static const struct intel_padgroup tglh_community4_gpps[] = {
-       TGL_GPP(0, 218, 241, 320),                      /* GPP_H */
+       TGL_GPP(0, 218, 241, 352),                      /* GPP_H */
        TGL_GPP(1, 242, 251, 384),                      /* GPP_J */
-       TGL_GPP(2, 252, 266, 352),                      /* GPP_K */
+       TGL_GPP(2, 252, 266, 416),                      /* GPP_K */
 };
 
 static const struct intel_padgroup tglh_community5_gpps[] = {
-       TGL_GPP(0, 267, 281, 416),                      /* GPP_I */
+       TGL_GPP(0, 267, 281, 448),                      /* GPP_I */
        TGL_GPP(1, 282, 290, INTEL_GPIO_BASE_NOMAP),    /* JTAG */
 };
 
index 3b9b5db..f7b54a5 100644 (file)
@@ -319,7 +319,7 @@ static void mtk_eint_irq_handler(struct irq_desc *desc)
        struct irq_chip *chip = irq_desc_get_chip(desc);
        struct mtk_eint *eint = irq_desc_get_handler_data(desc);
        unsigned int status, eint_num;
-       int offset, mask_offset, index, virq;
+       int offset, mask_offset, index;
        void __iomem *reg =  mtk_eint_get_offset(eint, 0, eint->regs->stat);
        int dual_edge, start_level, curr_level;
 
@@ -331,7 +331,6 @@ static void mtk_eint_irq_handler(struct irq_desc *desc)
                        offset = __ffs(status);
                        mask_offset = eint_num >> 5;
                        index = eint_num + offset;
-                       virq = irq_find_mapping(eint->domain, index);
                        status &= ~BIT(offset);
 
                        /*
@@ -361,7 +360,7 @@ static void mtk_eint_irq_handler(struct irq_desc *desc)
                                                                 index);
                        }
 
-                       generic_handle_irq(virq);
+                       generic_handle_domain_irq(eint->domain, index);
 
                        if (dual_edge) {
                                curr_level = mtk_eint_flip_edge(eint, index);
index 5b3b048..45ebdeb 100644 (file)
@@ -925,12 +925,10 @@ int mtk_pinconf_adv_pull_set(struct mtk_pinctrl *hw,
                        err = hw->soc->bias_set(hw, desc, pullup);
                        if (err)
                                return err;
-               } else if (hw->soc->bias_set_combo) {
-                       err = hw->soc->bias_set_combo(hw, desc, pullup, arg);
-                       if (err)
-                               return err;
                } else {
-                       return -ENOTSUPP;
+                       err = mtk_pinconf_bias_set_rev1(hw, desc, pullup);
+                       if (err)
+                               err = mtk_pinconf_bias_set(hw, desc, pullup);
                }
        }
 
index abfe11c..39828e9 100644 (file)
@@ -815,7 +815,7 @@ static void nmk_gpio_irq_handler(struct irq_desc *desc)
        while (status) {
                int bit = __ffs(status);
 
-               generic_handle_irq(irq_find_mapping(chip->irq.domain, bit));
+               generic_handle_domain_irq(chip->irq.domain, bit);
                status &= ~BIT(bit);
        }
 
index bb1ea47..4d81908 100644 (file)
@@ -231,7 +231,7 @@ static void npcmgpio_irq_handler(struct irq_desc *desc)
 
        sts &= en;
        for_each_set_bit(bit, (const void *)&sts, NPCM7XX_GPIO_PER_BANK)
-               generic_handle_irq(irq_linear_revmap(gc->irq.domain, bit));
+               generic_handle_domain_irq(gc->irq.domain, bit);
        chained_irq_exit(chip, desc);
 }
 
index a76be6c..c001f2e 100644 (file)
@@ -444,8 +444,7 @@ static int amd_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
        unsigned long flags;
        struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
        struct amd_gpio *gpio_dev = gpiochip_get_data(gc);
-       u32 wake_mask = BIT(WAKE_CNTRL_OFF_S0I3) | BIT(WAKE_CNTRL_OFF_S3) |
-                       BIT(WAKE_CNTRL_OFF_S4);
+       u32 wake_mask = BIT(WAKE_CNTRL_OFF_S0I3) | BIT(WAKE_CNTRL_OFF_S3);
 
        raw_spin_lock_irqsave(&gpio_dev->lock, flags);
        pin_reg = readl(gpio_dev->base + (d->hwirq)*4);
@@ -621,14 +620,12 @@ static irqreturn_t amd_gpio_irq_handler(int irq, void *dev_id)
                        if (!(regval & PIN_IRQ_PENDING) ||
                            !(regval & BIT(INTERRUPT_MASK_OFF)))
                                continue;
-                       irq = irq_find_mapping(gc->irq.domain, irqnr + i);
-                       if (irq != 0)
-                               generic_handle_irq(irq);
+                       generic_handle_domain_irq(gc->irq.domain, irqnr + i);
 
                        /* Clear interrupt.
                         * We must read the pin register again, in case the
                         * value was changed while executing
-                        * generic_handle_irq() above.
+                        * generic_handle_domain_irq() above.
                         * If we didn't find a mapping for the interrupt,
                         * disable it in order to avoid a system hang caused
                         * by an interrupt storm.
index 72e6df7..6022496 100644 (file)
@@ -1712,10 +1712,8 @@ static void gpio_irq_handler(struct irq_desc *desc)
                        continue;
                }
 
-               for_each_set_bit(n, &isr, BITS_PER_LONG) {
-                       generic_handle_irq(irq_find_mapping(
-                                          gpio_chip->irq.domain, n));
-               }
+               for_each_set_bit(n, &isr, BITS_PER_LONG)
+                       generic_handle_domain_irq(gpio_chip->irq.domain, n);
        }
        chained_irq_exit(chip, desc);
        /* now it may re-trigger */
index 38cc20f..fb713f9 100644 (file)
@@ -155,7 +155,7 @@ static void eqbr_irq_handler(struct irq_desc *desc)
        pins = readl(gctrl->membase + GPIO_IRNCR);
 
        for_each_set_bit(offset, &pins, gc->ngpio)
-               generic_handle_irq(irq_find_mapping(gc->irq.domain, offset));
+               generic_handle_domain_irq(gc->irq.domain, offset);
 
        chained_irq_exit(ic, desc);
 }
index 983ba98..ce9cc71 100644 (file)
@@ -3080,7 +3080,7 @@ static void ingenic_gpio_irq_handler(struct irq_desc *desc)
                flag = ingenic_gpio_read_reg(jzgc, JZ4730_GPIO_GPFR);
 
        for_each_set_bit(i, &flag, 32)
-               generic_handle_irq(irq_linear_revmap(gc->irq.domain, i));
+               generic_handle_domain_irq(gc->irq.domain, i);
        chained_irq_exit(irq_chip, desc);
 }
 
index f831526..49e3268 100644 (file)
@@ -950,23 +950,37 @@ static int k210_fpioa_probe(struct platform_device *pdev)
                return ret;
 
        pdata->pclk = devm_clk_get_optional(dev, "pclk");
-       if (!IS_ERR(pdata->pclk))
-               clk_prepare_enable(pdata->pclk);
+       if (!IS_ERR(pdata->pclk)) {
+               ret = clk_prepare_enable(pdata->pclk);
+               if (ret)
+                       goto disable_clk;
+       }
 
        pdata->sysctl_map =
                syscon_regmap_lookup_by_phandle_args(np,
                                                "canaan,k210-sysctl-power",
                                                1, &pdata->power_offset);
-       if (IS_ERR(pdata->sysctl_map))
-               return PTR_ERR(pdata->sysctl_map);
+       if (IS_ERR(pdata->sysctl_map)) {
+               ret = PTR_ERR(pdata->sysctl_map);
+               goto disable_pclk;
+       }
 
        k210_fpioa_init_ties(pdata);
 
        pdata->pctl = pinctrl_register(&k210_pinctrl_desc, dev, (void *)pdata);
-       if (IS_ERR(pdata->pctl))
-               return PTR_ERR(pdata->pctl);
+       if (IS_ERR(pdata->pctl)) {
+               ret = PTR_ERR(pdata->pctl);
+               goto disable_pclk;
+       }
 
        return 0;
+
+disable_pclk:
+       clk_disable_unprepare(pdata->pclk);
+disable_clk:
+       clk_disable_unprepare(pdata->clk);
+
+       return ret;
 }
 
 static const struct of_device_id k210_fpioa_dt_ids[] = {
index 165cb7a..072bccd 100644 (file)
@@ -673,7 +673,7 @@ static void sgpio_irq_handler(struct irq_desc *desc)
 
                for_each_set_bit(port, &val, SGPIO_BITS_PER_WORD) {
                        gpio = sgpio_addr_to_pin(priv, port, bit);
-                       generic_handle_irq(irq_linear_revmap(chip->irq.domain, gpio));
+                       generic_handle_domain_irq(chip->irq.domain, gpio);
                }
 
                chained_irq_exit(parent_chip, desc);
index e470c16..0a36ec8 100644 (file)
@@ -1290,8 +1290,7 @@ static void ocelot_irq_handler(struct irq_desc *desc)
 
                for_each_set_bit(irq, &irqs,
                                 min(32U, info->desc->npins - 32 * i))
-                       generic_handle_irq(irq_linear_revmap(chip->irq.domain,
-                                                            irq + 32 * i));
+                       generic_handle_domain_irq(chip->irq.domain, irq + 32 * i);
 
                chained_irq_exit(parent_chip, desc);
        }
index 5a31227..cebd810 100644 (file)
@@ -1055,7 +1055,7 @@ static void oxnas_gpio_irq_handler(struct irq_desc *desc)
        stat = readl(bank->reg_base + IRQ_PENDING);
 
        for_each_set_bit(pin, &stat, BITS_PER_LONG)
-               generic_handle_irq(irq_linear_revmap(gc->irq.domain, pin));
+               generic_handle_domain_irq(gc->irq.domain, pin);
 
        chained_irq_exit(chip, desc);
 }
index a6e2a4a..748dabd 100644 (file)
@@ -2101,7 +2101,7 @@ static void pic32_gpio_irq_handler(struct irq_desc *desc)
        pending = pic32_gpio_get_pending(gc, stat);
 
        for_each_set_bit(pin, &pending, BITS_PER_LONG)
-               generic_handle_irq(irq_linear_revmap(gc->irq.domain, pin));
+               generic_handle_domain_irq(gc->irq.domain, pin);
 
        chained_irq_exit(chip, desc);
 }
index ec761ba..8d271c6 100644 (file)
@@ -1306,7 +1306,7 @@ static void pistachio_gpio_irq_handler(struct irq_desc *desc)
        pending = gpio_readl(bank, GPIO_INTERRUPT_STATUS) &
                gpio_readl(bank, GPIO_INTERRUPT_EN);
        for_each_set_bit(pin, &pending, 16)
-               generic_handle_irq(irq_linear_revmap(gc->irq.domain, pin));
+               generic_handle_domain_irq(gc->irq.domain, pin);
        chained_irq_exit(chip, desc);
 }
 
index 067fc42..ae33e37 100644 (file)
@@ -21,8 +21,8 @@
 #include <linux/io.h>
 #include <linux/bitops.h>
 #include <linux/gpio/driver.h>
-#include <linux/of_device.h>
 #include <linux/of_address.h>
+#include <linux/of_device.h>
 #include <linux/of_irq.h>
 #include <linux/pinctrl/machine.h>
 #include <linux/pinctrl/pinconf.h>
 
 #include "core.h"
 #include "pinconf.h"
-
-/* GPIO control registers */
-#define GPIO_SWPORT_DR         0x00
-#define GPIO_SWPORT_DDR                0x04
-#define GPIO_INTEN             0x30
-#define GPIO_INTMASK           0x34
-#define GPIO_INTTYPE_LEVEL     0x38
-#define GPIO_INT_POLARITY      0x3c
-#define GPIO_INT_STATUS                0x40
-#define GPIO_INT_RAWSTATUS     0x44
-#define GPIO_DEBOUNCE          0x48
-#define GPIO_PORTS_EOI         0x4c
-#define GPIO_EXT_PORT          0x50
-#define GPIO_LS_SYNC           0x60
-
-enum rockchip_pinctrl_type {
-       PX30,
-       RV1108,
-       RK2928,
-       RK3066B,
-       RK3128,
-       RK3188,
-       RK3288,
-       RK3308,
-       RK3368,
-       RK3399,
-       RK3568,
-};
-
+#include "pinctrl-rockchip.h"
 
 /**
  * Generate a bitmask for setting a value (v) with a write mask bit in hiword
@@ -84,103 +56,6 @@ enum rockchip_pinctrl_type {
 #define IOMUX_WIDTH_3BIT       BIT(4)
 #define IOMUX_WIDTH_2BIT       BIT(5)
 
-/**
- * struct rockchip_iomux
- * @type: iomux variant using IOMUX_* constants
- * @offset: if initialized to -1 it will be autocalculated, by specifying
- *         an initial offset value the relevant source offset can be reset
- *         to a new value for autocalculating the following iomux registers.
- */
-struct rockchip_iomux {
-       int                             type;
-       int                             offset;
-};
-
-/*
- * enum type index corresponding to rockchip_perpin_drv_list arrays index.
- */
-enum rockchip_pin_drv_type {
-       DRV_TYPE_IO_DEFAULT = 0,
-       DRV_TYPE_IO_1V8_OR_3V0,
-       DRV_TYPE_IO_1V8_ONLY,
-       DRV_TYPE_IO_1V8_3V0_AUTO,
-       DRV_TYPE_IO_3V3_ONLY,
-       DRV_TYPE_MAX
-};
-
-/*
- * enum type index corresponding to rockchip_pull_list arrays index.
- */
-enum rockchip_pin_pull_type {
-       PULL_TYPE_IO_DEFAULT = 0,
-       PULL_TYPE_IO_1V8_ONLY,
-       PULL_TYPE_MAX
-};
-
-/**
- * struct rockchip_drv
- * @drv_type: drive strength variant using rockchip_perpin_drv_type
- * @offset: if initialized to -1 it will be autocalculated, by specifying
- *         an initial offset value the relevant source offset can be reset
- *         to a new value for autocalculating the following drive strength
- *         registers. if used chips own cal_drv func instead to calculate
- *         registers offset, the variant could be ignored.
- */
-struct rockchip_drv {
-       enum rockchip_pin_drv_type      drv_type;
-       int                             offset;
-};
-
-/**
- * struct rockchip_pin_bank
- * @reg_base: register base of the gpio bank
- * @regmap_pull: optional separate register for additional pull settings
- * @clk: clock of the gpio bank
- * @irq: interrupt of the gpio bank
- * @saved_masks: Saved content of GPIO_INTEN at suspend time.
- * @pin_base: first pin number
- * @nr_pins: number of pins in this bank
- * @name: name of the bank
- * @bank_num: number of the bank, to account for holes
- * @iomux: array describing the 4 iomux sources of the bank
- * @drv: array describing the 4 drive strength sources of the bank
- * @pull_type: array describing the 4 pull type sources of the bank
- * @valid: is all necessary information present
- * @of_node: dt node of this bank
- * @drvdata: common pinctrl basedata
- * @domain: irqdomain of the gpio bank
- * @gpio_chip: gpiolib chip
- * @grange: gpio range
- * @slock: spinlock for the gpio bank
- * @toggle_edge_mode: bit mask to toggle (falling/rising) edge mode
- * @recalced_mask: bit mask to indicate a need to recalulate the mask
- * @route_mask: bits describing the routing pins of per bank
- */
-struct rockchip_pin_bank {
-       void __iomem                    *reg_base;
-       struct regmap                   *regmap_pull;
-       struct clk                      *clk;
-       int                             irq;
-       u32                             saved_masks;
-       u32                             pin_base;
-       u8                              nr_pins;
-       char                            *name;
-       u8                              bank_num;
-       struct rockchip_iomux           iomux[4];
-       struct rockchip_drv             drv[4];
-       enum rockchip_pin_pull_type     pull_type[4];
-       bool                            valid;
-       struct device_node              *of_node;
-       struct rockchip_pinctrl         *drvdata;
-       struct irq_domain               *domain;
-       struct gpio_chip                gpio_chip;
-       struct pinctrl_gpio_range       grange;
-       raw_spinlock_t                  slock;
-       u32                             toggle_edge_mode;
-       u32                             recalced_mask;
-       u32                             route_mask;
-};
-
 #define PIN_BANK(id, pins, label)                      \
        {                                               \
                .bank_num       = id,                   \
@@ -320,119 +195,6 @@ struct rockchip_pin_bank {
 #define RK_MUXROUTE_PMU(ID, PIN, FUNC, REG, VAL)       \
        PIN_BANK_MUX_ROUTE_FLAGS(ID, PIN, FUNC, REG, VAL, ROCKCHIP_ROUTE_PMU)
 
-/**
- * struct rockchip_mux_recalced_data: represent a pin iomux data.
- * @num: bank number.
- * @pin: pin number.
- * @bit: index at register.
- * @reg: register offset.
- * @mask: mask bit
- */
-struct rockchip_mux_recalced_data {
-       u8 num;
-       u8 pin;
-       u32 reg;
-       u8 bit;
-       u8 mask;
-};
-
-enum rockchip_mux_route_location {
-       ROCKCHIP_ROUTE_SAME = 0,
-       ROCKCHIP_ROUTE_PMU,
-       ROCKCHIP_ROUTE_GRF,
-};
-
-/**
- * struct rockchip_mux_recalced_data: represent a pin iomux data.
- * @bank_num: bank number.
- * @pin: index at register or used to calc index.
- * @func: the min pin.
- * @route_location: the mux route location (same, pmu, grf).
- * @route_offset: the max pin.
- * @route_val: the register offset.
- */
-struct rockchip_mux_route_data {
-       u8 bank_num;
-       u8 pin;
-       u8 func;
-       enum rockchip_mux_route_location route_location;
-       u32 route_offset;
-       u32 route_val;
-};
-
-struct rockchip_pin_ctrl {
-       struct rockchip_pin_bank        *pin_banks;
-       u32                             nr_banks;
-       u32                             nr_pins;
-       char                            *label;
-       enum rockchip_pinctrl_type      type;
-       int                             grf_mux_offset;
-       int                             pmu_mux_offset;
-       int                             grf_drv_offset;
-       int                             pmu_drv_offset;
-       struct rockchip_mux_recalced_data *iomux_recalced;
-       u32                             niomux_recalced;
-       struct rockchip_mux_route_data *iomux_routes;
-       u32                             niomux_routes;
-
-       void    (*pull_calc_reg)(struct rockchip_pin_bank *bank,
-                                   int pin_num, struct regmap **regmap,
-                                   int *reg, u8 *bit);
-       void    (*drv_calc_reg)(struct rockchip_pin_bank *bank,
-                                   int pin_num, struct regmap **regmap,
-                                   int *reg, u8 *bit);
-       int     (*schmitt_calc_reg)(struct rockchip_pin_bank *bank,
-                                   int pin_num, struct regmap **regmap,
-                                   int *reg, u8 *bit);
-};
-
-struct rockchip_pin_config {
-       unsigned int            func;
-       unsigned long           *configs;
-       unsigned int            nconfigs;
-};
-
-/**
- * struct rockchip_pin_group: represent group of pins of a pinmux function.
- * @name: name of the pin group, used to lookup the group.
- * @pins: the pins included in this group.
- * @npins: number of pins included in this group.
- * @data: local pin configuration
- */
-struct rockchip_pin_group {
-       const char                      *name;
-       unsigned int                    npins;
-       unsigned int                    *pins;
-       struct rockchip_pin_config      *data;
-};
-
-/**
- * struct rockchip_pmx_func: represent a pin function.
- * @name: name of the pin function, used to lookup the function.
- * @groups: one or more names of pin groups that provide this function.
- * @ngroups: number of groups included in @groups.
- */
-struct rockchip_pmx_func {
-       const char              *name;
-       const char              **groups;
-       u8                      ngroups;
-};
-
-struct rockchip_pinctrl {
-       struct regmap                   *regmap_base;
-       int                             reg_size;
-       struct regmap                   *regmap_pull;
-       struct regmap                   *regmap_pmu;
-       struct device                   *dev;
-       struct rockchip_pin_ctrl        *ctrl;
-       struct pinctrl_desc             pctl;
-       struct pinctrl_dev              *pctl_dev;
-       struct rockchip_pin_group       *groups;
-       unsigned int                    ngroups;
-       struct rockchip_pmx_func        *functions;
-       unsigned int                    nfunctions;
-};
-
 static struct regmap_config rockchip_regmap_config = {
        .reg_bits = 32,
        .val_bits = 32,
@@ -2295,86 +2057,11 @@ static int rockchip_pmx_set(struct pinctrl_dev *pctldev, unsigned selector,
        return 0;
 }
 
-static int rockchip_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
-{
-       struct rockchip_pin_bank *bank = gpiochip_get_data(chip);
-       u32 data;
-       int ret;
-
-       ret = clk_enable(bank->clk);
-       if (ret < 0) {
-               dev_err(bank->drvdata->dev,
-                       "failed to enable clock for bank %s\n", bank->name);
-               return ret;
-       }
-       data = readl_relaxed(bank->reg_base + GPIO_SWPORT_DDR);
-       clk_disable(bank->clk);
-
-       if (data & BIT(offset))
-               return GPIO_LINE_DIRECTION_OUT;
-
-       return GPIO_LINE_DIRECTION_IN;
-}
-
-/*
- * The calls to gpio_direction_output() and gpio_direction_input()
- * leads to this function call (via the pinctrl_gpio_direction_{input|output}()
- * function called from the gpiolib interface).
- */
-static int _rockchip_pmx_gpio_set_direction(struct gpio_chip *chip,
-                                           int pin, bool input)
-{
-       struct rockchip_pin_bank *bank;
-       int ret;
-       unsigned long flags;
-       u32 data;
-
-       bank = gpiochip_get_data(chip);
-
-       ret = rockchip_set_mux(bank, pin, RK_FUNC_GPIO);
-       if (ret < 0)
-               return ret;
-
-       clk_enable(bank->clk);
-       raw_spin_lock_irqsave(&bank->slock, flags);
-
-       data = readl_relaxed(bank->reg_base + GPIO_SWPORT_DDR);
-       /* set bit to 1 for output, 0 for input */
-       if (!input)
-               data |= BIT(pin);
-       else
-               data &= ~BIT(pin);
-       writel_relaxed(data, bank->reg_base + GPIO_SWPORT_DDR);
-
-       raw_spin_unlock_irqrestore(&bank->slock, flags);
-       clk_disable(bank->clk);
-
-       return 0;
-}
-
-static int rockchip_pmx_gpio_set_direction(struct pinctrl_dev *pctldev,
-                                             struct pinctrl_gpio_range *range,
-                                             unsigned offset, bool input)
-{
-       struct rockchip_pinctrl *info = pinctrl_dev_get_drvdata(pctldev);
-       struct gpio_chip *chip;
-       int pin;
-
-       chip = range->gc;
-       pin = offset - chip->base;
-       dev_dbg(info->dev, "gpio_direction for pin %u as %s-%d to %s\n",
-                offset, range->name, pin, input ? "input" : "output");
-
-       return _rockchip_pmx_gpio_set_direction(chip, offset - chip->base,
-                                               input);
-}
-
 static const struct pinmux_ops rockchip_pmx_ops = {
        .get_functions_count    = rockchip_pmx_get_funcs_count,
        .get_function_name      = rockchip_pmx_get_func_name,
        .get_function_groups    = rockchip_pmx_get_groups,
        .set_mux                = rockchip_pmx_set,
-       .gpio_set_direction     = rockchip_pmx_gpio_set_direction,
 };
 
 /*
@@ -2405,15 +2092,13 @@ static bool rockchip_pinconf_pull_valid(struct rockchip_pin_ctrl *ctrl,
        return false;
 }
 
-static void rockchip_gpio_set(struct gpio_chip *gc, unsigned offset, int value);
-static int rockchip_gpio_get(struct gpio_chip *gc, unsigned offset);
-
 /* set the pin config settings for a specified pin */
 static int rockchip_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
                                unsigned long *configs, unsigned num_configs)
 {
        struct rockchip_pinctrl *info = pinctrl_dev_get_drvdata(pctldev);
        struct rockchip_pin_bank *bank = pin_to_bank(info, pin);
+       struct gpio_chip *gpio = &bank->gpio_chip;
        enum pin_config_param param;
        u32 arg;
        int i;
@@ -2446,10 +2131,13 @@ static int rockchip_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
                                return rc;
                        break;
                case PIN_CONFIG_OUTPUT:
-                       rockchip_gpio_set(&bank->gpio_chip,
-                                         pin - bank->pin_base, arg);
-                       rc = _rockchip_pmx_gpio_set_direction(&bank->gpio_chip,
-                                         pin - bank->pin_base, false);
+                       rc = rockchip_set_mux(bank, pin - bank->pin_base,
+                                             RK_FUNC_GPIO);
+                       if (rc != RK_FUNC_GPIO)
+                               return -EINVAL;
+
+                       rc = gpio->direction_output(gpio, pin - bank->pin_base,
+                                                   arg);
                        if (rc)
                                return rc;
                        break;
@@ -2487,6 +2175,7 @@ static int rockchip_pinconf_get(struct pinctrl_dev *pctldev, unsigned int pin,
 {
        struct rockchip_pinctrl *info = pinctrl_dev_get_drvdata(pctldev);
        struct rockchip_pin_bank *bank = pin_to_bank(info, pin);
+       struct gpio_chip *gpio = &bank->gpio_chip;
        enum pin_config_param param = pinconf_to_config_param(*config);
        u16 arg;
        int rc;
@@ -2515,7 +2204,7 @@ static int rockchip_pinconf_get(struct pinctrl_dev *pctldev, unsigned int pin,
                if (rc != RK_FUNC_GPIO)
                        return -EINVAL;
 
-               rc = rockchip_gpio_get(&bank->gpio_chip, pin - bank->pin_base);
+               rc = gpio->get(gpio, pin - bank->pin_base);
                if (rc < 0)
                        return rc;
 
@@ -2753,7 +2442,7 @@ static int rockchip_pinctrl_register(struct platform_device *pdev,
        ctrldesc->npins = info->ctrl->nr_pins;
 
        pdesc = pindesc;
-       for (bank = 0 , k = 0; bank < info->ctrl->nr_banks; bank++) {
+       for (bank = 0, k = 0; bank < info->ctrl->nr_banks; bank++) {
                pin_bank = &info->ctrl->pin_banks[bank];
                for (pin = 0; pin < pin_bank->nr_pins; pin++, k++) {
                        pdesc->number = k;
@@ -2773,553 +2462,9 @@ static int rockchip_pinctrl_register(struct platform_device *pdev,
                return PTR_ERR(info->pctl_dev);
        }
 
-       for (bank = 0; bank < info->ctrl->nr_banks; ++bank) {
-               pin_bank = &info->ctrl->pin_banks[bank];
-               pin_bank->grange.name = pin_bank->name;
-               pin_bank->grange.id = bank;
-               pin_bank->grange.pin_base = pin_bank->pin_base;
-               pin_bank->grange.base = pin_bank->gpio_chip.base;
-               pin_bank->grange.npins = pin_bank->gpio_chip.ngpio;
-               pin_bank->grange.gc = &pin_bank->gpio_chip;
-               pinctrl_add_gpio_range(info->pctl_dev, &pin_bank->grange);
-       }
-
        return 0;
 }
 
-/*
- * GPIO handling
- */
-
-static void rockchip_gpio_set(struct gpio_chip *gc, unsigned offset, int value)
-{
-       struct rockchip_pin_bank *bank = gpiochip_get_data(gc);
-       void __iomem *reg = bank->reg_base + GPIO_SWPORT_DR;
-       unsigned long flags;
-       u32 data;
-
-       clk_enable(bank->clk);
-       raw_spin_lock_irqsave(&bank->slock, flags);
-
-       data = readl(reg);
-       data &= ~BIT(offset);
-       if (value)
-               data |= BIT(offset);
-       writel(data, reg);
-
-       raw_spin_unlock_irqrestore(&bank->slock, flags);
-       clk_disable(bank->clk);
-}
-
-/*
- * Returns the level of the pin for input direction and setting of the DR
- * register for output gpios.
- */
-static int rockchip_gpio_get(struct gpio_chip *gc, unsigned offset)
-{
-       struct rockchip_pin_bank *bank = gpiochip_get_data(gc);
-       u32 data;
-
-       clk_enable(bank->clk);
-       data = readl(bank->reg_base + GPIO_EXT_PORT);
-       clk_disable(bank->clk);
-       data >>= offset;
-       data &= 1;
-       return data;
-}
-
-/*
- * gpiolib gpio_direction_input callback function. The setting of the pin
- * mux function as 'gpio input' will be handled by the pinctrl subsystem
- * interface.
- */
-static int rockchip_gpio_direction_input(struct gpio_chip *gc, unsigned offset)
-{
-       return pinctrl_gpio_direction_input(gc->base + offset);
-}
-
-/*
- * gpiolib gpio_direction_output callback function. The setting of the pin
- * mux function as 'gpio output' will be handled by the pinctrl subsystem
- * interface.
- */
-static int rockchip_gpio_direction_output(struct gpio_chip *gc,
-                                         unsigned offset, int value)
-{
-       rockchip_gpio_set(gc, offset, value);
-       return pinctrl_gpio_direction_output(gc->base + offset);
-}
-
-static void rockchip_gpio_set_debounce(struct gpio_chip *gc,
-                                      unsigned int offset, bool enable)
-{
-       struct rockchip_pin_bank *bank = gpiochip_get_data(gc);
-       void __iomem *reg = bank->reg_base + GPIO_DEBOUNCE;
-       unsigned long flags;
-       u32 data;
-
-       clk_enable(bank->clk);
-       raw_spin_lock_irqsave(&bank->slock, flags);
-
-       data = readl(reg);
-       if (enable)
-               data |= BIT(offset);
-       else
-               data &= ~BIT(offset);
-       writel(data, reg);
-
-       raw_spin_unlock_irqrestore(&bank->slock, flags);
-       clk_disable(bank->clk);
-}
-
-/*
- * gpiolib set_config callback function. The setting of the pin
- * mux function as 'gpio output' will be handled by the pinctrl subsystem
- * interface.
- */
-static int rockchip_gpio_set_config(struct gpio_chip *gc, unsigned int offset,
-                                 unsigned long config)
-{
-       enum pin_config_param param = pinconf_to_config_param(config);
-
-       switch (param) {
-       case PIN_CONFIG_INPUT_DEBOUNCE:
-               rockchip_gpio_set_debounce(gc, offset, true);
-               /*
-                * Rockchip's gpio could only support up to one period
-                * of the debounce clock(pclk), which is far away from
-                * satisftying the requirement, as pclk is usually near
-                * 100MHz shared by all peripherals. So the fact is it
-                * has crippled debounce capability could only be useful
-                * to prevent any spurious glitches from waking up the system
-                * if the gpio is conguired as wakeup interrupt source. Let's
-                * still return -ENOTSUPP as before, to make sure the caller
-                * of gpiod_set_debounce won't change its behaviour.
-                */
-               return -ENOTSUPP;
-       default:
-               return -ENOTSUPP;
-       }
-}
-
-/*
- * gpiolib gpio_to_irq callback function. Creates a mapping between a GPIO pin
- * and a virtual IRQ, if not already present.
- */
-static int rockchip_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
-{
-       struct rockchip_pin_bank *bank = gpiochip_get_data(gc);
-       unsigned int virq;
-
-       if (!bank->domain)
-               return -ENXIO;
-
-       clk_enable(bank->clk);
-       virq = irq_create_mapping(bank->domain, offset);
-       clk_disable(bank->clk);
-
-       return (virq) ? : -ENXIO;
-}
-
-static const struct gpio_chip rockchip_gpiolib_chip = {
-       .request = gpiochip_generic_request,
-       .free = gpiochip_generic_free,
-       .set = rockchip_gpio_set,
-       .get = rockchip_gpio_get,
-       .get_direction  = rockchip_gpio_get_direction,
-       .direction_input = rockchip_gpio_direction_input,
-       .direction_output = rockchip_gpio_direction_output,
-       .set_config = rockchip_gpio_set_config,
-       .to_irq = rockchip_gpio_to_irq,
-       .owner = THIS_MODULE,
-};
-
-/*
- * Interrupt handling
- */
-
-static void rockchip_irq_demux(struct irq_desc *desc)
-{
-       struct irq_chip *chip = irq_desc_get_chip(desc);
-       struct rockchip_pin_bank *bank = irq_desc_get_handler_data(desc);
-       u32 pend;
-
-       dev_dbg(bank->drvdata->dev, "got irq for bank %s\n", bank->name);
-
-       chained_irq_enter(chip, desc);
-
-       pend = readl_relaxed(bank->reg_base + GPIO_INT_STATUS);
-
-       while (pend) {
-               unsigned int irq, virq;
-
-               irq = __ffs(pend);
-               pend &= ~BIT(irq);
-               virq = irq_find_mapping(bank->domain, irq);
-
-               if (!virq) {
-                       dev_err(bank->drvdata->dev, "unmapped irq %d\n", irq);
-                       continue;
-               }
-
-               dev_dbg(bank->drvdata->dev, "handling irq %d\n", irq);
-
-               /*
-                * Triggering IRQ on both rising and falling edge
-                * needs manual intervention.
-                */
-               if (bank->toggle_edge_mode & BIT(irq)) {
-                       u32 data, data_old, polarity;
-                       unsigned long flags;
-
-                       data = readl_relaxed(bank->reg_base + GPIO_EXT_PORT);
-                       do {
-                               raw_spin_lock_irqsave(&bank->slock, flags);
-
-                               polarity = readl_relaxed(bank->reg_base +
-                                                        GPIO_INT_POLARITY);
-                               if (data & BIT(irq))
-                                       polarity &= ~BIT(irq);
-                               else
-                                       polarity |= BIT(irq);
-                               writel(polarity,
-                                      bank->reg_base + GPIO_INT_POLARITY);
-
-                               raw_spin_unlock_irqrestore(&bank->slock, flags);
-
-                               data_old = data;
-                               data = readl_relaxed(bank->reg_base +
-                                                    GPIO_EXT_PORT);
-                       } while ((data & BIT(irq)) != (data_old & BIT(irq)));
-               }
-
-               generic_handle_irq(virq);
-       }
-
-       chained_irq_exit(chip, desc);
-}
-
-static int rockchip_irq_set_type(struct irq_data *d, unsigned int type)
-{
-       struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
-       struct rockchip_pin_bank *bank = gc->private;
-       u32 mask = BIT(d->hwirq);
-       u32 polarity;
-       u32 level;
-       u32 data;
-       unsigned long flags;
-       int ret;
-
-       /* make sure the pin is configured as gpio input */
-       ret = rockchip_set_mux(bank, d->hwirq, RK_FUNC_GPIO);
-       if (ret < 0)
-               return ret;
-
-       clk_enable(bank->clk);
-       raw_spin_lock_irqsave(&bank->slock, flags);
-
-       data = readl_relaxed(bank->reg_base + GPIO_SWPORT_DDR);
-       data &= ~mask;
-       writel_relaxed(data, bank->reg_base + GPIO_SWPORT_DDR);
-
-       raw_spin_unlock_irqrestore(&bank->slock, flags);
-
-       if (type & IRQ_TYPE_EDGE_BOTH)
-               irq_set_handler_locked(d, handle_edge_irq);
-       else
-               irq_set_handler_locked(d, handle_level_irq);
-
-       raw_spin_lock_irqsave(&bank->slock, flags);
-       irq_gc_lock(gc);
-
-       level = readl_relaxed(gc->reg_base + GPIO_INTTYPE_LEVEL);
-       polarity = readl_relaxed(gc->reg_base + GPIO_INT_POLARITY);
-
-       switch (type) {
-       case IRQ_TYPE_EDGE_BOTH:
-               bank->toggle_edge_mode |= mask;
-               level |= mask;
-
-               /*
-                * Determine gpio state. If 1 next interrupt should be falling
-                * otherwise rising.
-                */
-               data = readl(bank->reg_base + GPIO_EXT_PORT);
-               if (data & mask)
-                       polarity &= ~mask;
-               else
-                       polarity |= mask;
-               break;
-       case IRQ_TYPE_EDGE_RISING:
-               bank->toggle_edge_mode &= ~mask;
-               level |= mask;
-               polarity |= mask;
-               break;
-       case IRQ_TYPE_EDGE_FALLING:
-               bank->toggle_edge_mode &= ~mask;
-               level |= mask;
-               polarity &= ~mask;
-               break;
-       case IRQ_TYPE_LEVEL_HIGH:
-               bank->toggle_edge_mode &= ~mask;
-               level &= ~mask;
-               polarity |= mask;
-               break;
-       case IRQ_TYPE_LEVEL_LOW:
-               bank->toggle_edge_mode &= ~mask;
-               level &= ~mask;
-               polarity &= ~mask;
-               break;
-       default:
-               irq_gc_unlock(gc);
-               raw_spin_unlock_irqrestore(&bank->slock, flags);
-               clk_disable(bank->clk);
-               return -EINVAL;
-       }
-
-       writel_relaxed(level, gc->reg_base + GPIO_INTTYPE_LEVEL);
-       writel_relaxed(polarity, gc->reg_base + GPIO_INT_POLARITY);
-
-       irq_gc_unlock(gc);
-       raw_spin_unlock_irqrestore(&bank->slock, flags);
-       clk_disable(bank->clk);
-
-       return 0;
-}
-
-static void rockchip_irq_suspend(struct irq_data *d)
-{
-       struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
-       struct rockchip_pin_bank *bank = gc->private;
-
-       clk_enable(bank->clk);
-       bank->saved_masks = irq_reg_readl(gc, GPIO_INTMASK);
-       irq_reg_writel(gc, ~gc->wake_active, GPIO_INTMASK);
-       clk_disable(bank->clk);
-}
-
-static void rockchip_irq_resume(struct irq_data *d)
-{
-       struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
-       struct rockchip_pin_bank *bank = gc->private;
-
-       clk_enable(bank->clk);
-       irq_reg_writel(gc, bank->saved_masks, GPIO_INTMASK);
-       clk_disable(bank->clk);
-}
-
-static void rockchip_irq_enable(struct irq_data *d)
-{
-       struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
-       struct rockchip_pin_bank *bank = gc->private;
-
-       clk_enable(bank->clk);
-       irq_gc_mask_clr_bit(d);
-}
-
-static void rockchip_irq_disable(struct irq_data *d)
-{
-       struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
-       struct rockchip_pin_bank *bank = gc->private;
-
-       irq_gc_mask_set_bit(d);
-       clk_disable(bank->clk);
-}
-
-static int rockchip_interrupts_register(struct platform_device *pdev,
-                                               struct rockchip_pinctrl *info)
-{
-       struct rockchip_pin_ctrl *ctrl = info->ctrl;
-       struct rockchip_pin_bank *bank = ctrl->pin_banks;
-       unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
-       struct irq_chip_generic *gc;
-       int ret;
-       int i;
-
-       for (i = 0; i < ctrl->nr_banks; ++i, ++bank) {
-               if (!bank->valid) {
-                       dev_warn(&pdev->dev, "bank %s is not valid\n",
-                                bank->name);
-                       continue;
-               }
-
-               ret = clk_enable(bank->clk);
-               if (ret) {
-                       dev_err(&pdev->dev, "failed to enable clock for bank %s\n",
-                               bank->name);
-                       continue;
-               }
-
-               bank->domain = irq_domain_add_linear(bank->of_node, 32,
-                                               &irq_generic_chip_ops, NULL);
-               if (!bank->domain) {
-                       dev_warn(&pdev->dev, "could not initialize irq domain for bank %s\n",
-                                bank->name);
-                       clk_disable(bank->clk);
-                       continue;
-               }
-
-               ret = irq_alloc_domain_generic_chips(bank->domain, 32, 1,
-                                        "rockchip_gpio_irq", handle_level_irq,
-                                        clr, 0, 0);
-               if (ret) {
-                       dev_err(&pdev->dev, "could not alloc generic chips for bank %s\n",
-                               bank->name);
-                       irq_domain_remove(bank->domain);
-                       clk_disable(bank->clk);
-                       continue;
-               }
-
-               gc = irq_get_domain_generic_chip(bank->domain, 0);
-               gc->reg_base = bank->reg_base;
-               gc->private = bank;
-               gc->chip_types[0].regs.mask = GPIO_INTMASK;
-               gc->chip_types[0].regs.ack = GPIO_PORTS_EOI;
-               gc->chip_types[0].chip.irq_ack = irq_gc_ack_set_bit;
-               gc->chip_types[0].chip.irq_mask = irq_gc_mask_set_bit;
-               gc->chip_types[0].chip.irq_unmask = irq_gc_mask_clr_bit;
-               gc->chip_types[0].chip.irq_enable = rockchip_irq_enable;
-               gc->chip_types[0].chip.irq_disable = rockchip_irq_disable;
-               gc->chip_types[0].chip.irq_set_wake = irq_gc_set_wake;
-               gc->chip_types[0].chip.irq_suspend = rockchip_irq_suspend;
-               gc->chip_types[0].chip.irq_resume = rockchip_irq_resume;
-               gc->chip_types[0].chip.irq_set_type = rockchip_irq_set_type;
-               gc->wake_enabled = IRQ_MSK(bank->nr_pins);
-
-               /*
-                * Linux assumes that all interrupts start out disabled/masked.
-                * Our driver only uses the concept of masked and always keeps
-                * things enabled, so for us that's all masked and all enabled.
-                */
-               writel_relaxed(0xffffffff, bank->reg_base + GPIO_INTMASK);
-               writel_relaxed(0xffffffff, bank->reg_base + GPIO_PORTS_EOI);
-               writel_relaxed(0xffffffff, bank->reg_base + GPIO_INTEN);
-               gc->mask_cache = 0xffffffff;
-
-               irq_set_chained_handler_and_data(bank->irq,
-                                                rockchip_irq_demux, bank);
-               clk_disable(bank->clk);
-       }
-
-       return 0;
-}
-
-static int rockchip_gpiolib_register(struct platform_device *pdev,
-                                               struct rockchip_pinctrl *info)
-{
-       struct rockchip_pin_ctrl *ctrl = info->ctrl;
-       struct rockchip_pin_bank *bank = ctrl->pin_banks;
-       struct gpio_chip *gc;
-       int ret;
-       int i;
-
-       for (i = 0; i < ctrl->nr_banks; ++i, ++bank) {
-               if (!bank->valid) {
-                       dev_warn(&pdev->dev, "bank %s is not valid\n",
-                                bank->name);
-                       continue;
-               }
-
-               bank->gpio_chip = rockchip_gpiolib_chip;
-
-               gc = &bank->gpio_chip;
-               gc->base = bank->pin_base;
-               gc->ngpio = bank->nr_pins;
-               gc->parent = &pdev->dev;
-               gc->of_node = bank->of_node;
-               gc->label = bank->name;
-
-               ret = gpiochip_add_data(gc, bank);
-               if (ret) {
-                       dev_err(&pdev->dev, "failed to register gpio_chip %s, error code: %d\n",
-                                                       gc->label, ret);
-                       goto fail;
-               }
-       }
-
-       rockchip_interrupts_register(pdev, info);
-
-       return 0;
-
-fail:
-       for (--i, --bank; i >= 0; --i, --bank) {
-               if (!bank->valid)
-                       continue;
-               gpiochip_remove(&bank->gpio_chip);
-       }
-       return ret;
-}
-
-static int rockchip_gpiolib_unregister(struct platform_device *pdev,
-                                               struct rockchip_pinctrl *info)
-{
-       struct rockchip_pin_ctrl *ctrl = info->ctrl;
-       struct rockchip_pin_bank *bank = ctrl->pin_banks;
-       int i;
-
-       for (i = 0; i < ctrl->nr_banks; ++i, ++bank) {
-               if (!bank->valid)
-                       continue;
-               gpiochip_remove(&bank->gpio_chip);
-       }
-
-       return 0;
-}
-
-static int rockchip_get_bank_data(struct rockchip_pin_bank *bank,
-                                 struct rockchip_pinctrl *info)
-{
-       struct resource res;
-       void __iomem *base;
-
-       if (of_address_to_resource(bank->of_node, 0, &res)) {
-               dev_err(info->dev, "cannot find IO resource for bank\n");
-               return -ENOENT;
-       }
-
-       bank->reg_base = devm_ioremap_resource(info->dev, &res);
-       if (IS_ERR(bank->reg_base))
-               return PTR_ERR(bank->reg_base);
-
-       /*
-        * special case, where parts of the pull setting-registers are
-        * part of the PMU register space
-        */
-       if (of_device_is_compatible(bank->of_node,
-                                   "rockchip,rk3188-gpio-bank0")) {
-               struct device_node *node;
-
-               node = of_parse_phandle(bank->of_node->parent,
-                                       "rockchip,pmu", 0);
-               if (!node) {
-                       if (of_address_to_resource(bank->of_node, 1, &res)) {
-                               dev_err(info->dev, "cannot find IO resource for bank\n");
-                               return -ENOENT;
-                       }
-
-                       base = devm_ioremap_resource(info->dev, &res);
-                       if (IS_ERR(base))
-                               return PTR_ERR(base);
-                       rockchip_regmap_config.max_register =
-                                                   resource_size(&res) - 4;
-                       rockchip_regmap_config.name =
-                                           "rockchip,rk3188-gpio-bank0-pull";
-                       bank->regmap_pull = devm_regmap_init_mmio(info->dev,
-                                                   base,
-                                                   &rockchip_regmap_config);
-               }
-               of_node_put(node);
-       }
-
-       bank->irq = irq_of_parse_and_map(bank->of_node, 0);
-
-       bank->clk = of_clk_get(bank->of_node, 0);
-       if (IS_ERR(bank->clk))
-               return PTR_ERR(bank->clk);
-
-       return clk_prepare(bank->clk);
-}
-
 static const struct of_device_id rockchip_pinctrl_dt_match[];
 
 /* retrieve the soc specific data */
@@ -3329,7 +2474,6 @@ static struct rockchip_pin_ctrl *rockchip_pinctrl_get_soc_data(
 {
        const struct of_device_id *match;
        struct device_node *node = pdev->dev.of_node;
-       struct device_node *np;
        struct rockchip_pin_ctrl *ctrl;
        struct rockchip_pin_bank *bank;
        int grf_offs, pmu_offs, drv_grf_offs, drv_pmu_offs, i, j;
@@ -3337,23 +2481,6 @@ static struct rockchip_pin_ctrl *rockchip_pinctrl_get_soc_data(
        match = of_match_node(rockchip_pinctrl_dt_match, node);
        ctrl = (struct rockchip_pin_ctrl *)match->data;
 
-       for_each_child_of_node(node, np) {
-               if (!of_find_property(np, "gpio-controller", NULL))
-                       continue;
-
-               bank = ctrl->pin_banks;
-               for (i = 0; i < ctrl->nr_banks; ++i, ++bank) {
-                       if (!strcmp(bank->name, np->name)) {
-                               bank->of_node = np;
-
-                               if (!rockchip_get_bank_data(bank, d))
-                                       bank->valid = true;
-
-                               break;
-                       }
-               }
-       }
-
        grf_offs = ctrl->grf_mux_offset;
        pmu_offs = ctrl->pmu_mux_offset;
        drv_pmu_offs = ctrl->pmu_drv_offset;
@@ -3574,18 +2701,18 @@ static int rockchip_pinctrl_probe(struct platform_device *pdev)
                        return PTR_ERR(info->regmap_pmu);
        }
 
-       ret = rockchip_gpiolib_register(pdev, info);
+       ret = rockchip_pinctrl_register(pdev, info);
        if (ret)
                return ret;
 
-       ret = rockchip_pinctrl_register(pdev, info);
+       platform_set_drvdata(pdev, info);
+
+       ret = of_platform_populate(np, rockchip_bank_match, NULL, NULL);
        if (ret) {
-               rockchip_gpiolib_unregister(pdev, info);
+               dev_err(&pdev->dev, "failed to register gpio device\n");
                return ret;
        }
 
-       platform_set_drvdata(pdev, info);
-
        return 0;
 }
 
diff --git a/drivers/pinctrl/pinctrl-rockchip.h b/drivers/pinctrl/pinctrl-rockchip.h
new file mode 100644 (file)
index 0000000..589d4d2
--- /dev/null
@@ -0,0 +1,287 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020-2021 Rockchip Electronics Co. Ltd.
+ *
+ * Copyright (c) 2013 MundoReader S.L.
+ * Author: Heiko Stuebner <heiko@sntech.de>
+ *
+ * With some ideas taken from pinctrl-samsung:
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ *             http://www.samsung.com
+ * Copyright (c) 2012 Linaro Ltd
+ *             https://www.linaro.org
+ *
+ * and pinctrl-at91:
+ * Copyright (C) 2011-2012 Jean-Christophe PLAGNIOL-VILLARD <plagnioj@jcrosoft.com>
+ */
+
+#ifndef _PINCTRL_ROCKCHIP_H
+#define _PINCTRL_ROCKCHIP_H
+
+enum rockchip_pinctrl_type {
+       PX30,
+       RV1108,
+       RK2928,
+       RK3066B,
+       RK3128,
+       RK3188,
+       RK3288,
+       RK3308,
+       RK3368,
+       RK3399,
+       RK3568,
+};
+
+/**
+ * struct rockchip_gpio_regs
+ * @port_dr: data register
+ * @port_ddr: data direction register
+ * @int_en: interrupt enable
+ * @int_mask: interrupt mask
+ * @int_type: interrupt trigger type, such as high, low, edge trriger type.
+ * @int_polarity: interrupt polarity enable register
+ * @int_bothedge: interrupt bothedge enable register
+ * @int_status: interrupt status register
+ * @int_rawstatus: int_status = int_rawstatus & int_mask
+ * @debounce: enable debounce for interrupt signal
+ * @dbclk_div_en: enable divider for debounce clock
+ * @dbclk_div_con: setting for divider of debounce clock
+ * @port_eoi: end of interrupt of the port
+ * @ext_port: port data from external
+ * @version_id: controller version register
+ */
+struct rockchip_gpio_regs {
+       u32 port_dr;
+       u32 port_ddr;
+       u32 int_en;
+       u32 int_mask;
+       u32 int_type;
+       u32 int_polarity;
+       u32 int_bothedge;
+       u32 int_status;
+       u32 int_rawstatus;
+       u32 debounce;
+       u32 dbclk_div_en;
+       u32 dbclk_div_con;
+       u32 port_eoi;
+       u32 ext_port;
+       u32 version_id;
+};
+
+/**
+ * struct rockchip_iomux
+ * @type: iomux variant using IOMUX_* constants
+ * @offset: if initialized to -1 it will be autocalculated, by specifying
+ *         an initial offset value the relevant source offset can be reset
+ *         to a new value for autocalculating the following iomux registers.
+ */
+struct rockchip_iomux {
+       int type;
+       int offset;
+};
+
+/*
+ * enum type index corresponding to rockchip_perpin_drv_list arrays index.
+ */
+enum rockchip_pin_drv_type {
+       DRV_TYPE_IO_DEFAULT = 0,
+       DRV_TYPE_IO_1V8_OR_3V0,
+       DRV_TYPE_IO_1V8_ONLY,
+       DRV_TYPE_IO_1V8_3V0_AUTO,
+       DRV_TYPE_IO_3V3_ONLY,
+       DRV_TYPE_MAX
+};
+
+/*
+ * enum type index corresponding to rockchip_pull_list arrays index.
+ */
+enum rockchip_pin_pull_type {
+       PULL_TYPE_IO_DEFAULT = 0,
+       PULL_TYPE_IO_1V8_ONLY,
+       PULL_TYPE_MAX
+};
+
+/**
+ * struct rockchip_drv
+ * @drv_type: drive strength variant using rockchip_perpin_drv_type
+ * @offset: if initialized to -1 it will be autocalculated, by specifying
+ *         an initial offset value the relevant source offset can be reset
+ *         to a new value for autocalculating the following drive strength
+ *         registers. if used chips own cal_drv func instead to calculate
+ *         registers offset, the variant could be ignored.
+ */
+struct rockchip_drv {
+       enum rockchip_pin_drv_type      drv_type;
+       int                             offset;
+};
+
+/**
+ * struct rockchip_pin_bank
+ * @dev: the pinctrl device bind to the bank
+ * @reg_base: register base of the gpio bank
+ * @regmap_pull: optional separate register for additional pull settings
+ * @clk: clock of the gpio bank
+ * @db_clk: clock of the gpio debounce
+ * @irq: interrupt of the gpio bank
+ * @saved_masks: Saved content of GPIO_INTEN at suspend time.
+ * @pin_base: first pin number
+ * @nr_pins: number of pins in this bank
+ * @name: name of the bank
+ * @bank_num: number of the bank, to account for holes
+ * @iomux: array describing the 4 iomux sources of the bank
+ * @drv: array describing the 4 drive strength sources of the bank
+ * @pull_type: array describing the 4 pull type sources of the bank
+ * @valid: is all necessary information present
+ * @of_node: dt node of this bank
+ * @drvdata: common pinctrl basedata
+ * @domain: irqdomain of the gpio bank
+ * @gpio_chip: gpiolib chip
+ * @grange: gpio range
+ * @slock: spinlock for the gpio bank
+ * @toggle_edge_mode: bit mask to toggle (falling/rising) edge mode
+ * @recalced_mask: bit mask to indicate a need to recalulate the mask
+ * @route_mask: bits describing the routing pins of per bank
+ */
+struct rockchip_pin_bank {
+       struct device                   *dev;
+       void __iomem                    *reg_base;
+       struct regmap                   *regmap_pull;
+       struct clk                      *clk;
+       struct clk                      *db_clk;
+       int                             irq;
+       u32                             saved_masks;
+       u32                             pin_base;
+       u8                              nr_pins;
+       char                            *name;
+       u8                              bank_num;
+       struct rockchip_iomux           iomux[4];
+       struct rockchip_drv             drv[4];
+       enum rockchip_pin_pull_type     pull_type[4];
+       bool                            valid;
+       struct device_node              *of_node;
+       struct rockchip_pinctrl         *drvdata;
+       struct irq_domain               *domain;
+       struct gpio_chip                gpio_chip;
+       struct pinctrl_gpio_range       grange;
+       raw_spinlock_t                  slock;
+       const struct rockchip_gpio_regs *gpio_regs;
+       u32                             gpio_type;
+       u32                             toggle_edge_mode;
+       u32                             recalced_mask;
+       u32                             route_mask;
+};
+
+/**
+ * struct rockchip_mux_recalced_data: represent a pin iomux data.
+ * @num: bank number.
+ * @pin: pin number.
+ * @bit: index at register.
+ * @reg: register offset.
+ * @mask: mask bit
+ */
+struct rockchip_mux_recalced_data {
+       u8 num;
+       u8 pin;
+       u32 reg;
+       u8 bit;
+       u8 mask;
+};
+
+enum rockchip_mux_route_location {
+       ROCKCHIP_ROUTE_SAME = 0,
+       ROCKCHIP_ROUTE_PMU,
+       ROCKCHIP_ROUTE_GRF,
+};
+
+/**
+ * struct rockchip_mux_recalced_data: represent a pin iomux data.
+ * @bank_num: bank number.
+ * @pin: index at register or used to calc index.
+ * @func: the min pin.
+ * @route_location: the mux route location (same, pmu, grf).
+ * @route_offset: the max pin.
+ * @route_val: the register offset.
+ */
+struct rockchip_mux_route_data {
+       u8 bank_num;
+       u8 pin;
+       u8 func;
+       enum rockchip_mux_route_location route_location;
+       u32 route_offset;
+       u32 route_val;
+};
+
+struct rockchip_pin_ctrl {
+       struct rockchip_pin_bank        *pin_banks;
+       u32                             nr_banks;
+       u32                             nr_pins;
+       char                            *label;
+       enum rockchip_pinctrl_type      type;
+       int                             grf_mux_offset;
+       int                             pmu_mux_offset;
+       int                             grf_drv_offset;
+       int                             pmu_drv_offset;
+       struct rockchip_mux_recalced_data *iomux_recalced;
+       u32                             niomux_recalced;
+       struct rockchip_mux_route_data *iomux_routes;
+       u32                             niomux_routes;
+
+       void    (*pull_calc_reg)(struct rockchip_pin_bank *bank,
+                                   int pin_num, struct regmap **regmap,
+                                   int *reg, u8 *bit);
+       void    (*drv_calc_reg)(struct rockchip_pin_bank *bank,
+                                   int pin_num, struct regmap **regmap,
+                                   int *reg, u8 *bit);
+       int     (*schmitt_calc_reg)(struct rockchip_pin_bank *bank,
+                                   int pin_num, struct regmap **regmap,
+                                   int *reg, u8 *bit);
+};
+
+struct rockchip_pin_config {
+       unsigned int            func;
+       unsigned long           *configs;
+       unsigned int            nconfigs;
+};
+
+/**
+ * struct rockchip_pin_group: represent group of pins of a pinmux function.
+ * @name: name of the pin group, used to lookup the group.
+ * @pins: the pins included in this group.
+ * @npins: number of pins included in this group.
+ * @data: local pin configuration
+ */
+struct rockchip_pin_group {
+       const char                      *name;
+       unsigned int                    npins;
+       unsigned int                    *pins;
+       struct rockchip_pin_config      *data;
+};
+
+/**
+ * struct rockchip_pmx_func: represent a pin function.
+ * @name: name of the pin function, used to lookup the function.
+ * @groups: one or more names of pin groups that provide this function.
+ * @ngroups: number of groups included in @groups.
+ */
+struct rockchip_pmx_func {
+       const char              *name;
+       const char              **groups;
+       u8                      ngroups;
+};
+
+struct rockchip_pinctrl {
+       struct regmap                   *regmap_base;
+       int                             reg_size;
+       struct regmap                   *regmap_pull;
+       struct regmap                   *regmap_pmu;
+       struct device                   *dev;
+       struct rockchip_pin_ctrl        *ctrl;
+       struct pinctrl_desc             pctl;
+       struct pinctrl_dev              *pctl_dev;
+       struct rockchip_pin_group       *groups;
+       unsigned int                    ngroups;
+       struct rockchip_pmx_func        *functions;
+       unsigned int                    nfunctions;
+};
+
+#endif
index e3aa647..aa6e722 100644 (file)
@@ -1491,8 +1491,8 @@ static int pcs_irq_handle(struct pcs_soc_data *pcs_soc)
                mask = pcs->read(pcswi->reg);
                raw_spin_unlock(&pcs->lock);
                if (mask & pcs_soc->irq_status_mask) {
-                       generic_handle_irq(irq_find_mapping(pcs->domain,
-                                                           pcswi->hwirq));
+                       generic_handle_domain_irq(pcs->domain,
+                                                 pcswi->hwirq);
                        count++;
                }
        }
index 43d9e6c..fa3edb4 100644 (file)
@@ -1420,7 +1420,7 @@ static void __gpio_irq_handler(struct st_gpio_bank *bank)
                                        continue;
                        }
 
-                       generic_handle_irq(irq_find_mapping(bank->gpio_chip.irq.domain, n));
+                       generic_handle_domain_irq(bank->gpio_chip.irq.domain, n);
                }
        }
 }
index 2f51b4f..cad4e60 100644 (file)
@@ -13,7 +13,7 @@ config PINCTRL_MSM
 
 config PINCTRL_APQ8064
        tristate "Qualcomm APQ8064 pin controller driver"
-       depends on GPIOLIB && OF
+       depends on OF
        depends on PINCTRL_MSM
        help
          This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -21,7 +21,7 @@ config PINCTRL_APQ8064
 
 config PINCTRL_APQ8084
        tristate "Qualcomm APQ8084 pin controller driver"
-       depends on GPIOLIB && OF
+       depends on OF
        depends on PINCTRL_MSM
        help
          This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -29,7 +29,7 @@ config PINCTRL_APQ8084
 
 config PINCTRL_IPQ4019
        tristate "Qualcomm IPQ4019 pin controller driver"
-       depends on GPIOLIB && OF
+       depends on OF
        depends on PINCTRL_MSM
        help
          This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -37,7 +37,7 @@ config PINCTRL_IPQ4019
 
 config PINCTRL_IPQ8064
        tristate "Qualcomm IPQ8064 pin controller driver"
-       depends on GPIOLIB && OF
+       depends on OF
        depends on PINCTRL_MSM
        help
          This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -45,7 +45,7 @@ config PINCTRL_IPQ8064
 
 config PINCTRL_IPQ8074
        tristate "Qualcomm Technologies, Inc. IPQ8074 pin controller driver"
-       depends on GPIOLIB && OF
+       depends on OF
        depends on PINCTRL_MSM
        help
          This is the pinctrl, pinmux, pinconf and gpiolib driver for
@@ -55,7 +55,7 @@ config PINCTRL_IPQ8074
 
 config PINCTRL_IPQ6018
        tristate "Qualcomm Technologies, Inc. IPQ6018 pin controller driver"
-       depends on GPIOLIB && OF
+       depends on OF
        depends on PINCTRL_MSM
        help
          This is the pinctrl, pinmux, pinconf and gpiolib driver for
@@ -65,7 +65,7 @@ config PINCTRL_IPQ6018
 
 config PINCTRL_MSM8226
        tristate "Qualcomm 8226 pin controller driver"
-       depends on GPIOLIB && OF
+       depends on OF
        depends on PINCTRL_MSM
        help
          This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -74,7 +74,7 @@ config PINCTRL_MSM8226
 
 config PINCTRL_MSM8660
        tristate "Qualcomm 8660 pin controller driver"
-       depends on GPIOLIB && OF
+       depends on OF
        depends on PINCTRL_MSM
        help
          This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -82,7 +82,7 @@ config PINCTRL_MSM8660
 
 config PINCTRL_MSM8960
        tristate "Qualcomm 8960 pin controller driver"
-       depends on GPIOLIB && OF
+       depends on OF
        depends on PINCTRL_MSM
        help
          This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -90,7 +90,7 @@ config PINCTRL_MSM8960
 
 config PINCTRL_MDM9615
        tristate "Qualcomm 9615 pin controller driver"
-       depends on GPIOLIB && OF
+       depends on OF
        depends on PINCTRL_MSM
        help
          This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -98,7 +98,7 @@ config PINCTRL_MDM9615
 
 config PINCTRL_MSM8X74
        tristate "Qualcomm 8x74 pin controller driver"
-       depends on GPIOLIB && OF
+       depends on OF
        depends on PINCTRL_MSM
        help
          This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -106,7 +106,7 @@ config PINCTRL_MSM8X74
 
 config PINCTRL_MSM8916
        tristate "Qualcomm 8916 pin controller driver"
-       depends on GPIOLIB && OF
+       depends on OF
        depends on PINCTRL_MSM
        help
          This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -114,7 +114,7 @@ config PINCTRL_MSM8916
 
 config PINCTRL_MSM8953
        tristate "Qualcomm 8953 pin controller driver"
-       depends on GPIOLIB && OF
+       depends on OF
        depends on PINCTRL_MSM
        help
          This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -124,7 +124,7 @@ config PINCTRL_MSM8953
 
 config PINCTRL_MSM8976
        tristate "Qualcomm 8976 pin controller driver"
-       depends on GPIOLIB && OF
+       depends on OF
        depends on PINCTRL_MSM
        help
          This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -134,7 +134,7 @@ config PINCTRL_MSM8976
 
 config PINCTRL_MSM8994
        tristate "Qualcomm 8994 pin controller driver"
-       depends on GPIOLIB && OF
+       depends on OF
        depends on PINCTRL_MSM
        help
          This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -143,7 +143,7 @@ config PINCTRL_MSM8994
 
 config PINCTRL_MSM8996
        tristate "Qualcomm MSM8996 pin controller driver"
-       depends on GPIOLIB && OF
+       depends on OF
        depends on PINCTRL_MSM
        help
          This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -151,7 +151,7 @@ config PINCTRL_MSM8996
 
 config PINCTRL_MSM8998
        tristate "Qualcomm MSM8998 pin controller driver"
-       depends on GPIOLIB && OF
+       depends on OF
        depends on PINCTRL_MSM
        help
          This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -159,7 +159,7 @@ config PINCTRL_MSM8998
 
 config PINCTRL_QCS404
        tristate "Qualcomm QCS404 pin controller driver"
-       depends on GPIOLIB && OF
+       depends on OF
        depends on PINCTRL_MSM
        help
          This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -167,7 +167,7 @@ config PINCTRL_QCS404
 
 config PINCTRL_QDF2XXX
        tristate "Qualcomm Technologies QDF2xxx pin controller driver"
-       depends on GPIOLIB && ACPI
+       depends on ACPI
        depends on PINCTRL_MSM
        help
          This is the GPIO driver for the TLMM block found on the
@@ -175,7 +175,7 @@ config PINCTRL_QDF2XXX
 
 config PINCTRL_QCOM_SPMI_PMIC
        tristate "Qualcomm SPMI PMIC pin controller driver"
-       depends on GPIOLIB && OF && SPMI
+       depends on OF && SPMI
        select REGMAP_SPMI
        select PINMUX
        select PINCONF
@@ -190,7 +190,7 @@ config PINCTRL_QCOM_SPMI_PMIC
 
 config PINCTRL_QCOM_SSBI_PMIC
        tristate "Qualcomm SSBI PMIC pin controller driver"
-       depends on GPIOLIB && OF
+       depends on OF
        select PINMUX
        select PINCONF
        select GENERIC_PINCONF
@@ -204,7 +204,7 @@ config PINCTRL_QCOM_SSBI_PMIC
 
 config PINCTRL_SC7180
        tristate "Qualcomm Technologies Inc SC7180 pin controller driver"
-       depends on GPIOLIB && OF
+       depends on OF
        depends on PINCTRL_MSM
        help
          This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -213,7 +213,7 @@ config PINCTRL_SC7180
 
 config PINCTRL_SC7280
        tristate "Qualcomm Technologies Inc SC7280 pin controller driver"
-       depends on GPIOLIB && OF
+       depends on OF
        depends on PINCTRL_MSM
        help
          This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -222,7 +222,7 @@ config PINCTRL_SC7280
 
 config PINCTRL_SC8180X
        tristate "Qualcomm Technologies Inc SC8180x pin controller driver"
-       depends on GPIOLIB && (OF || ACPI)
+       depends on (OF || ACPI)
        depends on PINCTRL_MSM
        help
          This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -231,7 +231,7 @@ config PINCTRL_SC8180X
 
 config PINCTRL_SDM660
        tristate "Qualcomm Technologies Inc SDM660 pin controller driver"
-       depends on GPIOLIB && OF
+       depends on OF
        depends on PINCTRL_MSM
        help
         This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -240,7 +240,7 @@ config PINCTRL_SDM660
 
 config PINCTRL_SDM845
        tristate "Qualcomm Technologies Inc SDM845 pin controller driver"
-       depends on GPIOLIB && (OF || ACPI)
+       depends on (OF || ACPI)
        depends on PINCTRL_MSM
        help
         This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -249,7 +249,7 @@ config PINCTRL_SDM845
 
 config PINCTRL_SDX55
        tristate "Qualcomm Technologies Inc SDX55 pin controller driver"
-       depends on GPIOLIB && OF
+       depends on OF
        depends on PINCTRL_MSM
        help
         This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -258,7 +258,7 @@ config PINCTRL_SDX55
 
 config PINCTRL_SM6125
        tristate "Qualcomm Technologies Inc SM6125 pin controller driver"
-       depends on GPIOLIB && OF
+       depends on OF
        depends on PINCTRL_MSM
        help
         This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -267,7 +267,7 @@ config PINCTRL_SM6125
 
 config PINCTRL_SM8150
        tristate "Qualcomm Technologies Inc SM8150 pin controller driver"
-       depends on GPIOLIB && OF
+       depends on OF
        depends on PINCTRL_MSM
        help
         This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -276,7 +276,7 @@ config PINCTRL_SM8150
 
 config PINCTRL_SM8250
        tristate "Qualcomm Technologies Inc SM8250 pin controller driver"
-       depends on GPIOLIB && OF
+       depends on OF
        depends on PINCTRL_MSM
        help
          This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -285,8 +285,7 @@ config PINCTRL_SM8250
 
 config PINCTRL_SM8350
        tristate "Qualcomm Technologies Inc SM8350 pin controller driver"
-       depends on GPIOLIB && OF
-       select PINCTRL_MSM
+       depends on PINCTRL_MSM
        help
          This is the pinctrl, pinmux, pinconf and gpiolib driver for the
          Qualcomm Technologies Inc TLMM block found on the Qualcomm
index d70caec..8476a8a 100644 (file)
@@ -1177,7 +1177,6 @@ static void msm_gpio_irq_handler(struct irq_desc *desc)
        const struct msm_pingroup *g;
        struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
        struct irq_chip *chip = irq_desc_get_chip(desc);
-       int irq_pin;
        int handled = 0;
        u32 val;
        int i;
@@ -1192,8 +1191,7 @@ static void msm_gpio_irq_handler(struct irq_desc *desc)
                g = &pctrl->soc->groups[i];
                val = msm_readl_intr_status(pctrl, g);
                if (val & BIT(g->intr_status_bit)) {
-                       irq_pin = irq_find_mapping(gc->irq.domain, i);
-                       generic_handle_irq(irq_pin);
+                       generic_handle_domain_irq(gc->irq.domain, i);
                        handled++;
                }
        }
index 2b99f41..0489c89 100644 (file)
@@ -246,7 +246,8 @@ static irqreturn_t exynos_eint_gpio_irq(int irq, void *data)
 {
        struct samsung_pinctrl_drv_data *d = data;
        struct samsung_pin_bank *bank = d->pin_banks;
-       unsigned int svc, group, pin, virq;
+       unsigned int svc, group, pin;
+       int ret;
 
        svc = readl(bank->eint_base + EXYNOS_SVC_OFFSET);
        group = EXYNOS_SVC_GROUP(svc);
@@ -256,10 +257,10 @@ static irqreturn_t exynos_eint_gpio_irq(int irq, void *data)
                return IRQ_HANDLED;
        bank += (group - 1);
 
-       virq = irq_linear_revmap(bank->irq_domain, pin);
-       if (!virq)
+       ret = generic_handle_domain_irq(bank->irq_domain, pin);
+       if (ret)
                return IRQ_NONE;
-       generic_handle_irq(virq);
+
        return IRQ_HANDLED;
 }
 
@@ -473,12 +474,10 @@ static void exynos_irq_eint0_15(struct irq_desc *desc)
        struct exynos_weint_data *eintd = irq_desc_get_handler_data(desc);
        struct samsung_pin_bank *bank = eintd->bank;
        struct irq_chip *chip = irq_desc_get_chip(desc);
-       int eint_irq;
 
        chained_irq_enter(chip, desc);
 
-       eint_irq = irq_linear_revmap(bank->irq_domain, eintd->irq);
-       generic_handle_irq(eint_irq);
+       generic_handle_domain_irq(bank->irq_domain, eintd->irq);
 
        chained_irq_exit(chip, desc);
 }
@@ -490,7 +489,7 @@ static inline void exynos_irq_demux_eint(unsigned int pend,
 
        while (pend) {
                irq = fls(pend) - 1;
-               generic_handle_irq(irq_find_mapping(domain, irq));
+               generic_handle_domain_irq(domain, irq);
                pend &= ~(1 << irq);
        }
 }
index 00d77d6..ac1eba3 100644 (file)
@@ -234,14 +234,12 @@ static void s3c2410_demux_eint0_3(struct irq_desc *desc)
 {
        struct irq_data *data = irq_desc_get_irq_data(desc);
        struct s3c24xx_eint_data *eint_data = irq_desc_get_handler_data(desc);
-       unsigned int virq;
+       int ret;
 
        /* the first 4 eints have a simple 1 to 1 mapping */
-       virq = irq_linear_revmap(eint_data->domains[data->hwirq], data->hwirq);
+       ret = generic_handle_domain_irq(eint_data->domains[data->hwirq], data->hwirq);
        /* Something must be really wrong if an unmapped EINT is unmasked */
-       BUG_ON(!virq);
-
-       generic_handle_irq(virq);
+       BUG_ON(ret);
 }
 
 /* Handling of EINTs 0-3 on S3C2412 and S3C2413 */
@@ -290,16 +288,14 @@ static void s3c2412_demux_eint0_3(struct irq_desc *desc)
        struct s3c24xx_eint_data *eint_data = irq_desc_get_handler_data(desc);
        struct irq_data *data = irq_desc_get_irq_data(desc);
        struct irq_chip *chip = irq_data_get_irq_chip(data);
-       unsigned int virq;
+       int ret;
 
        chained_irq_enter(chip, desc);
 
        /* the first 4 eints have a simple 1 to 1 mapping */
-       virq = irq_linear_revmap(eint_data->domains[data->hwirq], data->hwirq);
+       ret = generic_handle_domain_irq(eint_data->domains[data->hwirq], data->hwirq);
        /* Something must be really wrong if an unmapped EINT is unmasked */
-       BUG_ON(!virq);
-
-       generic_handle_irq(virq);
+       BUG_ON(ret);
 
        chained_irq_exit(chip, desc);
 }
@@ -364,15 +360,14 @@ static inline void s3c24xx_demux_eint(struct irq_desc *desc,
        pend &= range;
 
        while (pend) {
-               unsigned int virq, irq;
+               unsigned int irq;
+               int ret;
 
                irq = __ffs(pend);
                pend &= ~(1 << irq);
-               virq = irq_linear_revmap(data->domains[irq], irq - offset);
+               ret = generic_handle_domain_irq(data->domains[irq], irq - offset);
                /* Something is really wrong if an unmapped EINT is unmasked */
-               BUG_ON(!virq);
-
-               generic_handle_irq(virq);
+               BUG_ON(ret);
        }
 
        chained_irq_exit(chip, desc);
index 53e2a64..c5f95a1 100644 (file)
@@ -414,7 +414,7 @@ static void s3c64xx_eint_gpio_irq(struct irq_desc *desc)
                unsigned int svc;
                unsigned int group;
                unsigned int pin;
-               unsigned int virq;
+               int ret;
 
                svc = readl(drvdata->virt_base + SERVICE_REG);
                group = SVC_GROUP(svc);
@@ -431,14 +431,12 @@ static void s3c64xx_eint_gpio_irq(struct irq_desc *desc)
                                pin -= 8;
                }
 
-               virq = irq_linear_revmap(data->domains[group], pin);
+               ret = generic_handle_domain_irq(data->domains[group], pin);
                /*
                 * Something must be really wrong if an unmapped EINT
                 * was unmasked...
                 */
-               BUG_ON(!virq);
-
-               generic_handle_irq(virq);
+               BUG_ON(ret);
        } while (1);
 
        chained_irq_exit(chip, desc);
@@ -607,18 +605,17 @@ static inline void s3c64xx_irq_demux_eint(struct irq_desc *desc, u32 range)
        pend &= range;
 
        while (pend) {
-               unsigned int virq, irq;
+               unsigned int irq;
+               int ret;
 
                irq = fls(pend) - 1;
                pend &= ~(1 << irq);
-               virq = irq_linear_revmap(data->domains[irq], data->pins[irq]);
+               ret = generic_handle_domain_irq(data->domains[irq], data->pins[irq]);
                /*
                 * Something must be really wrong if an unmapped EINT
                 * was unmasked...
                 */
-               BUG_ON(!virq);
-
-               generic_handle_irq(virq);
+               BUG_ON(ret);
        }
 
        chained_irq_exit(chip, desc);
index 1ebbc49..43bb334 100644 (file)
@@ -400,8 +400,7 @@ static void plgpio_irq_handler(struct irq_desc *desc)
 
                        /* get correct irq line number */
                        pin = i * MAX_GPIO_PER_REG + pin;
-                       generic_handle_irq(
-                               irq_find_mapping(gc->irq.domain, pin));
+                       generic_handle_domain_irq(gc->irq.domain, pin);
                }
        }
        chained_irq_exit(irqchip, desc);
index dc8d39a..862c84e 100644 (file)
@@ -1149,11 +1149,9 @@ static void sunxi_pinctrl_irq_handler(struct irq_desc *desc)
        if (val) {
                int irqoffset;
 
-               for_each_set_bit(irqoffset, &val, IRQ_PER_BANK) {
-                       int pin_irq = irq_find_mapping(pctl->domain,
-                                                      bank * IRQ_PER_BANK + irqoffset);
-                       generic_handle_irq(pin_irq);
-               }
+               for_each_set_bit(irqoffset, &val, IRQ_PER_BANK)
+                       generic_handle_domain_irq(pctl->domain,
+                                                 bank * IRQ_PER_BANK + irqoffset);
        }
 
        chained_irq_exit(chip, desc);
@@ -1219,10 +1217,12 @@ static int sunxi_pinctrl_build_state(struct platform_device *pdev)
        }
 
        /*
-        * We suppose that we won't have any more functions than pins,
-        * we'll reallocate that later anyway
+        * Find an upper bound for the maximum number of functions: in
+        * the worst case we have gpio_in, gpio_out, irq and up to four
+        * special functions per pin, plus one entry for the sentinel.
+        * We'll reallocate that later anyway.
         */
-       pctl->functions = kcalloc(pctl->ngroups,
+       pctl->functions = kcalloc(4 * pctl->ngroups + 4,
                                  sizeof(*pctl->functions),
                                  GFP_KERNEL);
        if (!pctl->functions)
index 7d385c3..d12db6c 100644 (file)
@@ -508,6 +508,7 @@ config THINKPAD_ACPI
        depends on RFKILL || RFKILL = n
        depends on ACPI_VIDEO || ACPI_VIDEO = n
        depends on BACKLIGHT_CLASS_DEVICE
+       depends on I2C
        select ACPI_PLATFORM_PROFILE
        select HWMON
        select NVRAM
@@ -691,6 +692,7 @@ config INTEL_HID_EVENT
        tristate "INTEL HID Event"
        depends on ACPI
        depends on INPUT
+       depends on I2C
        select INPUT_SPARSEKMAP
        help
          This driver provides support for the Intel HID Event hotkey interface.
@@ -742,6 +744,7 @@ config INTEL_VBTN
        tristate "INTEL VIRTUAL BUTTON"
        depends on ACPI
        depends on INPUT
+       depends on I2C
        select INPUT_SPARSEKMAP
        help
          This driver provides support for the Intel Virtual Button interface.
index b9da58e..3481479 100644 (file)
 #define AMD_PMC_RESULT_CMD_UNKNOWN           0xFE
 #define AMD_PMC_RESULT_FAILED                0xFF
 
+/* FCH SSC Registers */
+#define FCH_S0I3_ENTRY_TIME_L_OFFSET   0x30
+#define FCH_S0I3_ENTRY_TIME_H_OFFSET   0x34
+#define FCH_S0I3_EXIT_TIME_L_OFFSET    0x38
+#define FCH_S0I3_EXIT_TIME_H_OFFSET    0x3C
+#define FCH_SSC_MAPPING_SIZE           0x800
+#define FCH_BASE_PHY_ADDR_LOW          0xFED81100
+#define FCH_BASE_PHY_ADDR_HIGH         0x00000000
+
+/* SMU Message Definations */
+#define SMU_MSG_GETSMUVERSION          0x02
+#define SMU_MSG_LOG_GETDRAM_ADDR_HI    0x04
+#define SMU_MSG_LOG_GETDRAM_ADDR_LO    0x05
+#define SMU_MSG_LOG_START              0x06
+#define SMU_MSG_LOG_RESET              0x07
+#define SMU_MSG_LOG_DUMP_DATA          0x08
+#define SMU_MSG_GET_SUP_CONSTRAINTS    0x09
 /* List of supported CPU ids */
 #define AMD_CPU_ID_RV                  0x15D0
 #define AMD_CPU_ID_RN                  0x1630
 #define AMD_CPU_ID_PCO                 AMD_CPU_ID_RV
 #define AMD_CPU_ID_CZN                 AMD_CPU_ID_RN
+#define AMD_CPU_ID_YC                  0x14B5
 
-#define AMD_SMU_FW_VERSION             0x0
 #define PMC_MSG_DELAY_MIN_US           100
 #define RESPONSE_REGISTER_LOOP_MAX     200
 
+#define SOC_SUBSYSTEM_IP_MAX   12
+#define DELAY_MIN_US           2000
+#define DELAY_MAX_US           3000
 enum amd_pmc_def {
        MSG_TEST = 0x01,
        MSG_OS_HINT_PCO,
        MSG_OS_HINT_RN,
 };
 
+struct amd_pmc_bit_map {
+       const char *name;
+       u32 bit_mask;
+};
+
+static const struct amd_pmc_bit_map soc15_ip_blk[] = {
+       {"DISPLAY",     BIT(0)},
+       {"CPU",         BIT(1)},
+       {"GFX",         BIT(2)},
+       {"VDD",         BIT(3)},
+       {"ACP",         BIT(4)},
+       {"VCN",         BIT(5)},
+       {"ISP",         BIT(6)},
+       {"NBIO",        BIT(7)},
+       {"DF",          BIT(8)},
+       {"USB0",        BIT(9)},
+       {"USB1",        BIT(10)},
+       {"LAPIC",       BIT(11)},
+       {}
+};
+
 struct amd_pmc_dev {
        void __iomem *regbase;
-       void __iomem *smu_base;
+       void __iomem *smu_virt_addr;
+       void __iomem *fch_virt_addr;
        u32 base_addr;
        u32 cpu_id;
+       u32 active_ips;
        struct device *dev;
+       struct mutex lock; /* generic mutex lock */
 #if IS_ENABLED(CONFIG_DEBUG_FS)
        struct dentry *dbgfs_dir;
 #endif /* CONFIG_DEBUG_FS */
 };
 
 static struct amd_pmc_dev pmc;
+static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, bool set, u32 *data, u8 msg, bool ret);
 
 static inline u32 amd_pmc_reg_read(struct amd_pmc_dev *dev, int reg_offset)
 {
@@ -85,18 +130,77 @@ static inline void amd_pmc_reg_write(struct amd_pmc_dev *dev, int reg_offset, u3
        iowrite32(val, dev->regbase + reg_offset);
 }
 
+struct smu_metrics {
+       u32 table_version;
+       u32 hint_count;
+       u32 s0i3_cyclecount;
+       u32 timein_s0i2;
+       u64 timeentering_s0i3_lastcapture;
+       u64 timeentering_s0i3_totaltime;
+       u64 timeto_resume_to_os_lastcapture;
+       u64 timeto_resume_to_os_totaltime;
+       u64 timein_s0i3_lastcapture;
+       u64 timein_s0i3_totaltime;
+       u64 timein_swdrips_lastcapture;
+       u64 timein_swdrips_totaltime;
+       u64 timecondition_notmet_lastcapture[SOC_SUBSYSTEM_IP_MAX];
+       u64 timecondition_notmet_totaltime[SOC_SUBSYSTEM_IP_MAX];
+} __packed;
+
 #ifdef CONFIG_DEBUG_FS
 static int smu_fw_info_show(struct seq_file *s, void *unused)
 {
        struct amd_pmc_dev *dev = s->private;
-       u32 value;
+       struct smu_metrics table;
+       int idx;
+
+       if (dev->cpu_id == AMD_CPU_ID_PCO)
+               return -EINVAL;
+
+       memcpy_fromio(&table, dev->smu_virt_addr, sizeof(struct smu_metrics));
+
+       seq_puts(s, "\n=== SMU Statistics ===\n");
+       seq_printf(s, "Table Version: %d\n", table.table_version);
+       seq_printf(s, "Hint Count: %d\n", table.hint_count);
+       seq_printf(s, "S0i3 Cycle Count: %d\n", table.s0i3_cyclecount);
+       seq_printf(s, "Time (in us) to S0i3: %lld\n", table.timeentering_s0i3_lastcapture);
+       seq_printf(s, "Time (in us) in S0i3: %lld\n", table.timein_s0i3_lastcapture);
+
+       seq_puts(s, "\n=== Active time (in us) ===\n");
+       for (idx = 0 ; idx < SOC_SUBSYSTEM_IP_MAX ; idx++) {
+               if (soc15_ip_blk[idx].bit_mask & dev->active_ips)
+                       seq_printf(s, "%-8s : %lld\n", soc15_ip_blk[idx].name,
+                                  table.timecondition_notmet_lastcapture[idx]);
+       }
 
-       value = ioread32(dev->smu_base + AMD_SMU_FW_VERSION);
-       seq_printf(s, "SMU FW Info: %x\n", value);
        return 0;
 }
 DEFINE_SHOW_ATTRIBUTE(smu_fw_info);
 
+static int s0ix_stats_show(struct seq_file *s, void *unused)
+{
+       struct amd_pmc_dev *dev = s->private;
+       u64 entry_time, exit_time, residency;
+
+       entry_time = ioread32(dev->fch_virt_addr + FCH_S0I3_ENTRY_TIME_H_OFFSET);
+       entry_time = entry_time << 32 | ioread32(dev->fch_virt_addr + FCH_S0I3_ENTRY_TIME_L_OFFSET);
+
+       exit_time = ioread32(dev->fch_virt_addr + FCH_S0I3_EXIT_TIME_H_OFFSET);
+       exit_time = exit_time << 32 | ioread32(dev->fch_virt_addr + FCH_S0I3_EXIT_TIME_L_OFFSET);
+
+       /* It's in 48MHz. We need to convert it */
+       residency = exit_time - entry_time;
+       do_div(residency, 48);
+
+       seq_puts(s, "=== S0ix statistics ===\n");
+       seq_printf(s, "S0ix Entry Time: %lld\n", entry_time);
+       seq_printf(s, "S0ix Exit Time: %lld\n", exit_time);
+       seq_printf(s, "Residency Time: %lld\n", residency);
+
+       return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(s0ix_stats);
+
 static void amd_pmc_dbgfs_unregister(struct amd_pmc_dev *dev)
 {
        debugfs_remove_recursive(dev->dbgfs_dir);
@@ -107,6 +211,8 @@ static void amd_pmc_dbgfs_register(struct amd_pmc_dev *dev)
        dev->dbgfs_dir = debugfs_create_dir("amd_pmc", NULL);
        debugfs_create_file("smu_fw_info", 0644, dev->dbgfs_dir, dev,
                            &smu_fw_info_fops);
+       debugfs_create_file("s0ix_stats", 0644, dev->dbgfs_dir, dev,
+                           &s0ix_stats_fops);
 }
 #else
 static inline void amd_pmc_dbgfs_register(struct amd_pmc_dev *dev)
@@ -118,6 +224,32 @@ static inline void amd_pmc_dbgfs_unregister(struct amd_pmc_dev *dev)
 }
 #endif /* CONFIG_DEBUG_FS */
 
+static int amd_pmc_setup_smu_logging(struct amd_pmc_dev *dev)
+{
+       u32 phys_addr_low, phys_addr_hi;
+       u64 smu_phys_addr;
+
+       if (dev->cpu_id == AMD_CPU_ID_PCO)
+               return -EINVAL;
+
+       /* Get Active devices list from SMU */
+       amd_pmc_send_cmd(dev, 0, &dev->active_ips, SMU_MSG_GET_SUP_CONSTRAINTS, 1);
+
+       /* Get dram address */
+       amd_pmc_send_cmd(dev, 0, &phys_addr_low, SMU_MSG_LOG_GETDRAM_ADDR_LO, 1);
+       amd_pmc_send_cmd(dev, 0, &phys_addr_hi, SMU_MSG_LOG_GETDRAM_ADDR_HI, 1);
+       smu_phys_addr = ((u64)phys_addr_hi << 32 | phys_addr_low);
+
+       dev->smu_virt_addr = devm_ioremap(dev->dev, smu_phys_addr, sizeof(struct smu_metrics));
+       if (!dev->smu_virt_addr)
+               return -ENOMEM;
+
+       /* Start the logging */
+       amd_pmc_send_cmd(dev, 0, NULL, SMU_MSG_LOG_START, 0);
+
+       return 0;
+}
+
 static void amd_pmc_dump_registers(struct amd_pmc_dev *dev)
 {
        u32 value;
@@ -132,19 +264,19 @@ static void amd_pmc_dump_registers(struct amd_pmc_dev *dev)
        dev_dbg(dev->dev, "AMD_PMC_REGISTER_MESSAGE:%x\n", value);
 }
 
-static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, bool set)
+static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, bool set, u32 *data, u8 msg, bool ret)
 {
        int rc;
-       u8 msg;
        u32 val;
 
+       mutex_lock(&dev->lock);
        /* Wait until we get a valid response */
        rc = readx_poll_timeout(ioread32, dev->regbase + AMD_PMC_REGISTER_RESPONSE,
-                               val, val > 0, PMC_MSG_DELAY_MIN_US,
+                               val, val != 0, PMC_MSG_DELAY_MIN_US,
                                PMC_MSG_DELAY_MIN_US * RESPONSE_REGISTER_LOOP_MAX);
        if (rc) {
                dev_err(dev->dev, "failed to talk to SMU\n");
-               return rc;
+               goto out_unlock;
        }
 
        /* Write zero to response register */
@@ -154,34 +286,91 @@ static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, bool set)
        amd_pmc_reg_write(dev, AMD_PMC_REGISTER_ARGUMENT, set);
 
        /* Write message ID to message ID register */
-       msg = (dev->cpu_id == AMD_CPU_ID_RN) ? MSG_OS_HINT_RN : MSG_OS_HINT_PCO;
        amd_pmc_reg_write(dev, AMD_PMC_REGISTER_MESSAGE, msg);
-       return 0;
+
+       /* Wait until we get a valid response */
+       rc = readx_poll_timeout(ioread32, dev->regbase + AMD_PMC_REGISTER_RESPONSE,
+                               val, val != 0, PMC_MSG_DELAY_MIN_US,
+                               PMC_MSG_DELAY_MIN_US * RESPONSE_REGISTER_LOOP_MAX);
+       if (rc) {
+               dev_err(dev->dev, "SMU response timed out\n");
+               goto out_unlock;
+       }
+
+       switch (val) {
+       case AMD_PMC_RESULT_OK:
+               if (ret) {
+                       /* PMFW may take longer time to return back the data */
+                       usleep_range(DELAY_MIN_US, 10 * DELAY_MAX_US);
+                       *data = amd_pmc_reg_read(dev, AMD_PMC_REGISTER_ARGUMENT);
+               }
+               break;
+       case AMD_PMC_RESULT_CMD_REJECT_BUSY:
+               dev_err(dev->dev, "SMU not ready. err: 0x%x\n", val);
+               rc = -EBUSY;
+               goto out_unlock;
+       case AMD_PMC_RESULT_CMD_UNKNOWN:
+               dev_err(dev->dev, "SMU cmd unknown. err: 0x%x\n", val);
+               rc = -EINVAL;
+               goto out_unlock;
+       case AMD_PMC_RESULT_CMD_REJECT_PREREQ:
+       case AMD_PMC_RESULT_FAILED:
+       default:
+               dev_err(dev->dev, "SMU cmd failed. err: 0x%x\n", val);
+               rc = -EIO;
+               goto out_unlock;
+       }
+
+out_unlock:
+       mutex_unlock(&dev->lock);
+       amd_pmc_dump_registers(dev);
+       return rc;
+}
+
+static int amd_pmc_get_os_hint(struct amd_pmc_dev *dev)
+{
+       switch (dev->cpu_id) {
+       case AMD_CPU_ID_PCO:
+               return MSG_OS_HINT_PCO;
+       case AMD_CPU_ID_RN:
+       case AMD_CPU_ID_YC:
+               return MSG_OS_HINT_RN;
+       }
+       return -EINVAL;
 }
 
 static int __maybe_unused amd_pmc_suspend(struct device *dev)
 {
        struct amd_pmc_dev *pdev = dev_get_drvdata(dev);
        int rc;
+       u8 msg;
+
+       /* Reset and Start SMU logging - to monitor the s0i3 stats */
+       amd_pmc_send_cmd(pdev, 0, NULL, SMU_MSG_LOG_RESET, 0);
+       amd_pmc_send_cmd(pdev, 0, NULL, SMU_MSG_LOG_START, 0);
 
-       rc = amd_pmc_send_cmd(pdev, 1);
+       msg = amd_pmc_get_os_hint(pdev);
+       rc = amd_pmc_send_cmd(pdev, 1, NULL, msg, 0);
        if (rc)
                dev_err(pdev->dev, "suspend failed\n");
 
-       amd_pmc_dump_registers(pdev);
-       return 0;
+       return rc;
 }
 
 static int __maybe_unused amd_pmc_resume(struct device *dev)
 {
        struct amd_pmc_dev *pdev = dev_get_drvdata(dev);
        int rc;
+       u8 msg;
+
+       /* Let SMU know that we are looking for stats */
+       amd_pmc_send_cmd(pdev, 0, NULL, SMU_MSG_LOG_DUMP_DATA, 0);
 
-       rc = amd_pmc_send_cmd(pdev, 0);
+       msg = amd_pmc_get_os_hint(pdev);
+       rc = amd_pmc_send_cmd(pdev, 0, NULL, msg, 0);
        if (rc)
                dev_err(pdev->dev, "resume failed\n");
 
-       amd_pmc_dump_registers(pdev);
        return 0;
 }
 
@@ -190,6 +379,7 @@ static const struct dev_pm_ops amd_pmc_pm_ops = {
 };
 
 static const struct pci_device_id pmc_pci_ids[] = {
+       { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_YC) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_CZN) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_RN) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_PCO) },
@@ -201,9 +391,8 @@ static int amd_pmc_probe(struct platform_device *pdev)
 {
        struct amd_pmc_dev *dev = &pmc;
        struct pci_dev *rdev;
-       u32 base_addr_lo;
-       u32 base_addr_hi;
-       u64 base_addr;
+       u32 base_addr_lo, base_addr_hi;
+       u64 base_addr, fch_phys_addr;
        int err;
        u32 val;
 
@@ -248,16 +437,25 @@ static int amd_pmc_probe(struct platform_device *pdev)
        pci_dev_put(rdev);
        base_addr = ((u64)base_addr_hi << 32 | base_addr_lo);
 
-       dev->smu_base = devm_ioremap(dev->dev, base_addr, AMD_PMC_MAPPING_SIZE);
-       if (!dev->smu_base)
-               return -ENOMEM;
-
        dev->regbase = devm_ioremap(dev->dev, base_addr + AMD_PMC_BASE_ADDR_OFFSET,
                                    AMD_PMC_MAPPING_SIZE);
        if (!dev->regbase)
                return -ENOMEM;
 
-       amd_pmc_dump_registers(dev);
+       mutex_init(&dev->lock);
+
+       /* Use FCH registers to get the S0ix stats */
+       base_addr_lo = FCH_BASE_PHY_ADDR_LOW;
+       base_addr_hi = FCH_BASE_PHY_ADDR_HIGH;
+       fch_phys_addr = ((u64)base_addr_hi << 32 | base_addr_lo);
+       dev->fch_virt_addr = devm_ioremap(dev->dev, fch_phys_addr, FCH_SSC_MAPPING_SIZE);
+       if (!dev->fch_virt_addr)
+               return -ENOMEM;
+
+       /* Use SMU to get the s0i3 debug stats */
+       err = amd_pmc_setup_smu_logging(dev);
+       if (err)
+               dev_err(dev->dev, "SMU debugging info not supported on this platform\n");
 
        platform_set_drvdata(pdev, dev);
        amd_pmc_dbgfs_register(dev);
@@ -269,11 +467,14 @@ static int amd_pmc_remove(struct platform_device *pdev)
        struct amd_pmc_dev *dev = platform_get_drvdata(pdev);
 
        amd_pmc_dbgfs_unregister(dev);
+       mutex_destroy(&dev->lock);
        return 0;
 }
 
 static const struct acpi_device_id amd_pmc_acpi_ids[] = {
        {"AMDI0005", 0},
+       {"AMDI0006", 0},
+       {"AMDI0007", 0},
        {"AMD0004", 0},
        { }
 };
index 0cb927f..a81dc4b 100644 (file)
@@ -41,6 +41,10 @@ static int wapf = -1;
 module_param(wapf, uint, 0444);
 MODULE_PARM_DESC(wapf, "WAPF value");
 
+static int tablet_mode_sw = -1;
+module_param(tablet_mode_sw, uint, 0444);
+MODULE_PARM_DESC(tablet_mode_sw, "Tablet mode detect: -1:auto 0:disable 1:kbd-dock 2:lid-flip");
+
 static struct quirk_entry *quirks;
 
 static bool asus_q500a_i8042_filter(unsigned char data, unsigned char str,
@@ -458,6 +462,15 @@ static const struct dmi_system_id asus_quirks[] = {
                },
                .driver_data = &quirk_asus_use_lid_flip_devid,
        },
+       {
+               .callback = dmi_matched,
+               .ident = "ASUS TP200s / E205SA",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "E205SA"),
+               },
+               .driver_data = &quirk_asus_use_lid_flip_devid,
+       },
        {},
 };
 
@@ -477,6 +490,21 @@ static void asus_nb_wmi_quirks(struct asus_wmi_driver *driver)
        else
                wapf = quirks->wapf;
 
+       switch (tablet_mode_sw) {
+       case 0:
+               quirks->use_kbd_dock_devid = false;
+               quirks->use_lid_flip_devid = false;
+               break;
+       case 1:
+               quirks->use_kbd_dock_devid = true;
+               quirks->use_lid_flip_devid = false;
+               break;
+       case 2:
+               quirks->use_kbd_dock_devid = false;
+               quirks->use_lid_flip_devid = true;
+               break;
+       }
+
        if (quirks->i8042_filter) {
                ret = i8042_install_filter(quirks->i8042_filter);
                if (ret) {
diff --git a/drivers/platform/x86/dual_accel_detect.h b/drivers/platform/x86/dual_accel_detect.h
new file mode 100644 (file)
index 0000000..a9eae17
--- /dev/null
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Helper code to detect 360 degree hinges (yoga) style 2-in-1 devices using 2 accelerometers
+ * to allow the OS to determine the angle between the display and the base of the device.
+ *
+ * On Windows these are read by a special HingeAngleService process which calls undocumented
+ * ACPI methods, to let the firmware know if the 2-in-1 is in tablet- or laptop-mode.
+ * The firmware may use this to disable the kbd and touchpad to avoid spurious input in
+ * tablet-mode as well as to report SW_TABLET_MODE info to the OS.
+ *
+ * Since Linux does not call these undocumented methods, the SW_TABLET_MODE info reported
+ * by various drivers/platform/x86 drivers is incorrect. These drivers use the detection
+ * code in this file to disable SW_TABLET_MODE reporting to avoid reporting broken info
+ * (instead userspace can derive the status itself by directly reading the 2 accels).
+ */
+
+#include <linux/acpi.h>
+#include <linux/i2c.h>
+
+static int dual_accel_i2c_resource_count(struct acpi_resource *ares, void *data)
+{
+       struct acpi_resource_i2c_serialbus *sb;
+       int *count = data;
+
+       if (i2c_acpi_get_i2c_resource(ares, &sb))
+               *count = *count + 1;
+
+       return 1;
+}
+
+static int dual_accel_i2c_client_count(struct acpi_device *adev)
+{
+       int ret, count = 0;
+       LIST_HEAD(r);
+
+       ret = acpi_dev_get_resources(adev, &r, dual_accel_i2c_resource_count, &count);
+       if (ret < 0)
+               return ret;
+
+       acpi_dev_free_resource_list(&r);
+       return count;
+}
+
+static bool dual_accel_detect_bosc0200(void)
+{
+       struct acpi_device *adev;
+       int count;
+
+       adev = acpi_dev_get_first_match_dev("BOSC0200", NULL, -1);
+       if (!adev)
+               return false;
+
+       count = dual_accel_i2c_client_count(adev);
+
+       acpi_dev_put(adev);
+
+       return count == 2;
+}
+
+static bool dual_accel_detect(void)
+{
+       /* Systems which use a pair of accels with KIOX010A / KIOX020A ACPI ids */
+       if (acpi_dev_present("KIOX010A", NULL, -1) &&
+           acpi_dev_present("KIOX020A", NULL, -1))
+               return true;
+
+       /* Systems which use a single DUAL250E ACPI device to model 2 accels */
+       if (acpi_dev_present("DUAL250E", NULL, -1))
+               return true;
+
+       /* Systems which use a single BOSC0200 ACPI device to model 2 accels */
+       if (dual_accel_detect_bosc0200())
+               return true;
+
+       return false;
+}
index 5529d7b..7f3a03f 100644 (file)
@@ -140,12 +140,15 @@ static u8 gigabyte_wmi_detect_sensor_usability(struct wmi_device *wdev)
        }}
 
 static const struct dmi_system_id gigabyte_wmi_known_working_platforms[] = {
+       DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B450M S2H V2"),
        DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 AORUS ELITE"),
+       DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 AORUS ELITE V2"),
        DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 GAMING X V2"),
        DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550M AORUS PRO-P"),
        DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550M DS3H"),
        DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("Z390 I AORUS PRO WIFI-CF"),
        DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 AORUS ELITE"),
+       DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 GAMING X"),
        DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 I AORUS PRO WIFI"),
        DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 UD"),
        { }
index 078648a..2e4e97a 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/suspend.h>
+#include "dual_accel_detect.h"
 
 /* When NOT in tablet mode, VGBS returns with the flag 0x40 */
 #define TABLET_MODE_FLAG BIT(6)
@@ -25,6 +26,7 @@ static const struct acpi_device_id intel_hid_ids[] = {
        {"INT33D5", 0},
        {"INTC1051", 0},
        {"INTC1054", 0},
+       {"INTC1070", 0},
        {"", 0},
 };
 MODULE_DEVICE_TABLE(acpi, intel_hid_ids);
@@ -121,6 +123,7 @@ struct intel_hid_priv {
        struct input_dev *array;
        struct input_dev *switches;
        bool wakeup_mode;
+       bool dual_accel;
 };
 
 #define HID_EVENT_FILTER_UUID  "eeec56b3-4442-408f-a792-4edd4d758054"
@@ -450,22 +453,9 @@ static void notify_handler(acpi_handle handle, u32 event, void *context)
         * SW_TABLET_MODE report, in these cases we enable support when receiving
         * the first event instead of during driver setup.
         *
-        * Some 360 degree hinges (yoga) style 2-in-1 devices use 2 accelerometers
-        * to allow the OS to determine the angle between the display and the base
-        * of the device. On Windows these are read by a special HingeAngleService
-        * process which calls an ACPI DSM (Device Specific Method) on the
-        * ACPI KIOX010A device node for the sensor in the display, to let the
-        * firmware know if the 2-in-1 is in tablet- or laptop-mode so that it can
-        * disable the kbd and touchpad to avoid spurious input in tablet-mode.
-        *
-        * The linux kxcjk1013 driver calls the DSM for this once at probe time
-        * to ensure that the builtin kbd and touchpad work. On some devices this
-        * causes a "spurious" 0xcd event on the intel-hid ACPI dev. In this case
-        * there is not a functional tablet-mode switch, so we should not register
-        * the tablet-mode switch device.
+        * See dual_accel_detect.h for more info on the dual_accel check.
         */
-       if (!priv->switches && (event == 0xcc || event == 0xcd) &&
-           !acpi_dev_present("KIOX010A", NULL, -1)) {
+       if (!priv->switches && !priv->dual_accel && (event == 0xcc || event == 0xcd)) {
                dev_info(&device->dev, "switch event received, enable switches supports\n");
                err = intel_hid_switches_setup(device);
                if (err)
@@ -606,6 +596,8 @@ static int intel_hid_probe(struct platform_device *device)
                return -ENOMEM;
        dev_set_drvdata(&device->dev, priv);
 
+       priv->dual_accel = dual_accel_detect();
+
        err = intel_hid_input_setup(device);
        if (err) {
                pr_err("Failed to setup Intel HID hotkeys\n");
index 888a764..3091664 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/suspend.h>
+#include "dual_accel_detect.h"
 
 /* Returned when NOT in tablet mode on some HP Stream x360 11 models */
 #define VGBS_TABLET_MODE_FLAG_ALT      0x10
@@ -66,6 +67,7 @@ static const struct key_entry intel_vbtn_switchmap[] = {
 struct intel_vbtn_priv {
        struct input_dev *buttons_dev;
        struct input_dev *switches_dev;
+       bool dual_accel;
        bool has_buttons;
        bool has_switches;
        bool wakeup_mode;
@@ -160,6 +162,10 @@ static void notify_handler(acpi_handle handle, u32 event, void *context)
                input_dev = priv->buttons_dev;
        } else if ((ke = sparse_keymap_entry_from_scancode(priv->switches_dev, event))) {
                if (!priv->has_switches) {
+                       /* See dual_accel_detect.h for more info */
+                       if (priv->dual_accel)
+                               return;
+
                        dev_info(&device->dev, "Registering Intel Virtual Switches input-dev after receiving a switch event\n");
                        ret = input_register_device(priv->switches_dev);
                        if (ret)
@@ -248,11 +254,15 @@ static const struct dmi_system_id dmi_switches_allow_list[] = {
        {} /* Array terminator */
 };
 
-static bool intel_vbtn_has_switches(acpi_handle handle)
+static bool intel_vbtn_has_switches(acpi_handle handle, bool dual_accel)
 {
        unsigned long long vgbs;
        acpi_status status;
 
+       /* See dual_accel_detect.h for more info */
+       if (dual_accel)
+               return false;
+
        if (!dmi_check_system(dmi_switches_allow_list))
                return false;
 
@@ -263,13 +273,14 @@ static bool intel_vbtn_has_switches(acpi_handle handle)
 static int intel_vbtn_probe(struct platform_device *device)
 {
        acpi_handle handle = ACPI_HANDLE(&device->dev);
-       bool has_buttons, has_switches;
+       bool dual_accel, has_buttons, has_switches;
        struct intel_vbtn_priv *priv;
        acpi_status status;
        int err;
 
+       dual_accel = dual_accel_detect();
        has_buttons = acpi_has_method(handle, "VBDL");
-       has_switches = intel_vbtn_has_switches(handle);
+       has_switches = intel_vbtn_has_switches(handle, dual_accel);
 
        if (!has_buttons && !has_switches) {
                dev_warn(&device->dev, "failed to read Intel Virtual Button driver\n");
@@ -281,6 +292,7 @@ static int intel_vbtn_probe(struct platform_device *device)
                return -ENOMEM;
        dev_set_drvdata(&device->dev, priv);
 
+       priv->dual_accel = dual_accel;
        priv->has_buttons = has_buttons;
        priv->has_switches = has_switches;
 
index c37349f..d063d91 100644 (file)
@@ -94,6 +94,7 @@ static struct gpiod_lookup_table gpios_led_table = {
                                NULL, 1, GPIO_ACTIVE_LOW),
                GPIO_LOOKUP_IDX(AMD_FCH_GPIO_DRIVER_NAME, APU2_GPIO_LINE_LED3,
                                NULL, 2, GPIO_ACTIVE_LOW),
+               {} /* Terminating entry */
        }
 };
 
@@ -123,6 +124,7 @@ static struct gpiod_lookup_table gpios_key_table = {
        .table = {
                GPIO_LOOKUP_IDX(AMD_FCH_GPIO_DRIVER_NAME, APU2_GPIO_LINE_MODESW,
                                NULL, 0, GPIO_ACTIVE_LOW),
+               {} /* Terminating entry */
        }
 };
 
index 3671b5d..6cfed44 100644 (file)
@@ -571,6 +571,11 @@ static ssize_t current_value_store(struct kobject *kobj,
        else
                ret = tlmi_save_bios_settings("");
 
+       if (!ret && !tlmi_priv.pending_changes) {
+               tlmi_priv.pending_changes = true;
+               /* let userland know it may need to check reboot pending again */
+               kobject_uevent(&tlmi_priv.class_dev->kobj, KOBJ_CHANGE);
+       }
 out:
        kfree(auth_str);
        kfree(set_str);
@@ -647,6 +652,14 @@ static struct kobj_type tlmi_pwd_setting_ktype = {
        .sysfs_ops      = &tlmi_kobj_sysfs_ops,
 };
 
+static ssize_t pending_reboot_show(struct kobject *kobj, struct kobj_attribute *attr,
+                                  char *buf)
+{
+       return sprintf(buf, "%d\n", tlmi_priv.pending_changes);
+}
+
+static struct kobj_attribute pending_reboot = __ATTR_RO(pending_reboot);
+
 /* ---- Initialisation --------------------------------------------------------- */
 static void tlmi_release_attr(void)
 {
@@ -659,6 +672,7 @@ static void tlmi_release_attr(void)
                        kobject_put(&tlmi_priv.setting[i]->kobj);
                }
        }
+       sysfs_remove_file(&tlmi_priv.attribute_kset->kobj, &pending_reboot.attr);
        kset_unregister(tlmi_priv.attribute_kset);
 
        /* Authentication structures */
@@ -709,8 +723,8 @@ static int tlmi_sysfs_init(void)
 
                /* Build attribute */
                tlmi_priv.setting[i]->kobj.kset = tlmi_priv.attribute_kset;
-               ret = kobject_init_and_add(&tlmi_priv.setting[i]->kobj, &tlmi_attr_setting_ktype,
-                               NULL, "%s", tlmi_priv.setting[i]->display_name);
+               ret = kobject_add(&tlmi_priv.setting[i]->kobj, NULL,
+                                 "%s", tlmi_priv.setting[i]->display_name);
                if (ret)
                        goto fail_create_attr;
 
@@ -719,6 +733,10 @@ static int tlmi_sysfs_init(void)
                        goto fail_create_attr;
        }
 
+       ret = sysfs_create_file(&tlmi_priv.attribute_kset->kobj, &pending_reboot.attr);
+       if (ret)
+               goto fail_create_attr;
+
        /* Create authentication entries */
        tlmi_priv.authentication_kset = kset_create_and_add("authentication", NULL,
                                                                &tlmi_priv.class_dev->kobj);
@@ -727,8 +745,7 @@ static int tlmi_sysfs_init(void)
                goto fail_create_attr;
        }
        tlmi_priv.pwd_admin->kobj.kset = tlmi_priv.authentication_kset;
-       ret = kobject_init_and_add(&tlmi_priv.pwd_admin->kobj, &tlmi_pwd_setting_ktype,
-                       NULL, "%s", "Admin");
+       ret = kobject_add(&tlmi_priv.pwd_admin->kobj, NULL, "%s", "Admin");
        if (ret)
                goto fail_create_attr;
 
@@ -737,8 +754,7 @@ static int tlmi_sysfs_init(void)
                goto fail_create_attr;
 
        tlmi_priv.pwd_power->kobj.kset = tlmi_priv.authentication_kset;
-       ret = kobject_init_and_add(&tlmi_priv.pwd_power->kobj, &tlmi_pwd_setting_ktype,
-                       NULL, "%s", "System");
+       ret = kobject_add(&tlmi_priv.pwd_power->kobj, NULL, "%s", "System");
        if (ret)
                goto fail_create_attr;
 
@@ -818,6 +834,7 @@ static int tlmi_analyze(void)
                                pr_info("Error retrieving possible values for %d : %s\n",
                                                i, setting->display_name);
                }
+               kobject_init(&setting->kobj, &tlmi_attr_setting_ktype);
                tlmi_priv.setting[i] = setting;
                tlmi_priv.settings_count++;
                kfree(item);
@@ -844,10 +861,12 @@ static int tlmi_analyze(void)
        if (pwdcfg.password_state & TLMI_PAP_PWD)
                tlmi_priv.pwd_admin->valid = true;
 
+       kobject_init(&tlmi_priv.pwd_admin->kobj, &tlmi_pwd_setting_ktype);
+
        tlmi_priv.pwd_power = kzalloc(sizeof(struct tlmi_pwd_setting), GFP_KERNEL);
        if (!tlmi_priv.pwd_power) {
                ret = -ENOMEM;
-               goto fail_clear_attr;
+               goto fail_free_pwd_admin;
        }
        strscpy(tlmi_priv.pwd_power->kbdlang, "us", TLMI_LANG_MAXLEN);
        tlmi_priv.pwd_power->encoding = TLMI_ENCODING_ASCII;
@@ -859,11 +878,19 @@ static int tlmi_analyze(void)
        if (pwdcfg.password_state & TLMI_POP_PWD)
                tlmi_priv.pwd_power->valid = true;
 
+       kobject_init(&tlmi_priv.pwd_power->kobj, &tlmi_pwd_setting_ktype);
+
        return 0;
 
+fail_free_pwd_admin:
+       kfree(tlmi_priv.pwd_admin);
 fail_clear_attr:
-       for (i = 0; i < TLMI_SETTINGS_COUNT; ++i)
-               kfree(tlmi_priv.setting[i]);
+       for (i = 0; i < TLMI_SETTINGS_COUNT; ++i) {
+               if (tlmi_priv.setting[i]) {
+                       kfree(tlmi_priv.setting[i]->possible_values);
+                       kfree(tlmi_priv.setting[i]);
+               }
+       }
        return ret;
 }
 
index 6fa8da7..eb59884 100644 (file)
@@ -60,6 +60,7 @@ struct think_lmi {
        bool can_get_bios_selections;
        bool can_set_bios_password;
        bool can_get_password_settings;
+       bool pending_changes;
 
        struct tlmi_attr_setting *setting[TLMI_SETTINGS_COUNT];
        struct device *class_dev;
index 603156a..50ff04c 100644 (file)
@@ -73,6 +73,7 @@
 #include <linux/uaccess.h>
 #include <acpi/battery.h>
 #include <acpi/video.h>
+#include "dual_accel_detect.h"
 
 /* ThinkPad CMOS commands */
 #define TP_CMOS_VOLUME_DOWN    0
@@ -3232,7 +3233,7 @@ static int hotkey_init_tablet_mode(void)
                 * the laptop/tent/tablet mode to the EC. The bmc150 iio driver
                 * does not support this, so skip the hotkey on these models.
                 */
-               if (has_tablet_mode && !acpi_dev_present("BOSC0200", "1", -1))
+               if (has_tablet_mode && !dual_accel_detect())
                        tp_features.hotkey_tablet = TP_HOTKEY_TABLET_USES_GMMS;
                type = "GMMS";
        } else if (acpi_evalf(hkey_handle, &res, "MHKG", "qd")) {
index b010e4c..11c60a2 100644 (file)
@@ -78,7 +78,7 @@ static int wl_add(struct acpi_device *device)
 
        err = wireless_input_setup();
        if (err)
-               pr_err("Failed to setup hp wireless hotkeys\n");
+               pr_err("Failed to setup wireless hotkeys\n");
 
        return err;
 }
index 4d11920..4b563db 100644 (file)
@@ -204,6 +204,12 @@ config POWER_RESET_ST
        help
          Reset support for STMicroelectronics boards.
 
+config POWER_RESET_TPS65086
+       bool "TPS65086 restart driver"
+       depends on MFD_TPS65086
+       help
+         This driver adds support for resetting the TPS65086 PMIC on restart.
+
 config POWER_RESET_VERSATILE
        bool "ARM Versatile family reboot driver"
        depends on ARM
index cf3f4d0..f606a2f 100644 (file)
@@ -23,6 +23,7 @@ obj-$(CONFIG_POWER_RESET_QNAP) += qnap-poweroff.o
 obj-$(CONFIG_POWER_RESET_REGULATOR) += regulator-poweroff.o
 obj-$(CONFIG_POWER_RESET_RESTART) += restart-poweroff.o
 obj-$(CONFIG_POWER_RESET_ST) += st-poweroff.o
+obj-$(CONFIG_POWER_RESET_TPS65086) += tps65086-restart.o
 obj-$(CONFIG_POWER_RESET_VERSATILE) += arm-versatile-reboot.o
 obj-$(CONFIG_POWER_RESET_VEXPRESS) += vexpress-poweroff.o
 obj-$(CONFIG_POWER_RESET_XGENE) += xgene-reboot.o
index f1e843d..02f5fdb 100644 (file)
@@ -19,6 +19,7 @@
 #define MII_MARVELL_PHY_PAGE           22
 
 #define MII_PHY_LED_CTRL               16
+#define MII_PHY_LED_POL_CTRL           17
 #define MII_88E1318S_PHY_LED_TCR       18
 #define MII_88E1318S_PHY_WOL_CTRL      16
 #define MII_M1011_IEVENT               19
 #define LED2_FORCE_ON                                  (0x8 << 8)
 #define LEDMASK                                                GENMASK(11,8)
 
+#define MII_88E1318S_PHY_LED_POL_LED2          BIT(4)
+
+struct power_off_cfg {
+       char *mdio_node_name;
+       void (*phy_set_reg)(bool restart);
+};
+
 static struct phy_device *phydev;
+static const struct power_off_cfg *cfg;
 
-static void mvphy_reg_intn(u16 data)
+static void linkstation_mvphy_reg_intn(bool restart)
 {
        int rc = 0, saved_page;
+       u16 data = 0;
+
+       if (restart)
+               data = MII_88E1318S_PHY_LED_TCR_FORCE_INT;
 
        saved_page = phy_select_page(phydev, MII_MARVELL_LED_PAGE);
        if (saved_page < 0)
@@ -66,11 +79,52 @@ err:
                dev_err(&phydev->mdio.dev, "Write register failed, %d\n", rc);
 }
 
+static void readynas_mvphy_set_reg(bool restart)
+{
+       int rc = 0, saved_page;
+       u16 data = 0;
+
+       if (restart)
+               data = MII_88E1318S_PHY_LED_POL_LED2;
+
+       saved_page = phy_select_page(phydev, MII_MARVELL_LED_PAGE);
+       if (saved_page < 0)
+               goto err;
+
+       /* Set the LED[2].0 Polarity bit to the required state */
+       __phy_modify(phydev, MII_PHY_LED_POL_CTRL,
+                    MII_88E1318S_PHY_LED_POL_LED2, data);
+
+       if (!data) {
+               /* If WOL was enabled and a magic packet was received before powering
+                * off, we won't be able to wake up by sending another magic packet.
+                * Clear WOL status.
+                */
+               __phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_MARVELL_WOL_PAGE);
+               __phy_set_bits(phydev, MII_88E1318S_PHY_WOL_CTRL,
+                              MII_88E1318S_PHY_WOL_CTRL_CLEAR_WOL_STATUS);
+       }
+err:
+       rc = phy_restore_page(phydev, saved_page, rc);
+       if (rc < 0)
+               dev_err(&phydev->mdio.dev, "Write register failed, %d\n", rc);
+}
+
+static const struct power_off_cfg linkstation_power_off_cfg = {
+       .mdio_node_name = "mdio",
+       .phy_set_reg = linkstation_mvphy_reg_intn,
+};
+
+static const struct power_off_cfg readynas_power_off_cfg = {
+       .mdio_node_name = "mdio-bus",
+       .phy_set_reg = readynas_mvphy_set_reg,
+};
+
 static int linkstation_reboot_notifier(struct notifier_block *nb,
                                       unsigned long action, void *unused)
 {
        if (action == SYS_RESTART)
-               mvphy_reg_intn(MII_88E1318S_PHY_LED_TCR_FORCE_INT);
+               cfg->phy_set_reg(true);
 
        return NOTIFY_DONE;
 }
@@ -82,14 +136,21 @@ static struct notifier_block linkstation_reboot_nb = {
 static void linkstation_poweroff(void)
 {
        unregister_reboot_notifier(&linkstation_reboot_nb);
-       mvphy_reg_intn(0);
+       cfg->phy_set_reg(false);
 
        kernel_restart("Power off");
 }
 
 static const struct of_device_id ls_poweroff_of_match[] = {
-       { .compatible = "buffalo,ls421d" },
-       { .compatible = "buffalo,ls421de" },
+       { .compatible = "buffalo,ls421d",
+         .data = &linkstation_power_off_cfg,
+       },
+       { .compatible = "buffalo,ls421de",
+         .data = &linkstation_power_off_cfg,
+       },
+       { .compatible = "netgear,readynas-duo-v2",
+         .data = &readynas_power_off_cfg,
+       },
        { },
 };
 
@@ -97,13 +158,17 @@ static int __init linkstation_poweroff_init(void)
 {
        struct mii_bus *bus;
        struct device_node *dn;
+       const struct of_device_id *match;
 
        dn = of_find_matching_node(NULL, ls_poweroff_of_match);
        if (!dn)
                return -ENODEV;
        of_node_put(dn);
 
-       dn = of_find_node_by_name(NULL, "mdio");
+       match = of_match_node(ls_poweroff_of_match, dn);
+       cfg = match->data;
+
+       dn = of_find_node_by_name(NULL, cfg->mdio_node_name);
        if (!dn)
                return -ENODEV;
 
diff --git a/drivers/power/reset/tps65086-restart.c b/drivers/power/reset/tps65086-restart.c
new file mode 100644 (file)
index 0000000..78b89f7
--- /dev/null
@@ -0,0 +1,98 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Emil Renner Berthing
+ */
+
+#include <linux/mfd/tps65086.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/reboot.h>
+
+struct tps65086_restart {
+       struct notifier_block handler;
+       struct device *dev;
+};
+
+static int tps65086_restart_notify(struct notifier_block *this,
+                                  unsigned long mode, void *cmd)
+{
+       struct tps65086_restart *tps65086_restart =
+               container_of(this, struct tps65086_restart, handler);
+       struct tps65086 *tps65086 = dev_get_drvdata(tps65086_restart->dev->parent);
+       int ret;
+
+       ret = regmap_write(tps65086->regmap, TPS65086_FORCESHUTDN, 1);
+       if (ret) {
+               dev_err(tps65086_restart->dev, "%s: error writing to tps65086 pmic: %d\n",
+                       __func__, ret);
+               return NOTIFY_DONE;
+       }
+
+       /* give it a little time */
+       mdelay(200);
+
+       WARN_ON(1);
+
+       return NOTIFY_DONE;
+}
+
+static int tps65086_restart_probe(struct platform_device *pdev)
+{
+       struct tps65086_restart *tps65086_restart;
+       int ret;
+
+       tps65086_restart = devm_kzalloc(&pdev->dev, sizeof(*tps65086_restart), GFP_KERNEL);
+       if (!tps65086_restart)
+               return -ENOMEM;
+
+       platform_set_drvdata(pdev, tps65086_restart);
+
+       tps65086_restart->handler.notifier_call = tps65086_restart_notify;
+       tps65086_restart->handler.priority = 192;
+       tps65086_restart->dev = &pdev->dev;
+
+       ret = register_restart_handler(&tps65086_restart->handler);
+       if (ret) {
+               dev_err(&pdev->dev, "%s: cannot register restart handler: %d\n",
+                       __func__, ret);
+               return -ENODEV;
+       }
+
+       return 0;
+}
+
+static int tps65086_restart_remove(struct platform_device *pdev)
+{
+       struct tps65086_restart *tps65086_restart = platform_get_drvdata(pdev);
+       int ret;
+
+       ret = unregister_restart_handler(&tps65086_restart->handler);
+       if (ret) {
+               dev_err(&pdev->dev, "%s: cannot unregister restart handler: %d\n",
+                       __func__, ret);
+               return -ENODEV;
+       }
+
+       return 0;
+}
+
+static const struct platform_device_id tps65086_restart_id_table[] = {
+       { "tps65086-reset", },
+       { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, tps65086_restart_id_table);
+
+static struct platform_driver tps65086_restart_driver = {
+       .driver = {
+               .name = "tps65086-restart",
+       },
+       .probe = tps65086_restart_probe,
+       .remove = tps65086_restart_remove,
+       .id_table = tps65086_restart_id_table,
+};
+module_platform_driver(tps65086_restart_driver);
+
+MODULE_AUTHOR("Emil Renner Berthing <kernel@esmil.dk>");
+MODULE_DESCRIPTION("TPS65086 restart driver");
+MODULE_LICENSE("GPL v2");
index 11f5368..fcc7534 100644 (file)
@@ -358,7 +358,7 @@ config AXP288_CHARGER
 
 config AXP288_FUEL_GAUGE
        tristate "X-Powers AXP288 Fuel Gauge"
-       depends on MFD_AXP20X && IIO
+       depends on MFD_AXP20X && IIO && IOSF_MBI
        help
          Say yes here to have support for X-Power power management IC (PMIC)
          Fuel Gauge. The device provides battery statistics and status
@@ -577,6 +577,17 @@ config CHARGER_MP2629
          Battery charger. This driver provides Battery charger power management
          functions on the systems.
 
+config CHARGER_MT6360
+       tristate "Mediatek MT6360 Charger Driver"
+       depends on MFD_MT6360
+       depends on REGULATOR
+       select LINEAR_RANGES
+       help
+         Say Y here to enable MT6360 Charger Part.
+         The device supports High-Accuracy Voltage/Current Regulation,
+         Average Input Current Regulation, Battery Temperature Sensing,
+         Over-Temperature Protection, DPDM Detection for BC1.2.
+
 config CHARGER_QCOM_SMBB
        tristate "Qualcomm Switch-Mode Battery Charger and Boost"
        depends on MFD_SPMI_PMIC || COMPILE_TEST
@@ -669,6 +680,7 @@ config CHARGER_BQ256XX
 config CHARGER_SMB347
        tristate "Summit Microelectronics SMB3XX Battery Charger"
        depends on I2C
+       depends on REGULATOR
        select REGMAP_I2C
        help
          Say Y to include support for Summit Microelectronics SMB345,
@@ -736,6 +748,16 @@ config CHARGER_CROS_USBPD
          what is connected to USB PD ports from the EC and converts
          that into power_supply properties.
 
+config CHARGER_CROS_PCHG
+       tristate "ChromeOS EC based peripheral charger"
+       depends on MFD_CROS_EC_DEV
+       default MFD_CROS_EC_DEV
+       help
+         Say Y here to enable ChromeOS EC based peripheral charge driver.
+         This driver gets various information about the devices connected to
+         the peripheral charge ports from the EC and converts that into
+         power_supply properties.
+
 config CHARGER_SC2731
        tristate "Spreadtrum SC2731 charger driver"
        depends on MFD_SC27XX_PMIC || COMPILE_TEST
@@ -782,6 +804,8 @@ config CHARGER_WILCO
 config RN5T618_POWER
        tristate "RN5T618 charger/fuel gauge support"
        depends on MFD_RN5T618
+       depends on RN5T618_ADC
+       depends on IIO
        help
          Say Y here to have support for RN5T618 PMIC family fuel gauge and charger.
          This driver can also be built as a module. If so, the module will be
index 33059a9..4e55a11 100644 (file)
@@ -60,7 +60,7 @@ obj-$(CONFIG_BATTERY_TWL4030_MADC)    += twl4030_madc_battery.o
 obj-$(CONFIG_CHARGER_88PM860X) += 88pm860x_charger.o
 obj-$(CONFIG_CHARGER_PCF50633) += pcf50633-charger.o
 obj-$(CONFIG_BATTERY_RX51)     += rx51_battery.o
-obj-$(CONFIG_AB8500_BM)                += ab8500_bmdata.o ab8500_charger.o ab8500_fg.o ab8500_btemp.o abx500_chargalg.o
+obj-$(CONFIG_AB8500_BM)                += ab8500_bmdata.o ab8500_charger.o ab8500_fg.o ab8500_btemp.o ab8500_chargalg.o
 obj-$(CONFIG_CHARGER_CPCAP)    += cpcap-charger.o
 obj-$(CONFIG_CHARGER_ISP1704)  += isp1704_charger.o
 obj-$(CONFIG_CHARGER_MAX8903)  += max8903_charger.o
@@ -78,6 +78,7 @@ obj-$(CONFIG_CHARGER_MAX77693)        += max77693_charger.o
 obj-$(CONFIG_CHARGER_MAX8997)  += max8997_charger.o
 obj-$(CONFIG_CHARGER_MAX8998)  += max8998_charger.o
 obj-$(CONFIG_CHARGER_MP2629)   += mp2629_charger.o
+obj-$(CONFIG_CHARGER_MT6360)   += mt6360_charger.o
 obj-$(CONFIG_CHARGER_QCOM_SMBB)        += qcom_smbb.o
 obj-$(CONFIG_CHARGER_BQ2415X)  += bq2415x_charger.o
 obj-$(CONFIG_CHARGER_BQ24190)  += bq24190_charger.o
@@ -93,6 +94,7 @@ obj-$(CONFIG_CHARGER_TPS65217)        += tps65217_charger.o
 obj-$(CONFIG_AXP288_FUEL_GAUGE) += axp288_fuel_gauge.o
 obj-$(CONFIG_AXP288_CHARGER)   += axp288_charger.o
 obj-$(CONFIG_CHARGER_CROS_USBPD)       += cros_usbpd-charger.o
+obj-$(CONFIG_CHARGER_CROS_PCHG)        += cros_peripheral_charger.o
 obj-$(CONFIG_CHARGER_SC2731)   += sc2731_charger.o
 obj-$(CONFIG_FUEL_GAUGE_SC27XX)        += sc27xx_fuel_gauge.o
 obj-$(CONFIG_CHARGER_UCS1002)  += ucs1002_power.o
index 0c94057..d11405b 100644 (file)
@@ -269,43 +269,43 @@ enum bup_vch_sel {
 
 /*
  * ADC for the battery thermistor.
- * When using the ABx500_ADC_THERM_BATCTRL the battery ID resistor is combined
+ * When using the AB8500_ADC_THERM_BATCTRL the battery ID resistor is combined
  * with a NTC resistor to both identify the battery and to measure its
  * temperature. Different phone manufactures uses different techniques to both
  * identify the battery and to read its temperature.
  */
-enum abx500_adc_therm {
-       ABx500_ADC_THERM_BATCTRL,
-       ABx500_ADC_THERM_BATTEMP,
+enum ab8500_adc_therm {
+       AB8500_ADC_THERM_BATCTRL,
+       AB8500_ADC_THERM_BATTEMP,
 };
 
 /**
- * struct abx500_res_to_temp - defines one point in a temp to res curve. To
+ * struct ab8500_res_to_temp - defines one point in a temp to res curve. To
  * be used in battery packs that combines the identification resistor with a
  * NTC resistor.
  * @temp:                      battery pack temperature in Celsius
  * @resist:                    NTC resistor net total resistance
  */
-struct abx500_res_to_temp {
+struct ab8500_res_to_temp {
        int temp;
        int resist;
 };
 
 /**
- * struct abx500_v_to_cap - Table for translating voltage to capacity
+ * struct ab8500_v_to_cap - Table for translating voltage to capacity
  * @voltage:           Voltage in mV
  * @capacity:          Capacity in percent
  */
-struct abx500_v_to_cap {
+struct ab8500_v_to_cap {
        int voltage;
        int capacity;
 };
 
 /* Forward declaration */
-struct abx500_fg;
+struct ab8500_fg;
 
 /**
- * struct abx500_fg_parameters - Fuel gauge algorithm parameters, in seconds
+ * struct ab8500_fg_parameters - Fuel gauge algorithm parameters, in seconds
  * if not specified
  * @recovery_sleep_timer:      Time between measurements while recovering
  * @recovery_total_time:       Total recovery time
@@ -333,7 +333,7 @@ struct abx500_fg;
  * @pcut_max_restart:          Max number of restarts
  * @pcut_debounce_time:                Sets battery debounce time
  */
-struct abx500_fg_parameters {
+struct ab8500_fg_parameters {
        int recovery_sleep_timer;
        int recovery_total_time;
        int init_timer;
@@ -357,13 +357,13 @@ struct abx500_fg_parameters {
 };
 
 /**
- * struct abx500_charger_maximization - struct used by the board config.
+ * struct ab8500_charger_maximization - struct used by the board config.
  * @use_maxi:          Enable maximization for this battery type
  * @maxi_chg_curr:     Maximum charger current allowed
  * @maxi_wait_cycles:  cycles to wait before setting charger current
  * @charger_curr_step  delta between two charger current settings (mA)
  */
-struct abx500_maxim_parameters {
+struct ab8500_maxim_parameters {
        bool ena_maxi;
        int chg_curr;
        int wait_cycles;
@@ -371,7 +371,7 @@ struct abx500_maxim_parameters {
 };
 
 /**
- * struct abx500_battery_type - different batteries supported
+ * struct ab8500_battery_type - different batteries supported
  * @name:                      battery technology
  * @resis_high:                        battery upper resistance limit
  * @resis_low:                 battery lower resistance limit
@@ -400,7 +400,7 @@ struct abx500_maxim_parameters {
  * @n_batres_tbl_elements      number of elements in the batres_tbl
  * @batres_tbl                 battery internal resistance vs temperature table
  */
-struct abx500_battery_type {
+struct ab8500_battery_type {
        int name;
        int resis_high;
        int resis_low;
@@ -421,22 +421,22 @@ struct abx500_battery_type {
        int low_high_vol_lvl;
        int battery_resistance;
        int n_temp_tbl_elements;
-       const struct abx500_res_to_temp *r_to_t_tbl;
+       const struct ab8500_res_to_temp *r_to_t_tbl;
        int n_v_cap_tbl_elements;
-       const struct abx500_v_to_cap *v_to_cap_tbl;
+       const struct ab8500_v_to_cap *v_to_cap_tbl;
        int n_batres_tbl_elements;
        const struct batres_vs_temp *batres_tbl;
 };
 
 /**
- * struct abx500_bm_capacity_levels - abx500 capacity level data
+ * struct ab8500_bm_capacity_levels - ab8500 capacity level data
  * @critical:          critical capacity level in percent
  * @low:               low capacity level in percent
  * @normal:            normal capacity level in percent
  * @high:              high capacity level in percent
  * @full:              full capacity level in percent
  */
-struct abx500_bm_capacity_levels {
+struct ab8500_bm_capacity_levels {
        int critical;
        int low;
        int normal;
@@ -445,13 +445,13 @@ struct abx500_bm_capacity_levels {
 };
 
 /**
- * struct abx500_bm_charger_parameters - Charger specific parameters
+ * struct ab8500_bm_charger_parameters - Charger specific parameters
  * @usb_volt_max:      maximum allowed USB charger voltage in mV
  * @usb_curr_max:      maximum allowed USB charger current in mA
  * @ac_volt_max:       maximum allowed AC charger voltage in mV
  * @ac_curr_max:       maximum allowed AC charger current in mA
  */
-struct abx500_bm_charger_parameters {
+struct ab8500_bm_charger_parameters {
        int usb_volt_max;
        int usb_curr_max;
        int ac_volt_max;
@@ -459,7 +459,7 @@ struct abx500_bm_charger_parameters {
 };
 
 /**
- * struct abx500_bm_data - abx500 battery management data
+ * struct ab8500_bm_data - ab8500 battery management data
  * @temp_under         under this temp, charging is stopped
  * @temp_low           between this temp and temp_under charging is reduced
  * @temp_high          between this temp and temp_over charging is reduced
@@ -473,7 +473,7 @@ struct abx500_bm_charger_parameters {
  * @bkup_bat_i         current which we charge the backup battery with
  * @no_maintenance     indicates that maintenance charging is disabled
  * @capacity_scaling    indicates whether capacity scaling is to be used
- * @abx500_adc_therm   placement of thermistor, batctrl or battemp adc
+ * @ab8500_adc_therm   placement of thermistor, batctrl or battemp adc
  * @chg_unknown_bat    flag to enable charging of unknown batteries
  * @enable_overshoot   flag to enable VBAT overshoot control
  * @auto_trig          flag to enable auto adc trigger
@@ -494,7 +494,7 @@ struct abx500_bm_charger_parameters {
  * @chg_params         charger parameters
  * @fg_params          fuel gauge parameters
  */
-struct abx500_bm_data {
+struct ab8500_bm_data {
        int temp_under;
        int temp_low;
        int temp_high;
@@ -511,7 +511,7 @@ struct abx500_bm_data {
        bool chg_unknown_bat;
        bool enable_overshoot;
        bool auto_trig;
-       enum abx500_adc_therm adc_therm;
+       enum ab8500_adc_therm adc_therm;
        int fg_res;
        int n_btypes;
        int batt_id;
@@ -523,11 +523,11 @@ struct abx500_bm_data {
        int n_chg_in_curr;
        int *chg_output_curr;
        int *chg_input_curr;
-       const struct abx500_maxim_parameters *maxi;
-       const struct abx500_bm_capacity_levels *cap_levels;
-       struct abx500_battery_type *bat_type;
-       const struct abx500_bm_charger_parameters *chg_params;
-       const struct abx500_fg_parameters *fg_params;
+       const struct ab8500_maxim_parameters *maxi;
+       const struct ab8500_bm_capacity_levels *cap_levels;
+       struct ab8500_battery_type *bat_type;
+       const struct ab8500_bm_charger_parameters *chg_params;
+       const struct ab8500_fg_parameters *fg_params;
 };
 
 enum {
@@ -561,160 +561,7 @@ struct batres_vs_temp {
 /* Forward declaration */
 struct ab8500_fg;
 
-/**
- * struct ab8500_fg_parameters - Fuel gauge algorithm parameters, in seconds
- * if not specified
- * @recovery_sleep_timer:      Time between measurements while recovering
- * @recovery_total_time:       Total recovery time
- * @init_timer:                        Measurement interval during startup
- * @init_discard_time:         Time we discard voltage measurement at startup
- * @init_total_time:           Total init time during startup
- * @high_curr_time:            Time current has to be high to go to recovery
- * @accu_charging:             FG accumulation time while charging
- * @accu_high_curr:            FG accumulation time in high current mode
- * @high_curr_threshold:       High current threshold, in mA
- * @lowbat_threshold:          Low battery threshold, in mV
- * @battok_falling_th_sel0     Threshold in mV for battOk signal sel0
- *                             Resolution in 50 mV step.
- * @battok_raising_th_sel1     Threshold in mV for battOk signal sel1
- *                             Resolution in 50 mV step.
- * @user_cap_limit             Capacity reported from user must be within this
- *                             limit to be considered as sane, in percentage
- *                             points.
- * @maint_thres                        This is the threshold where we stop reporting
- *                             battery full while in maintenance, in per cent
- * @pcut_enable:                       Enable power cut feature in ab8505
- * @pcut_max_time:             Max time threshold
- * @pcut_flag_time:            Flagtime threshold
- * @pcut_max_restart:          Max number of restarts
- * @pcut_debunce_time: Sets battery debounce time
- */
-struct ab8500_fg_parameters {
-       int recovery_sleep_timer;
-       int recovery_total_time;
-       int init_timer;
-       int init_discard_time;
-       int init_total_time;
-       int high_curr_time;
-       int accu_charging;
-       int accu_high_curr;
-       int high_curr_threshold;
-       int lowbat_threshold;
-       int battok_falling_th_sel0;
-       int battok_raising_th_sel1;
-       int user_cap_limit;
-       int maint_thres;
-       bool pcut_enable;
-       u8 pcut_max_time;
-       u8 pcut_flag_time;
-       u8 pcut_max_restart;
-       u8 pcut_debunce_time;
-};
-
-/**
- * struct ab8500_charger_maximization - struct used by the board config.
- * @use_maxi:          Enable maximization for this battery type
- * @maxi_chg_curr:     Maximum charger current allowed
- * @maxi_wait_cycles:  cycles to wait before setting charger current
- * @charger_curr_step  delta between two charger current settings (mA)
- */
-struct ab8500_maxim_parameters {
-       bool ena_maxi;
-       int chg_curr;
-       int wait_cycles;
-       int charger_curr_step;
-};
-
-/**
- * struct ab8500_bm_capacity_levels - ab8500 capacity level data
- * @critical:          critical capacity level in percent
- * @low:               low capacity level in percent
- * @normal:            normal capacity level in percent
- * @high:              high capacity level in percent
- * @full:              full capacity level in percent
- */
-struct ab8500_bm_capacity_levels {
-       int critical;
-       int low;
-       int normal;
-       int high;
-       int full;
-};
-
-/**
- * struct ab8500_bm_charger_parameters - Charger specific parameters
- * @usb_volt_max:      maximum allowed USB charger voltage in mV
- * @usb_curr_max:      maximum allowed USB charger current in mA
- * @ac_volt_max:       maximum allowed AC charger voltage in mV
- * @ac_curr_max:       maximum allowed AC charger current in mA
- */
-struct ab8500_bm_charger_parameters {
-       int usb_volt_max;
-       int usb_curr_max;
-       int ac_volt_max;
-       int ac_curr_max;
-};
-
-/**
- * struct ab8500_bm_data - ab8500 battery management data
- * @temp_under         under this temp, charging is stopped
- * @temp_low           between this temp and temp_under charging is reduced
- * @temp_high          between this temp and temp_over charging is reduced
- * @temp_over          over this temp, charging is stopped
- * @temp_interval_chg  temperature measurement interval in s when charging
- * @temp_interval_nochg        temperature measurement interval in s when not charging
- * @main_safety_tmr_h  safety timer for main charger
- * @usb_safety_tmr_h   safety timer for usb charger
- * @bkup_bat_v         voltage which we charge the backup battery with
- * @bkup_bat_i         current which we charge the backup battery with
- * @no_maintenance     indicates that maintenance charging is disabled
- * @capacity_scaling    indicates whether capacity scaling is to be used
- * @adc_therm          placement of thermistor, batctrl or battemp adc
- * @chg_unknown_bat    flag to enable charging of unknown batteries
- * @enable_overshoot   flag to enable VBAT overshoot control
- * @fg_res             resistance of FG resistor in 0.1mOhm
- * @n_btypes           number of elements in array bat_type
- * @batt_id            index of the identified battery in array bat_type
- * @interval_charging  charge alg cycle period time when charging (sec)
- * @interval_not_charging charge alg cycle period time when not charging (sec)
- * @temp_hysteresis    temperature hysteresis
- * @gnd_lift_resistance        Battery ground to phone ground resistance (mOhm)
- * @maxi:              maximization parameters
- * @cap_levels         capacity in percent for the different capacity levels
- * @bat_type           table of supported battery types
- * @chg_params         charger parameters
- * @fg_params          fuel gauge parameters
- */
-struct ab8500_bm_data {
-       int temp_under;
-       int temp_low;
-       int temp_high;
-       int temp_over;
-       int temp_interval_chg;
-       int temp_interval_nochg;
-       int main_safety_tmr_h;
-       int usb_safety_tmr_h;
-       int bkup_bat_v;
-       int bkup_bat_i;
-       bool no_maintenance;
-       bool capacity_scaling;
-       bool chg_unknown_bat;
-       bool enable_overshoot;
-       enum abx500_adc_therm adc_therm;
-       int fg_res;
-       int n_btypes;
-       int batt_id;
-       int interval_charging;
-       int interval_not_charging;
-       int temp_hysteresis;
-       int gnd_lift_resistance;
-       const struct ab8500_maxim_parameters *maxi;
-       const struct ab8500_bm_capacity_levels *cap_levels;
-       const struct ab8500_bm_charger_parameters *chg_params;
-       const struct ab8500_fg_parameters *fg_params;
-};
-
-extern struct abx500_bm_data ab8500_bm_data;
+extern struct ab8500_bm_data ab8500_bm_data;
 
 void ab8500_charger_usb_state_changed(u8 bm_usb_state, u16 mA);
 struct ab8500_fg *ab8500_fg_get(void);
@@ -725,10 +572,10 @@ int ab8500_fg_inst_curr_started(struct ab8500_fg *di);
 int ab8500_fg_inst_curr_done(struct ab8500_fg *di);
 int ab8500_bm_of_probe(struct device *dev,
                       struct device_node *np,
-                      struct abx500_bm_data *bm);
+                      struct ab8500_bm_data *bm);
 
 extern struct platform_driver ab8500_fg_driver;
 extern struct platform_driver ab8500_btemp_driver;
-extern struct platform_driver abx500_chargalg_driver;
+extern struct platform_driver ab8500_chargalg_driver;
 
 #endif /* _AB8500_CHARGER_H_ */
index c2b8c0b..6f5fb79 100644 (file)
@@ -2,8 +2,6 @@
 #include <linux/export.h>
 #include <linux/power_supply.h>
 #include <linux/of.h>
-#include <linux/mfd/abx500.h>
-#include <linux/mfd/abx500/ab8500.h>
 
 #include "ab8500-bm.h"
 
@@ -13,7 +11,7 @@
  * Note that the res_to_temp table must be strictly sorted by falling resistance
  * values to work.
  */
-const struct abx500_res_to_temp ab8500_temp_tbl_a_thermistor[] = {
+const struct ab8500_res_to_temp ab8500_temp_tbl_a_thermistor[] = {
        {-5, 53407},
        { 0, 48594},
        { 5, 43804},
@@ -35,7 +33,7 @@ EXPORT_SYMBOL(ab8500_temp_tbl_a_thermistor);
 const int ab8500_temp_tbl_a_size = ARRAY_SIZE(ab8500_temp_tbl_a_thermistor);
 EXPORT_SYMBOL(ab8500_temp_tbl_a_size);
 
-const struct abx500_res_to_temp ab8500_temp_tbl_b_thermistor[] = {
+const struct ab8500_res_to_temp ab8500_temp_tbl_b_thermistor[] = {
        {-5, 200000},
        { 0, 159024},
        { 5, 151921},
@@ -57,7 +55,7 @@ EXPORT_SYMBOL(ab8500_temp_tbl_b_thermistor);
 const int ab8500_temp_tbl_b_size = ARRAY_SIZE(ab8500_temp_tbl_b_thermistor);
 EXPORT_SYMBOL(ab8500_temp_tbl_b_size);
 
-static const struct abx500_v_to_cap cap_tbl_a_thermistor[] = {
+static const struct ab8500_v_to_cap cap_tbl_a_thermistor[] = {
        {4171,  100},
        {4114,   95},
        {4009,   83},
@@ -80,7 +78,7 @@ static const struct abx500_v_to_cap cap_tbl_a_thermistor[] = {
        {3247,    0},
 };
 
-static const struct abx500_v_to_cap cap_tbl_b_thermistor[] = {
+static const struct ab8500_v_to_cap cap_tbl_b_thermistor[] = {
        {4161,  100},
        {4124,   98},
        {4044,   90},
@@ -103,7 +101,7 @@ static const struct abx500_v_to_cap cap_tbl_b_thermistor[] = {
        {3250,    0},
 };
 
-static const struct abx500_v_to_cap cap_tbl[] = {
+static const struct ab8500_v_to_cap cap_tbl[] = {
        {4186,  100},
        {4163,   99},
        {4114,   95},
@@ -134,7 +132,7 @@ static const struct abx500_v_to_cap cap_tbl[] = {
  * Note that the res_to_temp table must be strictly sorted by falling
  * resistance values to work.
  */
-static const struct abx500_res_to_temp temp_tbl[] = {
+static const struct ab8500_res_to_temp temp_tbl[] = {
        {-5, 214834},
        { 0, 162943},
        { 5, 124820},
@@ -191,7 +189,7 @@ static const struct batres_vs_temp temp_to_batres_tbl_9100[] = {
        {-20, 180},
 };
 
-static struct abx500_battery_type bat_type_thermistor[] = {
+static struct ab8500_battery_type bat_type_thermistor[] = {
        [BATTERY_UNKNOWN] = {
                /* First element always represent the UNKNOWN battery */
                .name = POWER_SUPPLY_TECHNOLOGY_UNKNOWN,
@@ -277,7 +275,7 @@ static struct abx500_battery_type bat_type_thermistor[] = {
        },
 };
 
-static struct abx500_battery_type bat_type_ext_thermistor[] = {
+static struct ab8500_battery_type bat_type_ext_thermistor[] = {
        [BATTERY_UNKNOWN] = {
                /* First element always represent the UNKNOWN battery */
                .name = POWER_SUPPLY_TECHNOLOGY_UNKNOWN,
@@ -394,7 +392,7 @@ static struct abx500_battery_type bat_type_ext_thermistor[] = {
        },
 };
 
-static const struct abx500_bm_capacity_levels cap_levels = {
+static const struct ab8500_bm_capacity_levels cap_levels = {
        .critical       = 2,
        .low            = 10,
        .normal         = 70,
@@ -402,7 +400,7 @@ static const struct abx500_bm_capacity_levels cap_levels = {
        .full           = 100,
 };
 
-static const struct abx500_fg_parameters fg = {
+static const struct ab8500_fg_parameters fg = {
        .recovery_sleep_timer = 10,
        .recovery_total_time = 100,
        .init_timer = 1,
@@ -424,14 +422,14 @@ static const struct abx500_fg_parameters fg = {
        .pcut_debounce_time = 2,
 };
 
-static const struct abx500_maxim_parameters ab8500_maxi_params = {
+static const struct ab8500_maxim_parameters ab8500_maxi_params = {
        .ena_maxi = true,
        .chg_curr = 910,
        .wait_cycles = 10,
        .charger_curr_step = 100,
 };
 
-static const struct abx500_bm_charger_parameters chg = {
+static const struct ab8500_bm_charger_parameters chg = {
        .usb_volt_max           = 5500,
        .usb_curr_max           = 1500,
        .ac_volt_max            = 7500,
@@ -456,7 +454,7 @@ static int ab8500_charge_input_curr_map[] = {
         700,    800,    900,    1000,   1100,   1300,   1400,   1500,
 };
 
-struct abx500_bm_data ab8500_bm_data = {
+struct ab8500_bm_data ab8500_bm_data = {
        .temp_under             = 3,
        .temp_low               = 8,
        .temp_high              = 43,
@@ -469,7 +467,7 @@ struct abx500_bm_data ab8500_bm_data = {
        .bkup_bat_i             = BUP_ICH_SEL_150UA,
        .no_maintenance         = false,
        .capacity_scaling       = false,
-       .adc_therm              = ABx500_ADC_THERM_BATCTRL,
+       .adc_therm              = AB8500_ADC_THERM_BATCTRL,
        .chg_unknown_bat        = false,
        .enable_overshoot       = false,
        .fg_res                 = 100,
@@ -492,7 +490,7 @@ struct abx500_bm_data ab8500_bm_data = {
 
 int ab8500_bm_of_probe(struct device *dev,
                       struct device_node *np,
-                      struct abx500_bm_data *bm)
+                      struct ab8500_bm_data *bm)
 {
        const struct batres_vs_temp *tmp_batres_tbl;
        struct device_node *battery_node;
@@ -531,7 +529,7 @@ int ab8500_bm_of_probe(struct device *dev,
        } else {
                bm->n_btypes   = 4;
                bm->bat_type   = bat_type_ext_thermistor;
-               bm->adc_therm  = ABx500_ADC_THERM_BATTEMP;
+               bm->adc_therm  = AB8500_ADC_THERM_BATTEMP;
                tmp_batres_tbl = temp_to_batres_tbl_ext_thermistor;
        }
 
index dbdcff3..b6c9111 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/mfd/abx500.h>
 #include <linux/mfd/abx500/ab8500.h>
 #include <linux/iio/consumer.h>
+#include <linux/fixp-arith.h>
 
 #include "ab8500-bm.h"
 
@@ -102,7 +103,7 @@ struct ab8500_btemp {
        struct iio_channel *btemp_ball;
        struct iio_channel *bat_ctrl;
        struct ab8500_fg *fg;
-       struct abx500_bm_data *bm;
+       struct ab8500_bm_data *bm;
        struct power_supply *btemp_psy;
        struct ab8500_btemp_events events;
        struct ab8500_btemp_ranges btemp_ranges;
@@ -144,7 +145,7 @@ static int ab8500_btemp_batctrl_volt_to_res(struct ab8500_btemp *di,
                return (450000 * (v_batctrl)) / (1800 - v_batctrl);
        }
 
-       if (di->bm->adc_therm == ABx500_ADC_THERM_BATCTRL) {
+       if (di->bm->adc_therm == AB8500_ADC_THERM_BATCTRL) {
                /*
                 * If the battery has internal NTC, we use the current
                 * source to calculate the resistance.
@@ -206,7 +207,7 @@ static int ab8500_btemp_curr_source_enable(struct ab8500_btemp *di,
                return 0;
 
        /* Only do this for batteries with internal NTC */
-       if (di->bm->adc_therm == ABx500_ADC_THERM_BATCTRL && enable) {
+       if (di->bm->adc_therm == AB8500_ADC_THERM_BATCTRL && enable) {
 
                if (di->curr_source == BTEMP_BATCTRL_CURR_SRC_7UA)
                        curr = BAT_CTRL_7U_ENA;
@@ -239,7 +240,7 @@ static int ab8500_btemp_curr_source_enable(struct ab8500_btemp *di,
                                __func__);
                        goto disable_curr_source;
                }
-       } else if (di->bm->adc_therm == ABx500_ADC_THERM_BATCTRL && !enable) {
+       } else if (di->bm->adc_therm == AB8500_ADC_THERM_BATCTRL && !enable) {
                dev_dbg(di->dev, "Disable BATCTRL curr source\n");
 
                /* Write 0 to the curr bits */
@@ -417,7 +418,7 @@ static int ab8500_btemp_get_batctrl_res(struct ab8500_btemp *di)
  * based on the NTC resistance.
  */
 static int ab8500_btemp_res_to_temp(struct ab8500_btemp *di,
-       const struct abx500_res_to_temp *tbl, int tbl_size, int res)
+       const struct ab8500_res_to_temp *tbl, int tbl_size, int res)
 {
        int i;
        /*
@@ -437,8 +438,9 @@ static int ab8500_btemp_res_to_temp(struct ab8500_btemp *di,
                        i++;
        }
 
-       return tbl[i].temp + ((tbl[i + 1].temp - tbl[i].temp) *
-               (res - tbl[i].resist)) / (tbl[i + 1].resist - tbl[i].resist);
+       return fixp_linear_interpolate(tbl[i].resist, tbl[i].temp,
+                                      tbl[i + 1].resist, tbl[i + 1].temp,
+                                      res);
 }
 
 /**
@@ -456,7 +458,7 @@ static int ab8500_btemp_measure_temp(struct ab8500_btemp *di)
 
        id = di->bm->batt_id;
 
-       if (di->bm->adc_therm == ABx500_ADC_THERM_BATCTRL &&
+       if (di->bm->adc_therm == AB8500_ADC_THERM_BATCTRL &&
                        id != BATTERY_UNKNOWN) {
 
                rbat = ab8500_btemp_get_batctrl_res(di);
@@ -525,7 +527,7 @@ static int ab8500_btemp_id(struct ab8500_btemp *di)
                        dev_dbg(di->dev, "Battery detected on %s"
                                " low %d < res %d < high: %d"
                                " index: %d\n",
-                               di->bm->adc_therm == ABx500_ADC_THERM_BATCTRL ?
+                               di->bm->adc_therm == AB8500_ADC_THERM_BATCTRL ?
                                "BATCTRL" : "BATTEMP",
                                di->bm->bat_type[i].resis_low, res,
                                di->bm->bat_type[i].resis_high, i);
@@ -545,7 +547,7 @@ static int ab8500_btemp_id(struct ab8500_btemp *di)
         * We only have to change current source if the
         * detected type is Type 1.
         */
-       if (di->bm->adc_therm == ABx500_ADC_THERM_BATCTRL &&
+       if (di->bm->adc_therm == AB8500_ADC_THERM_BATCTRL &&
            di->bm->batt_id == 1) {
                dev_dbg(di->dev, "Set BATCTRL current source to 20uA\n");
                di->curr_source = BTEMP_BATCTRL_CURR_SRC_20UA;
diff --git a/drivers/power/supply/ab8500_chargalg.c b/drivers/power/supply/ab8500_chargalg.c
new file mode 100644 (file)
index 0000000..ff4b26b
--- /dev/null
@@ -0,0 +1,2096 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) ST-Ericsson SA 2012
+ * Copyright (c) 2012 Sony Mobile Communications AB
+ *
+ * Charging algorithm driver for AB8500
+ *
+ * Authors:
+ *     Johan Palsson <johan.palsson@stericsson.com>
+ *     Karl Komierowski <karl.komierowski@stericsson.com>
+ *     Arun R Murthy <arun.murthy@stericsson.com>
+ *     Author: Imre Sunyi <imre.sunyi@sonymobile.com>
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/component.h>
+#include <linux/hrtimer.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/completion.h>
+#include <linux/workqueue.h>
+#include <linux/kobject.h>
+#include <linux/of.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/abx500.h>
+#include <linux/mfd/abx500/ab8500.h>
+#include <linux/notifier.h>
+
+#include "ab8500-bm.h"
+#include "ab8500-chargalg.h"
+
+/* Watchdog kick interval */
+#define CHG_WD_INTERVAL                        (6 * HZ)
+
+/* End-of-charge criteria counter */
+#define EOC_COND_CNT                   10
+
+/* One hour expressed in seconds */
+#define ONE_HOUR_IN_SECONDS            3600
+
+/* Five minutes expressed in seconds */
+#define FIVE_MINUTES_IN_SECONDS        300
+
+#define CHARGALG_CURR_STEP_LOW         0
+#define CHARGALG_CURR_STEP_HIGH        100
+
+enum ab8500_chargers {
+       NO_CHG,
+       AC_CHG,
+       USB_CHG,
+};
+
+struct ab8500_chargalg_charger_info {
+       enum ab8500_chargers conn_chg;
+       enum ab8500_chargers prev_conn_chg;
+       enum ab8500_chargers online_chg;
+       enum ab8500_chargers prev_online_chg;
+       enum ab8500_chargers charger_type;
+       bool usb_chg_ok;
+       bool ac_chg_ok;
+       int usb_volt;
+       int usb_curr;
+       int ac_volt;
+       int ac_curr;
+       int usb_vset;
+       int usb_iset;
+       int ac_vset;
+       int ac_iset;
+};
+
+struct ab8500_chargalg_suspension_status {
+       bool suspended_change;
+       bool ac_suspended;
+       bool usb_suspended;
+};
+
+struct ab8500_chargalg_current_step_status {
+       bool curr_step_change;
+       int curr_step;
+};
+
+struct ab8500_chargalg_battery_data {
+       int temp;
+       int volt;
+       int avg_curr;
+       int inst_curr;
+       int percent;
+};
+
+enum ab8500_chargalg_states {
+       STATE_HANDHELD_INIT,
+       STATE_HANDHELD,
+       STATE_CHG_NOT_OK_INIT,
+       STATE_CHG_NOT_OK,
+       STATE_HW_TEMP_PROTECT_INIT,
+       STATE_HW_TEMP_PROTECT,
+       STATE_NORMAL_INIT,
+       STATE_NORMAL,
+       STATE_WAIT_FOR_RECHARGE_INIT,
+       STATE_WAIT_FOR_RECHARGE,
+       STATE_MAINTENANCE_A_INIT,
+       STATE_MAINTENANCE_A,
+       STATE_MAINTENANCE_B_INIT,
+       STATE_MAINTENANCE_B,
+       STATE_TEMP_UNDEROVER_INIT,
+       STATE_TEMP_UNDEROVER,
+       STATE_TEMP_LOWHIGH_INIT,
+       STATE_TEMP_LOWHIGH,
+       STATE_SUSPENDED_INIT,
+       STATE_SUSPENDED,
+       STATE_OVV_PROTECT_INIT,
+       STATE_OVV_PROTECT,
+       STATE_SAFETY_TIMER_EXPIRED_INIT,
+       STATE_SAFETY_TIMER_EXPIRED,
+       STATE_BATT_REMOVED_INIT,
+       STATE_BATT_REMOVED,
+       STATE_WD_EXPIRED_INIT,
+       STATE_WD_EXPIRED,
+};
+
+static const char * const states[] = {
+       "HANDHELD_INIT",
+       "HANDHELD",
+       "CHG_NOT_OK_INIT",
+       "CHG_NOT_OK",
+       "HW_TEMP_PROTECT_INIT",
+       "HW_TEMP_PROTECT",
+       "NORMAL_INIT",
+       "NORMAL",
+       "WAIT_FOR_RECHARGE_INIT",
+       "WAIT_FOR_RECHARGE",
+       "MAINTENANCE_A_INIT",
+       "MAINTENANCE_A",
+       "MAINTENANCE_B_INIT",
+       "MAINTENANCE_B",
+       "TEMP_UNDEROVER_INIT",
+       "TEMP_UNDEROVER",
+       "TEMP_LOWHIGH_INIT",
+       "TEMP_LOWHIGH",
+       "SUSPENDED_INIT",
+       "SUSPENDED",
+       "OVV_PROTECT_INIT",
+       "OVV_PROTECT",
+       "SAFETY_TIMER_EXPIRED_INIT",
+       "SAFETY_TIMER_EXPIRED",
+       "BATT_REMOVED_INIT",
+       "BATT_REMOVED",
+       "WD_EXPIRED_INIT",
+       "WD_EXPIRED",
+};
+
+struct ab8500_chargalg_events {
+       bool batt_unknown;
+       bool mainextchnotok;
+       bool batt_ovv;
+       bool batt_rem;
+       bool btemp_underover;
+       bool btemp_lowhigh;
+       bool main_thermal_prot;
+       bool usb_thermal_prot;
+       bool main_ovv;
+       bool vbus_ovv;
+       bool usbchargernotok;
+       bool safety_timer_expired;
+       bool maintenance_timer_expired;
+       bool ac_wd_expired;
+       bool usb_wd_expired;
+       bool ac_cv_active;
+       bool usb_cv_active;
+       bool vbus_collapsed;
+};
+
+/**
+ * struct ab8500_charge_curr_maximization - Charger maximization parameters
+ * @original_iset:     the non optimized/maximised charger current
+ * @current_iset:      the charging current used at this moment
+ * @test_delta_i:      the delta between the current we want to charge and the
+ *                     current that is really going into the battery
+ * @condition_cnt:     number of iterations needed before a new charger current
+ *                     is set
+ * @max_current:       maximum charger current
+ * @wait_cnt:          to avoid too fast current step down in case of charger
+ *                     voltage collapse, we insert this delay between step
+ *                     down
+ * @level:             tells in how many steps the charging current has been
+ *                     increased
+ */
+struct ab8500_charge_curr_maximization {
+       int original_iset;
+       int current_iset;
+       int test_delta_i;
+       int condition_cnt;
+       int max_current;
+       int wait_cnt;
+       u8 level;
+};
+
+enum maxim_ret {
+       MAXIM_RET_NOACTION,
+       MAXIM_RET_CHANGE,
+       MAXIM_RET_IBAT_TOO_HIGH,
+};
+
+/**
+ * struct ab8500_chargalg - ab8500 Charging algorithm device information
+ * @dev:               pointer to the structure device
+ * @charge_status:     battery operating status
+ * @eoc_cnt:           counter used to determine end-of_charge
+ * @maintenance_chg:   indicate if maintenance charge is active
+ * @t_hyst_norm:       temperature hysteresis when the temperature has been
+ *                     over or under normal limits
+ * @t_hyst_lowhigh:    temperature hysteresis when the temperature has been
+ *                     over or under the high or low limits
+ * @charge_state:      current state of the charging algorithm
+ * @ccm:               charging current maximization parameters
+ * @chg_info:          information about connected charger types
+ * @batt_data:         data of the battery
+ * @susp_status:       current charger suspension status
+ * @bm:                Platform specific battery management information
+ * @curr_status:       Current step status for over-current protection
+ * @parent:            pointer to the struct ab8500
+ * @chargalg_psy:      structure that holds the battery properties exposed by
+ *                     the charging algorithm
+ * @events:            structure for information about events triggered
+ * @chargalg_wq:               work queue for running the charging algorithm
+ * @chargalg_periodic_work:    work to run the charging algorithm periodically
+ * @chargalg_wd_work:          work to kick the charger watchdog periodically
+ * @chargalg_work:             work to run the charging algorithm instantly
+ * @safety_timer:              charging safety timer
+ * @maintenance_timer:         maintenance charging timer
+ * @chargalg_kobject:          structure of type kobject
+ */
+struct ab8500_chargalg {
+       struct device *dev;
+       int charge_status;
+       int eoc_cnt;
+       bool maintenance_chg;
+       int t_hyst_norm;
+       int t_hyst_lowhigh;
+       enum ab8500_chargalg_states charge_state;
+       struct ab8500_charge_curr_maximization ccm;
+       struct ab8500_chargalg_charger_info chg_info;
+       struct ab8500_chargalg_battery_data batt_data;
+       struct ab8500_chargalg_suspension_status susp_status;
+       struct ab8500 *parent;
+       struct ab8500_chargalg_current_step_status curr_status;
+       struct ab8500_bm_data *bm;
+       struct power_supply *chargalg_psy;
+       struct ux500_charger *ac_chg;
+       struct ux500_charger *usb_chg;
+       struct ab8500_chargalg_events events;
+       struct workqueue_struct *chargalg_wq;
+       struct delayed_work chargalg_periodic_work;
+       struct delayed_work chargalg_wd_work;
+       struct work_struct chargalg_work;
+       struct hrtimer safety_timer;
+       struct hrtimer maintenance_timer;
+       struct kobject chargalg_kobject;
+};
+
+/* External charger prepare notifier */
+BLOCKING_NOTIFIER_HEAD(charger_notifier_list);
+
+/* Main battery properties */
+static enum power_supply_property ab8500_chargalg_props[] = {
+       POWER_SUPPLY_PROP_STATUS,
+       POWER_SUPPLY_PROP_HEALTH,
+};
+
+struct ab8500_chargalg_sysfs_entry {
+       struct attribute attr;
+       ssize_t (*show)(struct ab8500_chargalg *di, char *buf);
+       ssize_t (*store)(struct ab8500_chargalg *di, const char *buf, size_t length);
+};
+
+/**
+ * ab8500_chargalg_safety_timer_expired() - Expiration of the safety timer
+ * @timer:     pointer to the hrtimer structure
+ *
+ * This function gets called when the safety timer for the charger
+ * expires
+ */
+static enum hrtimer_restart
+ab8500_chargalg_safety_timer_expired(struct hrtimer *timer)
+{
+       struct ab8500_chargalg *di = container_of(timer, struct ab8500_chargalg,
+                                                 safety_timer);
+       dev_err(di->dev, "Safety timer expired\n");
+       di->events.safety_timer_expired = true;
+
+       /* Trigger execution of the algorithm instantly */
+       queue_work(di->chargalg_wq, &di->chargalg_work);
+
+       return HRTIMER_NORESTART;
+}
+
+/**
+ * ab8500_chargalg_maintenance_timer_expired() - Expiration of
+ * the maintenance timer
+ * @timer:     pointer to the timer structure
+ *
+ * This function gets called when the maintenance timer
+ * expires
+ */
+static enum hrtimer_restart
+ab8500_chargalg_maintenance_timer_expired(struct hrtimer *timer)
+{
+
+       struct ab8500_chargalg *di = container_of(timer, struct ab8500_chargalg,
+                                                 maintenance_timer);
+
+       dev_dbg(di->dev, "Maintenance timer expired\n");
+       di->events.maintenance_timer_expired = true;
+
+       /* Trigger execution of the algorithm instantly */
+       queue_work(di->chargalg_wq, &di->chargalg_work);
+
+       return HRTIMER_NORESTART;
+}
+
+/**
+ * ab8500_chargalg_state_to() - Change charge state
+ * @di:                pointer to the ab8500_chargalg structure
+ *
+ * This function gets called when a charge state change should occur
+ */
+static void ab8500_chargalg_state_to(struct ab8500_chargalg *di,
+       enum ab8500_chargalg_states state)
+{
+       dev_dbg(di->dev,
+               "State changed: %s (From state: [%d] %s =to=> [%d] %s )\n",
+               di->charge_state == state ? "NO" : "YES",
+               di->charge_state,
+               states[di->charge_state],
+               state,
+               states[state]);
+
+       di->charge_state = state;
+}
+
+static int ab8500_chargalg_check_charger_enable(struct ab8500_chargalg *di)
+{
+       switch (di->charge_state) {
+       case STATE_NORMAL:
+       case STATE_MAINTENANCE_A:
+       case STATE_MAINTENANCE_B:
+               break;
+       default:
+               return 0;
+       }
+
+       if (di->chg_info.charger_type & USB_CHG) {
+               return di->usb_chg->ops.check_enable(di->usb_chg,
+                       di->bm->bat_type[di->bm->batt_id].normal_vol_lvl,
+                       di->bm->bat_type[di->bm->batt_id].normal_cur_lvl);
+       } else if ((di->chg_info.charger_type & AC_CHG) &&
+                  !(di->ac_chg->external)) {
+               return di->ac_chg->ops.check_enable(di->ac_chg,
+                       di->bm->bat_type[di->bm->batt_id].normal_vol_lvl,
+                       di->bm->bat_type[di->bm->batt_id].normal_cur_lvl);
+       }
+       return 0;
+}
+
+/**
+ * ab8500_chargalg_check_charger_connection() - Check charger connection change
+ * @di:                pointer to the ab8500_chargalg structure
+ *
+ * This function will check if there is a change in the charger connection
+ * and change charge state accordingly. AC has precedence over USB.
+ */
+static int ab8500_chargalg_check_charger_connection(struct ab8500_chargalg *di)
+{
+       if (di->chg_info.conn_chg != di->chg_info.prev_conn_chg ||
+               di->susp_status.suspended_change) {
+               /*
+                * Charger state changed or suspension
+                * has changed since last update
+                */
+               if ((di->chg_info.conn_chg & AC_CHG) &&
+                       !di->susp_status.ac_suspended) {
+                       dev_dbg(di->dev, "Charging source is AC\n");
+                       if (di->chg_info.charger_type != AC_CHG) {
+                               di->chg_info.charger_type = AC_CHG;
+                               ab8500_chargalg_state_to(di, STATE_NORMAL_INIT);
+                       }
+               } else if ((di->chg_info.conn_chg & USB_CHG) &&
+                       !di->susp_status.usb_suspended) {
+                       dev_dbg(di->dev, "Charging source is USB\n");
+                       di->chg_info.charger_type = USB_CHG;
+                       ab8500_chargalg_state_to(di, STATE_NORMAL_INIT);
+               } else if (di->chg_info.conn_chg &&
+                       (di->susp_status.ac_suspended ||
+                       di->susp_status.usb_suspended)) {
+                       dev_dbg(di->dev, "Charging is suspended\n");
+                       di->chg_info.charger_type = NO_CHG;
+                       ab8500_chargalg_state_to(di, STATE_SUSPENDED_INIT);
+               } else {
+                       dev_dbg(di->dev, "Charging source is OFF\n");
+                       di->chg_info.charger_type = NO_CHG;
+                       ab8500_chargalg_state_to(di, STATE_HANDHELD_INIT);
+               }
+               di->chg_info.prev_conn_chg = di->chg_info.conn_chg;
+               di->susp_status.suspended_change = false;
+       }
+       return di->chg_info.conn_chg;
+}
+
+/**
+ * ab8500_chargalg_check_current_step_status() - Check charging current
+ * step status.
+ * @di:                pointer to the ab8500_chargalg structure
+ *
+ * This function will check if there is a change in the charging current step
+ * and change charge state accordingly.
+ */
+static void ab8500_chargalg_check_current_step_status
+       (struct ab8500_chargalg *di)
+{
+       if (di->curr_status.curr_step_change)
+               ab8500_chargalg_state_to(di, STATE_NORMAL_INIT);
+       di->curr_status.curr_step_change = false;
+}
+
+/**
+ * ab8500_chargalg_start_safety_timer() - Start charging safety timer
+ * @di:                pointer to the ab8500_chargalg structure
+ *
+ * The safety timer is used to avoid overcharging of old or bad batteries.
+ * There are different timers for AC and USB
+ */
+static void ab8500_chargalg_start_safety_timer(struct ab8500_chargalg *di)
+{
+       /* Charger-dependent expiration time in hours*/
+       int timer_expiration = 0;
+
+       switch (di->chg_info.charger_type) {
+       case AC_CHG:
+               timer_expiration = di->bm->main_safety_tmr_h;
+               break;
+
+       case USB_CHG:
+               timer_expiration = di->bm->usb_safety_tmr_h;
+               break;
+
+       default:
+               dev_err(di->dev, "Unknown charger to charge from\n");
+               break;
+       }
+
+       di->events.safety_timer_expired = false;
+       hrtimer_set_expires_range(&di->safety_timer,
+               ktime_set(timer_expiration * ONE_HOUR_IN_SECONDS, 0),
+               ktime_set(FIVE_MINUTES_IN_SECONDS, 0));
+       hrtimer_start_expires(&di->safety_timer, HRTIMER_MODE_REL);
+}
+
+/**
+ * ab8500_chargalg_stop_safety_timer() - Stop charging safety timer
+ * @di:                pointer to the ab8500_chargalg structure
+ *
+ * The safety timer is stopped whenever the NORMAL state is exited
+ */
+static void ab8500_chargalg_stop_safety_timer(struct ab8500_chargalg *di)
+{
+       if (hrtimer_try_to_cancel(&di->safety_timer) >= 0)
+               di->events.safety_timer_expired = false;
+}
+
+/**
+ * ab8500_chargalg_start_maintenance_timer() - Start charging maintenance timer
+ * @di:                pointer to the ab8500_chargalg structure
+ * @duration:  duration of the maintenance timer in hours
+ *
+ * The maintenance timer is used to maintain the charge in the battery once
+ * the battery is considered full. These timers are chosen to match the
+ * discharge curve of the battery
+ */
+static void ab8500_chargalg_start_maintenance_timer(struct ab8500_chargalg *di,
+       int duration)
+{
+       hrtimer_set_expires_range(&di->maintenance_timer,
+               ktime_set(duration * ONE_HOUR_IN_SECONDS, 0),
+               ktime_set(FIVE_MINUTES_IN_SECONDS, 0));
+       di->events.maintenance_timer_expired = false;
+       hrtimer_start_expires(&di->maintenance_timer, HRTIMER_MODE_REL);
+}
+
+/**
+ * ab8500_chargalg_stop_maintenance_timer() - Stop maintenance timer
+ * @di:                pointer to the ab8500_chargalg structure
+ *
+ * The maintenance timer is stopped whenever maintenance ends or when another
+ * state is entered
+ */
+static void ab8500_chargalg_stop_maintenance_timer(struct ab8500_chargalg *di)
+{
+       if (hrtimer_try_to_cancel(&di->maintenance_timer) >= 0)
+               di->events.maintenance_timer_expired = false;
+}
+
+/**
+ * ab8500_chargalg_kick_watchdog() - Kick charger watchdog
+ * @di:                pointer to the ab8500_chargalg structure
+ *
+ * The charger watchdog have to be kicked periodically whenever the charger is
+ * on, else the ABB will reset the system
+ */
+static int ab8500_chargalg_kick_watchdog(struct ab8500_chargalg *di)
+{
+       /* Check if charger exists and kick watchdog if charging */
+       if (di->ac_chg && di->ac_chg->ops.kick_wd &&
+           di->chg_info.online_chg & AC_CHG) {
+               /*
+                * If AB charger watchdog expired, pm2xxx charging
+                * gets disabled. To be safe, kick both AB charger watchdog
+                * and pm2xxx watchdog.
+                */
+               if (di->ac_chg->external &&
+                   di->usb_chg && di->usb_chg->ops.kick_wd)
+                       di->usb_chg->ops.kick_wd(di->usb_chg);
+
+               return di->ac_chg->ops.kick_wd(di->ac_chg);
+       } else if (di->usb_chg && di->usb_chg->ops.kick_wd &&
+                       di->chg_info.online_chg & USB_CHG)
+               return di->usb_chg->ops.kick_wd(di->usb_chg);
+
+       return -ENXIO;
+}
+
+/**
+ * ab8500_chargalg_ac_en() - Turn on/off the AC charger
+ * @di:                pointer to the ab8500_chargalg structure
+ * @enable:    charger on/off
+ * @vset:      requested charger output voltage
+ * @iset:      requested charger output current
+ *
+ * The AC charger will be turned on/off with the requested charge voltage and
+ * current
+ */
+static int ab8500_chargalg_ac_en(struct ab8500_chargalg *di, int enable,
+       int vset, int iset)
+{
+       static int ab8500_chargalg_ex_ac_enable_toggle;
+
+       if (!di->ac_chg || !di->ac_chg->ops.enable)
+               return -ENXIO;
+
+       /* Select maximum of what both the charger and the battery supports */
+       if (di->ac_chg->max_out_volt)
+               vset = min(vset, di->ac_chg->max_out_volt);
+       if (di->ac_chg->max_out_curr)
+               iset = min(iset, di->ac_chg->max_out_curr);
+
+       di->chg_info.ac_iset = iset;
+       di->chg_info.ac_vset = vset;
+
+       /* Enable external charger */
+       if (enable && di->ac_chg->external &&
+           !ab8500_chargalg_ex_ac_enable_toggle) {
+               blocking_notifier_call_chain(&charger_notifier_list,
+                                            0, di->dev);
+               ab8500_chargalg_ex_ac_enable_toggle++;
+       }
+
+       return di->ac_chg->ops.enable(di->ac_chg, enable, vset, iset);
+}
+
+/**
+ * ab8500_chargalg_usb_en() - Turn on/off the USB charger
+ * @di:                pointer to the ab8500_chargalg structure
+ * @enable:    charger on/off
+ * @vset:      requested charger output voltage
+ * @iset:      requested charger output current
+ *
+ * The USB charger will be turned on/off with the requested charge voltage and
+ * current
+ */
+static int ab8500_chargalg_usb_en(struct ab8500_chargalg *di, int enable,
+       int vset, int iset)
+{
+       if (!di->usb_chg || !di->usb_chg->ops.enable)
+               return -ENXIO;
+
+       /* Select maximum of what both the charger and the battery supports */
+       if (di->usb_chg->max_out_volt)
+               vset = min(vset, di->usb_chg->max_out_volt);
+       if (di->usb_chg->max_out_curr)
+               iset = min(iset, di->usb_chg->max_out_curr);
+
+       di->chg_info.usb_iset = iset;
+       di->chg_info.usb_vset = vset;
+
+       return di->usb_chg->ops.enable(di->usb_chg, enable, vset, iset);
+}
+
+/**
+ * ab8500_chargalg_update_chg_curr() - Update charger current
+ * @di:                pointer to the ab8500_chargalg structure
+ * @iset:      requested charger output current
+ *
+ * The charger output current will be updated for the charger
+ * that is currently in use (AC is checked before USB).
+ *
+ * Returns the result of the charger's update_curr operation, or
+ * -ENXIO if no suitable charger is connected.
+ */
+static int ab8500_chargalg_update_chg_curr(struct ab8500_chargalg *di,
+               int iset)
+{
+       /* Check if charger exists and update current if charging */
+       if (di->ac_chg && di->ac_chg->ops.update_curr &&
+                       di->chg_info.charger_type & AC_CHG) {
+               /*
+                * Clamp to what both the charger
+                * and the battery support
+                */
+               if (di->ac_chg->max_out_curr)
+                       iset = min(iset, di->ac_chg->max_out_curr);
+
+               di->chg_info.ac_iset = iset;
+
+               return di->ac_chg->ops.update_curr(di->ac_chg, iset);
+       } else if (di->usb_chg && di->usb_chg->ops.update_curr &&
+                       di->chg_info.charger_type & USB_CHG) {
+               /*
+                * Clamp to what both the charger
+                * and the battery support
+                */
+               if (di->usb_chg->max_out_curr)
+                       iset = min(iset, di->usb_chg->max_out_curr);
+
+               di->chg_info.usb_iset = iset;
+
+               return di->usb_chg->ops.update_curr(di->usb_chg, iset);
+       }
+
+       /* No usable charger for the current charger type */
+       return -ENXIO;
+}
+
+/**
+ * ab8500_chargalg_stop_charging() - Stop charging
+ * @di:                pointer to the ab8500_chargalg structure
+ *
+ * This function is called from any state where charging should be stopped.
+ * All charging is disabled and all status parameters and timers are changed
+ * accordingly
+ */
+static void ab8500_chargalg_stop_charging(struct ab8500_chargalg *di)
+{
+       /* Disable both chargers before stopping the supervision timers */
+       ab8500_chargalg_ac_en(di, false, 0, 0);
+       ab8500_chargalg_usb_en(di, false, 0, 0);
+       ab8500_chargalg_stop_safety_timer(di);
+       ab8500_chargalg_stop_maintenance_timer(di);
+       di->charge_status = POWER_SUPPLY_STATUS_NOT_CHARGING;
+       di->maintenance_chg = false;
+       /* Watchdog kicking is pointless with the chargers off */
+       cancel_delayed_work(&di->chargalg_wd_work);
+       power_supply_changed(di->chargalg_psy);
+}
+
+/**
+ * ab8500_chargalg_hold_charging() - Pauses charging
+ * @di:                pointer to the ab8500_chargalg structure
+ *
+ * This function is called in the case where maintenance charging has been
+ * disabled and instead a battery voltage mode is entered to check when the
+ * battery voltage has reached a certain recharge voltage
+ */
+static void ab8500_chargalg_hold_charging(struct ab8500_chargalg *di)
+{
+       /* Same shutdown sequence as a full stop... */
+       ab8500_chargalg_ac_en(di, false, 0, 0);
+       ab8500_chargalg_usb_en(di, false, 0, 0);
+       ab8500_chargalg_stop_safety_timer(di);
+       ab8500_chargalg_stop_maintenance_timer(di);
+       /*
+        * ...but the status stays CHARGING, unlike in
+        * ab8500_chargalg_stop_charging() -- presumably so user space does
+        * not observe a disconnect during this voltage-monitoring hold;
+        * NOTE(review): confirm this is intentional.
+        */
+       di->charge_status = POWER_SUPPLY_STATUS_CHARGING;
+       di->maintenance_chg = false;
+       cancel_delayed_work(&di->chargalg_wd_work);
+       power_supply_changed(di->chargalg_psy);
+}
+
+/**
+ * ab8500_chargalg_start_charging() - Start the charger
+ * @di:                pointer to the ab8500_chargalg structure
+ * @vset:      requested charger output voltage
+ * @iset:      requested charger output current
+ *
+ * A charger will be enabled depending on the requested charger type that was
+ * detected previously. The other charger path is explicitly disabled first
+ * so only one charger is ever active.
+ */
+static void ab8500_chargalg_start_charging(struct ab8500_chargalg *di,
+       int vset, int iset)
+{
+       switch (di->chg_info.charger_type) {
+       case AC_CHG:
+               dev_dbg(di->dev,
+                       "AC parameters: Vset %d, Ich %d\n", vset, iset);
+               /* Make sure USB is off before enabling AC */
+               ab8500_chargalg_usb_en(di, false, 0, 0);
+               ab8500_chargalg_ac_en(di, true, vset, iset);
+               break;
+
+       case USB_CHG:
+               dev_dbg(di->dev,
+                       "USB parameters: Vset %d, Ich %d\n", vset, iset);
+               /* Make sure AC is off before enabling USB */
+               ab8500_chargalg_ac_en(di, false, 0, 0);
+               ab8500_chargalg_usb_en(di, true, vset, iset);
+               break;
+
+       default:
+               dev_err(di->dev, "Unknown charger to charge from\n");
+               break;
+       }
+}
+
+/**
+ * ab8500_chargalg_check_temp() - Check battery temperature ranges
+ * @di:                pointer to the ab8500_chargalg structure
+ *
+ * The battery temperature is checked against the predefined limits and the
+ * charge state is changed accordingly. Hysteresis offsets (t_hyst_norm,
+ * t_hyst_lowhigh) prevent oscillating between states when the temperature
+ * sits right on a limit.
+ */
+static void ab8500_chargalg_check_temp(struct ab8500_chargalg *di)
+{
+       if (di->batt_data.temp > (di->bm->temp_low + di->t_hyst_norm) &&
+               di->batt_data.temp < (di->bm->temp_high - di->t_hyst_norm)) {
+               /* Temp OK! Clear all temperature events and hysteresis */
+               di->events.btemp_underover = false;
+               di->events.btemp_lowhigh = false;
+               di->t_hyst_norm = 0;
+               di->t_hyst_lowhigh = 0;
+       } else {
+               /*
+                * Minor excursion: between high and over, or between
+                * under and low (each widened by the low/high hysteresis)
+                */
+               if (((di->batt_data.temp >= di->bm->temp_high) &&
+                       (di->batt_data.temp <
+                               (di->bm->temp_over - di->t_hyst_lowhigh))) ||
+                       ((di->batt_data.temp >
+                               (di->bm->temp_under + di->t_hyst_lowhigh)) &&
+                       (di->batt_data.temp <= di->bm->temp_low))) {
+                       /* TEMP minor!!!!! */
+                       di->events.btemp_underover = false;
+                       di->events.btemp_lowhigh = true;
+                       /* Require extra margin before going back to normal */
+                       di->t_hyst_norm = di->bm->temp_hysteresis;
+                       di->t_hyst_lowhigh = 0;
+               } else if (di->batt_data.temp <= di->bm->temp_under ||
+                       di->batt_data.temp >= di->bm->temp_over) {
+                       /* TEMP major!!!!! Outside the absolute limits */
+                       di->events.btemp_underover = true;
+                       di->events.btemp_lowhigh = false;
+                       di->t_hyst_norm = 0;
+                       /* Require extra margin before leaving the major band */
+                       di->t_hyst_lowhigh = di->bm->temp_hysteresis;
+               } else {
+                       /* Within hysteresis: keep the previous state */
+                       dev_dbg(di->dev, "Within hysteresis limit temp: %d "
+                               "hyst_lowhigh %d, hyst normal %d\n",
+                               di->batt_data.temp, di->t_hyst_lowhigh,
+                               di->t_hyst_norm);
+               }
+       }
+}
+
+/**
+ * ab8500_chargalg_check_charger_voltage() - Check charger voltage
+ * @di:                pointer to the ab8500_chargalg structure
+ *
+ * Compares the measured USB and AC charger voltages against their
+ * configured maximum limits and updates the corresponding charger-OK
+ * flags in the charger info structure.
+ */
+static void ab8500_chargalg_check_charger_voltage(struct ab8500_chargalg *di)
+{
+       /* A charger is OK as long as its voltage does not exceed the limit */
+       di->chg_info.usb_chg_ok =
+               (di->chg_info.usb_volt <= di->bm->chg_params->usb_volt_max);
+
+       di->chg_info.ac_chg_ok =
+               (di->chg_info.ac_volt <= di->bm->chg_params->ac_volt_max);
+}
+
+/**
+ * ab8500_chargalg_end_of_charge() - Check if end-of-charge criteria is fulfilled
+ * @di:                pointer to the ab8500_chargalg structure
+ *
+ * End-of-charge criteria is fulfilled when the battery voltage is above a
+ * certain limit and the battery current is below a certain limit for a
+ * predefined number of consecutive seconds. If true, the battery is full
+ */
+static void ab8500_chargalg_end_of_charge(struct ab8500_chargalg *di)
+{
+       /*
+        * EOC candidate: actively charging in NORMAL state (not already in
+        * maintenance), voltage at termination level or charger in CV mode,
+        * and average current positive but below the termination current.
+        */
+       if (di->charge_status == POWER_SUPPLY_STATUS_CHARGING &&
+               di->charge_state == STATE_NORMAL &&
+               !di->maintenance_chg && (di->batt_data.volt >=
+               di->bm->bat_type[di->bm->batt_id].termination_vol ||
+               di->events.usb_cv_active || di->events.ac_cv_active) &&
+               di->batt_data.avg_curr <
+               di->bm->bat_type[di->bm->batt_id].termination_curr &&
+               di->batt_data.avg_curr > 0) {
+               /* The condition must hold EOC_COND_CNT consecutive times */
+               if (++di->eoc_cnt >= EOC_COND_CNT) {
+                       di->eoc_cnt = 0;
+                       di->charge_status = POWER_SUPPLY_STATUS_FULL;
+                       di->maintenance_chg = true;
+                       dev_dbg(di->dev, "EOC reached!\n");
+                       power_supply_changed(di->chargalg_psy);
+               } else {
+                       dev_dbg(di->dev,
+                               " EOC limit reached for the %d"
+                               " time, out of %d before EOC\n",
+                               di->eoc_cnt,
+                               EOC_COND_CNT);
+               }
+       } else {
+               /* Any miss resets the consecutive-hit counter */
+               di->eoc_cnt = 0;
+       }
+}
+
+/* Reset the current-maximization state from the battery/charger config */
+static void init_maxim_chg_curr(struct ab8500_chargalg *di)
+{
+       int norm_curr = di->bm->bat_type[di->bm->batt_id].normal_cur_lvl;
+
+       /* Start from the battery's normal current level */
+       di->ccm.original_iset = norm_curr;
+       di->ccm.current_iset = norm_curr;
+       di->ccm.test_delta_i = di->bm->maxi->charger_curr_step;
+       di->ccm.max_current = di->bm->maxi->chg_curr;
+       di->ccm.condition_cnt = di->bm->maxi->wait_cycles;
+       di->ccm.level = 0;
+}
+
+/**
+ * ab8500_chargalg_chg_curr_maxim() - increases the charger current to
+ *                     compensate for the system load
+ * @di:                pointer to the ab8500_chargalg structure
+ *
+ * This maximization function is used to raise the charger current to get the
+ * battery current as close to the optimal value as possible. The battery
+ * current during charging is affected by the system load
+ */
+static enum maxim_ret ab8500_chargalg_chg_curr_maxim(struct ab8500_chargalg *di)
+{
+       int delta_i;
+
+       /* Maximization can be disabled entirely by configuration */
+       if (!di->bm->maxi->ena_maxi)
+               return MAXIM_RET_NOACTION;
+
+       /* How far the measured battery current is below the optimum */
+       delta_i = di->ccm.original_iset - di->batt_data.inst_curr;
+
+       if (di->events.vbus_collapsed) {
+               dev_dbg(di->dev, "Charger voltage has collapsed %d\n",
+                               di->ccm.wait_cnt);
+               if (di->ccm.wait_cnt == 0) {
+                       /* First collapse: back off by one step immediately */
+                       dev_dbg(di->dev, "lowering current\n");
+                       di->ccm.wait_cnt++;
+                       di->ccm.condition_cnt = di->bm->maxi->wait_cycles;
+                       di->ccm.max_current =
+                               di->ccm.current_iset - di->ccm.test_delta_i;
+                       di->ccm.current_iset = di->ccm.max_current;
+                       di->ccm.level--;
+                       return MAXIM_RET_CHANGE;
+               } else {
+                       dev_dbg(di->dev, "waiting\n");
+                       /* Let's go in here twice before lowering curr again */
+                       di->ccm.wait_cnt = (di->ccm.wait_cnt + 1) % 3;
+                       return MAXIM_RET_NOACTION;
+               }
+       }
+
+       /* VBUS is healthy again: reset the collapse wait counter */
+       di->ccm.wait_cnt = 0;
+
+       if (di->batt_data.inst_curr > di->ccm.original_iset) {
+               dev_dbg(di->dev, " Maximization Ibat (%dmA) too high"
+                       " (limit %dmA) (current iset: %dmA)!\n",
+                       di->batt_data.inst_curr, di->ccm.original_iset,
+                       di->ccm.current_iset);
+
+               /* Already at the original setting; nothing to roll back */
+               if (di->ccm.current_iset == di->ccm.original_iset)
+                       return MAXIM_RET_NOACTION;
+
+               /* Battery current overshot: fall back to the original iset */
+               di->ccm.condition_cnt = di->bm->maxi->wait_cycles;
+               di->ccm.current_iset = di->ccm.original_iset;
+               di->ccm.level = 0;
+
+               return MAXIM_RET_IBAT_TOO_HIGH;
+       }
+
+       if (delta_i > di->ccm.test_delta_i &&
+               (di->ccm.current_iset + di->ccm.test_delta_i) <
+               di->ccm.max_current) {
+               /* Only step up after wait_cycles consecutive qualifying runs */
+               if (di->ccm.condition_cnt-- == 0) {
+                       /* Increase the iset with ccm.test_delta_i */
+                       di->ccm.condition_cnt = di->bm->maxi->wait_cycles;
+                       di->ccm.current_iset += di->ccm.test_delta_i;
+                       di->ccm.level++;
+                       dev_dbg(di->dev, " Maximization needed, increase"
+                               " with %d mA to %dmA (Optimal ibat: %d)"
+                               " Level %d\n",
+                               di->ccm.test_delta_i,
+                               di->ccm.current_iset,
+                               di->ccm.original_iset,
+                               di->ccm.level);
+                       return MAXIM_RET_CHANGE;
+               } else {
+                       return MAXIM_RET_NOACTION;
+               }
+       }  else {
+               /* Close enough to optimum (or at max): rearm the counter */
+               di->ccm.condition_cnt = di->bm->maxi->wait_cycles;
+               return MAXIM_RET_NOACTION;
+       }
+}
+
+/* Run one maximization step and apply the resulting charger current */
+static void handle_maxim_chg_curr(struct ab8500_chargalg *di)
+{
+       int target;
+
+       switch (ab8500_chargalg_chg_curr_maxim(di)) {
+       case MAXIM_RET_CHANGE:
+               /* Step to the new maximized current level */
+               target = di->ccm.current_iset;
+               break;
+       case MAXIM_RET_IBAT_TOO_HIGH:
+               /* Battery current overshot: fall back to the normal level */
+               target = di->bm->bat_type[di->bm->batt_id].normal_cur_lvl;
+               break;
+       case MAXIM_RET_NOACTION:
+       default:
+               /* Nothing to apply */
+               return;
+       }
+
+       if (ab8500_chargalg_update_chg_curr(di, target))
+               dev_err(di->dev, "failed to set chg curr\n");
+}
+
+/*
+ * ab8500_chargalg_get_ext_psy_data() - gather data from one external supply
+ * @dev:       device of an external power supply
+ * @data:      the algorithm's own power_supply (passed by the class iterator)
+ *
+ * Called for every registered power supply; copies the properties the
+ * charging algorithm cares about (presence, online state, health, voltage,
+ * temperature, current, capacity) into the driver state. Always returns 0
+ * so the class iteration visits every device.
+ */
+static int ab8500_chargalg_get_ext_psy_data(struct device *dev, void *data)
+{
+       struct power_supply *psy;
+       struct power_supply *ext = dev_get_drvdata(dev);
+       const char **supplicants = (const char **)ext->supplied_to;
+       struct ab8500_chargalg *di;
+       union power_supply_propval ret;
+       int j;
+       bool capacity_updated = false;
+
+       psy = (struct power_supply *)data;
+       di = power_supply_get_drvdata(psy);
+       /* For all psy where the driver name appears in any supplied_to */
+       j = match_string(supplicants, ext->num_supplicants, psy->desc->name);
+       if (j < 0)
+               return 0;
+
+       /*
+        * If the external supply does not register POWER_SUPPLY_PROP_CAPACITY
+        * as a property because it handles that sysfs entry on its own, this
+        * is the place to get the battery capacity.
+        */
+       if (!power_supply_get_property(ext, POWER_SUPPLY_PROP_CAPACITY, &ret)) {
+               di->batt_data.percent = ret.intval;
+               capacity_updated = true;
+       }
+
+       /* Go through all properties for the psy */
+       for (j = 0; j < ext->desc->num_properties; j++) {
+               enum power_supply_property prop;
+               prop = ext->desc->properties[j];
+
+               /*
+                * Grab the AC (mains) and USB charger handles the first
+                * time their power supplies are seen, if not already done.
+                */
+               if (!di->ac_chg &&
+                       ext->desc->type == POWER_SUPPLY_TYPE_MAINS)
+                       di->ac_chg = psy_to_ux500_charger(ext);
+               else if (!di->usb_chg &&
+                       ext->desc->type == POWER_SUPPLY_TYPE_USB)
+                       di->usb_chg = psy_to_ux500_charger(ext);
+
+               if (power_supply_get_property(ext, prop, &ret))
+                       continue;
+               switch (prop) {
+               case POWER_SUPPLY_PROP_PRESENT:
+                       switch (ext->desc->type) {
+                       case POWER_SUPPLY_TYPE_BATTERY:
+                               /* Battery present */
+                               if (ret.intval)
+                                       di->events.batt_rem = false;
+                               /* Battery removed */
+                               else
+                                       di->events.batt_rem = true;
+                               break;
+                       case POWER_SUPPLY_TYPE_MAINS:
+                               /* AC disconnected */
+                               if (!ret.intval &&
+                                       (di->chg_info.conn_chg & AC_CHG)) {
+                                       di->chg_info.prev_conn_chg =
+                                               di->chg_info.conn_chg;
+                                       di->chg_info.conn_chg &= ~AC_CHG;
+                               }
+                               /* AC connected */
+                               else if (ret.intval &&
+                                       !(di->chg_info.conn_chg & AC_CHG)) {
+                                       di->chg_info.prev_conn_chg =
+                                               di->chg_info.conn_chg;
+                                       di->chg_info.conn_chg |= AC_CHG;
+                               }
+                               break;
+                       case POWER_SUPPLY_TYPE_USB:
+                               /* USB disconnected */
+                               if (!ret.intval &&
+                                       (di->chg_info.conn_chg & USB_CHG)) {
+                                       di->chg_info.prev_conn_chg =
+                                               di->chg_info.conn_chg;
+                                       di->chg_info.conn_chg &= ~USB_CHG;
+                               }
+                               /* USB connected */
+                               else if (ret.intval &&
+                                       !(di->chg_info.conn_chg & USB_CHG)) {
+                                       di->chg_info.prev_conn_chg =
+                                               di->chg_info.conn_chg;
+                                       di->chg_info.conn_chg |= USB_CHG;
+                               }
+                               break;
+                       default:
+                               break;
+                       }
+                       break;
+
+               case POWER_SUPPLY_PROP_ONLINE:
+                       switch (ext->desc->type) {
+                       case POWER_SUPPLY_TYPE_BATTERY:
+                               break;
+                       case POWER_SUPPLY_TYPE_MAINS:
+                               /* AC offline */
+                               if (!ret.intval &&
+                                       (di->chg_info.online_chg & AC_CHG)) {
+                                       di->chg_info.prev_online_chg =
+                                               di->chg_info.online_chg;
+                                       di->chg_info.online_chg &= ~AC_CHG;
+                               }
+                               /* AC online: also kick the charger watchdog */
+                               else if (ret.intval &&
+                                       !(di->chg_info.online_chg & AC_CHG)) {
+                                       di->chg_info.prev_online_chg =
+                                               di->chg_info.online_chg;
+                                       di->chg_info.online_chg |= AC_CHG;
+                                       queue_delayed_work(di->chargalg_wq,
+                                               &di->chargalg_wd_work, 0);
+                               }
+                               break;
+                       case POWER_SUPPLY_TYPE_USB:
+                               /* USB offline */
+                               if (!ret.intval &&
+                                       (di->chg_info.online_chg & USB_CHG)) {
+                                       di->chg_info.prev_online_chg =
+                                               di->chg_info.online_chg;
+                                       di->chg_info.online_chg &= ~USB_CHG;
+                               }
+                               /* USB online: also kick the charger watchdog */
+                               else if (ret.intval &&
+                                       !(di->chg_info.online_chg & USB_CHG)) {
+                                       di->chg_info.prev_online_chg =
+                                               di->chg_info.online_chg;
+                                       di->chg_info.online_chg |= USB_CHG;
+                                       queue_delayed_work(di->chargalg_wq,
+                                               &di->chargalg_wd_work, 0);
+                               }
+                               break;
+                       default:
+                               break;
+                       }
+                       break;
+
+               case POWER_SUPPLY_PROP_HEALTH:
+                       /* Each health value sets exactly one event flag */
+                       switch (ext->desc->type) {
+                       case POWER_SUPPLY_TYPE_BATTERY:
+                               break;
+                       case POWER_SUPPLY_TYPE_MAINS:
+                               switch (ret.intval) {
+                               case POWER_SUPPLY_HEALTH_UNSPEC_FAILURE:
+                                       di->events.mainextchnotok = true;
+                                       di->events.main_thermal_prot = false;
+                                       di->events.main_ovv = false;
+                                       di->events.ac_wd_expired = false;
+                                       break;
+                               case POWER_SUPPLY_HEALTH_DEAD:
+                                       di->events.ac_wd_expired = true;
+                                       di->events.mainextchnotok = false;
+                                       di->events.main_ovv = false;
+                                       di->events.main_thermal_prot = false;
+                                       break;
+                               case POWER_SUPPLY_HEALTH_COLD:
+                               case POWER_SUPPLY_HEALTH_OVERHEAT:
+                                       di->events.main_thermal_prot = true;
+                                       di->events.mainextchnotok = false;
+                                       di->events.main_ovv = false;
+                                       di->events.ac_wd_expired = false;
+                                       break;
+                               case POWER_SUPPLY_HEALTH_OVERVOLTAGE:
+                                       di->events.main_ovv = true;
+                                       di->events.mainextchnotok = false;
+                                       di->events.main_thermal_prot = false;
+                                       di->events.ac_wd_expired = false;
+                                       break;
+                               case POWER_SUPPLY_HEALTH_GOOD:
+                                       di->events.main_thermal_prot = false;
+                                       di->events.mainextchnotok = false;
+                                       di->events.main_ovv = false;
+                                       di->events.ac_wd_expired = false;
+                                       break;
+                               default:
+                                       break;
+                               }
+                               break;
+
+                       case POWER_SUPPLY_TYPE_USB:
+                               switch (ret.intval) {
+                               case POWER_SUPPLY_HEALTH_UNSPEC_FAILURE:
+                                       di->events.usbchargernotok = true;
+                                       di->events.usb_thermal_prot = false;
+                                       di->events.vbus_ovv = false;
+                                       di->events.usb_wd_expired = false;
+                                       break;
+                               case POWER_SUPPLY_HEALTH_DEAD:
+                                       di->events.usb_wd_expired = true;
+                                       di->events.usbchargernotok = false;
+                                       di->events.usb_thermal_prot = false;
+                                       di->events.vbus_ovv = false;
+                                       break;
+                               case POWER_SUPPLY_HEALTH_COLD:
+                               case POWER_SUPPLY_HEALTH_OVERHEAT:
+                                       di->events.usb_thermal_prot = true;
+                                       di->events.usbchargernotok = false;
+                                       di->events.vbus_ovv = false;
+                                       di->events.usb_wd_expired = false;
+                                       break;
+                               case POWER_SUPPLY_HEALTH_OVERVOLTAGE:
+                                       di->events.vbus_ovv = true;
+                                       di->events.usbchargernotok = false;
+                                       di->events.usb_thermal_prot = false;
+                                       di->events.usb_wd_expired = false;
+                                       break;
+                               case POWER_SUPPLY_HEALTH_GOOD:
+                                       di->events.usbchargernotok = false;
+                                       di->events.usb_thermal_prot = false;
+                                       di->events.vbus_ovv = false;
+                                       di->events.usb_wd_expired = false;
+                                       break;
+                               default:
+                                       break;
+                               }
+                               break;
+                       default:
+                               break;
+                       }
+                       break;
+
+               case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+                       /* Voltages are reported in uV; stored in mV */
+                       switch (ext->desc->type) {
+                       case POWER_SUPPLY_TYPE_BATTERY:
+                               di->batt_data.volt = ret.intval / 1000;
+                               break;
+                       case POWER_SUPPLY_TYPE_MAINS:
+                               di->chg_info.ac_volt = ret.intval / 1000;
+                               break;
+                       case POWER_SUPPLY_TYPE_USB:
+                               di->chg_info.usb_volt = ret.intval / 1000;
+                               break;
+                       default:
+                               break;
+                       }
+                       break;
+
+               case POWER_SUPPLY_PROP_VOLTAGE_AVG:
+                       switch (ext->desc->type) {
+                       case POWER_SUPPLY_TYPE_MAINS:
+                               /* AVG is used to indicate when we are
+                                * in CV mode */
+                               if (ret.intval)
+                                       di->events.ac_cv_active = true;
+                               else
+                                       di->events.ac_cv_active = false;
+
+                               break;
+                       case POWER_SUPPLY_TYPE_USB:
+                               /* AVG is used to indicate when we are
+                                * in CV mode */
+                               if (ret.intval)
+                                       di->events.usb_cv_active = true;
+                               else
+                                       di->events.usb_cv_active = false;
+
+                               break;
+                       default:
+                               break;
+                       }
+                       break;
+
+               case POWER_SUPPLY_PROP_TECHNOLOGY:
+                       switch (ext->desc->type) {
+                       case POWER_SUPPLY_TYPE_BATTERY:
+                               /* Zero technology means unidentified battery */
+                               if (ret.intval)
+                                       di->events.batt_unknown = false;
+                               else
+                                       di->events.batt_unknown = true;
+
+                               break;
+                       default:
+                               break;
+                       }
+                       break;
+
+               case POWER_SUPPLY_PROP_TEMP:
+                       /* Reported in tenths of a degree; stored in degrees */
+                       di->batt_data.temp = ret.intval / 10;
+                       break;
+
+               case POWER_SUPPLY_PROP_CURRENT_NOW:
+                       /* Currents are reported in uA; stored in mA */
+                       switch (ext->desc->type) {
+                       case POWER_SUPPLY_TYPE_MAINS:
+                                       di->chg_info.ac_curr =
+                                               ret.intval / 1000;
+                                       break;
+                       case POWER_SUPPLY_TYPE_USB:
+                                       di->chg_info.usb_curr =
+                                               ret.intval / 1000;
+                               break;
+                       case POWER_SUPPLY_TYPE_BATTERY:
+                               di->batt_data.inst_curr = ret.intval / 1000;
+                               break;
+                       default:
+                               break;
+                       }
+                       break;
+
+               case POWER_SUPPLY_PROP_CURRENT_AVG:
+                       switch (ext->desc->type) {
+                       case POWER_SUPPLY_TYPE_BATTERY:
+                               di->batt_data.avg_curr = ret.intval / 1000;
+                               break;
+                       case POWER_SUPPLY_TYPE_USB:
+                               /* For USB, a nonzero AVG flags VBUS collapse */
+                               if (ret.intval)
+                                       di->events.vbus_collapsed = true;
+                               else
+                                       di->events.vbus_collapsed = false;
+                               break;
+                       default:
+                               break;
+                       }
+                       break;
+               case POWER_SUPPLY_PROP_CAPACITY:
+                       /* The out-of-band capacity read above takes priority */
+                       if (!capacity_updated)
+                               di->batt_data.percent = ret.intval;
+                       break;
+               default:
+                       break;
+               }
+       }
+       return 0;
+}
+
+/**
+ * ab8500_chargalg_external_power_changed() - callback for power supply changes
+ * @psy:       pointer to the structure power_supply
+ *
+ * Hooked into the power_supply core as the external_power_changed callback;
+ * invoked whenever any external supply this driver watches reports a change.
+ */
+static void ab8500_chargalg_external_power_changed(struct power_supply *psy)
+{
+       struct ab8500_chargalg *di = power_supply_get_drvdata(psy);
+
+       /* Workqueue may not be set up yet during early probe */
+       if (!di->chargalg_wq)
+               return;
+
+       /*
+        * Trigger the algorithm immediately; it reads all power_supply
+        * properties itself when it runs.
+        */
+       queue_work(di->chargalg_wq, &di->chargalg_work);
+}
+
+/**
+ * ab8500_chargalg_algorithm() - Main function for the algorithm
+ * @di:                pointer to the ab8500_chargalg structure
+ *
+ * This is the main control function for the charging algorithm.
+ * It is called periodically or when something happens that will
+ * trigger a state change
+ */
+static void ab8500_chargalg_algorithm(struct ab8500_chargalg *di)
+{
+       int charger_status;
+       int ret;
+       int curr_step_lvl;
+
+       /* Collect data from all power_supply class devices */
+       class_for_each_device(power_supply_class, NULL,
+               di->chargalg_psy, ab8500_chargalg_get_ext_psy_data);
+
+       ab8500_chargalg_end_of_charge(di);
+       ab8500_chargalg_check_temp(di);
+       ab8500_chargalg_check_charger_voltage(di);
+
+       charger_status = ab8500_chargalg_check_charger_connection(di);
+       ab8500_chargalg_check_current_step_status(di);
+
+       if (is_ab8500(di->parent)) {
+               ret = ab8500_chargalg_check_charger_enable(di);
+               if (ret < 0)
+                       dev_err(di->dev, "Checking charger is enabled error"
+                                       ": Returned Value %d\n", ret);
+       }
+
+       /*
+        * First check if we have a charger connected.
+        * Also we don't allow charging of unknown batteries if configured
+        * this way
+        */
+       if (!charger_status ||
+               (di->events.batt_unknown && !di->bm->chg_unknown_bat)) {
+               if (di->charge_state != STATE_HANDHELD) {
+                       di->events.safety_timer_expired = false;
+                       ab8500_chargalg_state_to(di, STATE_HANDHELD_INIT);
+               }
+       }
+
+       /* If suspended, we should not continue checking the flags */
+       else if (di->charge_state == STATE_SUSPENDED_INIT ||
+               di->charge_state == STATE_SUSPENDED) {
+               /* We don't do anything here, just don't continue */
+       }
+
+       /* Safety timer expiration */
+       else if (di->events.safety_timer_expired) {
+               if (di->charge_state != STATE_SAFETY_TIMER_EXPIRED)
+                       ab8500_chargalg_state_to(di,
+                               STATE_SAFETY_TIMER_EXPIRED_INIT);
+       }
+       /*
+        * Check if any interrupts have occurred
+        * that will prevent us from charging
+        */
+
+       /* Battery removed */
+       else if (di->events.batt_rem) {
+               if (di->charge_state != STATE_BATT_REMOVED)
+                       ab8500_chargalg_state_to(di, STATE_BATT_REMOVED_INIT);
+       }
+       /* Main or USB charger not ok. */
+       else if (di->events.mainextchnotok || di->events.usbchargernotok) {
+               /*
+                * If vbus_collapsed is set, we have to lower the charger
+                * current, which is done in the normal state below
+                */
+               if (di->charge_state != STATE_CHG_NOT_OK &&
+                               !di->events.vbus_collapsed)
+                       ab8500_chargalg_state_to(di, STATE_CHG_NOT_OK_INIT);
+       }
+       /* VBUS, Main or VBAT OVV. */
+       else if (di->events.vbus_ovv ||
+                       di->events.main_ovv ||
+                       di->events.batt_ovv ||
+                       !di->chg_info.usb_chg_ok ||
+                       !di->chg_info.ac_chg_ok) {
+               if (di->charge_state != STATE_OVV_PROTECT)
+                       ab8500_chargalg_state_to(di, STATE_OVV_PROTECT_INIT);
+       }
+       /* USB Thermal, stop charging */
+       else if (di->events.main_thermal_prot ||
+               di->events.usb_thermal_prot) {
+               if (di->charge_state != STATE_HW_TEMP_PROTECT)
+                       ab8500_chargalg_state_to(di,
+                               STATE_HW_TEMP_PROTECT_INIT);
+       }
+       /* Battery temp over/under */
+       else if (di->events.btemp_underover) {
+               if (di->charge_state != STATE_TEMP_UNDEROVER)
+                       ab8500_chargalg_state_to(di,
+                               STATE_TEMP_UNDEROVER_INIT);
+       }
+       /* Watchdog expired */
+       else if (di->events.ac_wd_expired ||
+               di->events.usb_wd_expired) {
+               if (di->charge_state != STATE_WD_EXPIRED)
+                       ab8500_chargalg_state_to(di, STATE_WD_EXPIRED_INIT);
+       }
+       /* Battery temp high/low */
+       else if (di->events.btemp_lowhigh) {
+               if (di->charge_state != STATE_TEMP_LOWHIGH)
+                       ab8500_chargalg_state_to(di, STATE_TEMP_LOWHIGH_INIT);
+       }
+
+       dev_dbg(di->dev,
+               "[CHARGALG] Vb %d Ib_avg %d Ib_inst %d Tb %d Cap %d Maint %d "
+               "State %s Active_chg %d Chg_status %d AC %d USB %d "
+               "AC_online %d USB_online %d AC_CV %d USB_CV %d AC_I %d "
+               "USB_I %d AC_Vset %d AC_Iset %d USB_Vset %d USB_Iset %d\n",
+               di->batt_data.volt,
+               di->batt_data.avg_curr,
+               di->batt_data.inst_curr,
+               di->batt_data.temp,
+               di->batt_data.percent,
+               di->maintenance_chg,
+               states[di->charge_state],
+               di->chg_info.charger_type,
+               di->charge_status,
+               di->chg_info.conn_chg & AC_CHG,
+               di->chg_info.conn_chg & USB_CHG,
+               di->chg_info.online_chg & AC_CHG,
+               di->chg_info.online_chg & USB_CHG,
+               di->events.ac_cv_active,
+               di->events.usb_cv_active,
+               di->chg_info.ac_curr,
+               di->chg_info.usb_curr,
+               di->chg_info.ac_vset,
+               di->chg_info.ac_iset,
+               di->chg_info.usb_vset,
+               di->chg_info.usb_iset);
+
+       switch (di->charge_state) {
+       case STATE_HANDHELD_INIT:
+               ab8500_chargalg_stop_charging(di);
+               di->charge_status = POWER_SUPPLY_STATUS_DISCHARGING;
+               ab8500_chargalg_state_to(di, STATE_HANDHELD);
+               fallthrough;
+
+       case STATE_HANDHELD:
+               break;
+
+       case STATE_SUSPENDED_INIT:
+               if (di->susp_status.ac_suspended)
+                       ab8500_chargalg_ac_en(di, false, 0, 0);
+               if (di->susp_status.usb_suspended)
+                       ab8500_chargalg_usb_en(di, false, 0, 0);
+               ab8500_chargalg_stop_safety_timer(di);
+               ab8500_chargalg_stop_maintenance_timer(di);
+               di->charge_status = POWER_SUPPLY_STATUS_NOT_CHARGING;
+               di->maintenance_chg = false;
+               ab8500_chargalg_state_to(di, STATE_SUSPENDED);
+               power_supply_changed(di->chargalg_psy);
+               fallthrough;
+
+       case STATE_SUSPENDED:
+               /* CHARGING is suspended */
+               break;
+
+       case STATE_BATT_REMOVED_INIT:
+               ab8500_chargalg_stop_charging(di);
+               ab8500_chargalg_state_to(di, STATE_BATT_REMOVED);
+               fallthrough;
+
+       case STATE_BATT_REMOVED:
+               if (!di->events.batt_rem)
+                       ab8500_chargalg_state_to(di, STATE_NORMAL_INIT);
+               break;
+
+       case STATE_HW_TEMP_PROTECT_INIT:
+               ab8500_chargalg_stop_charging(di);
+               ab8500_chargalg_state_to(di, STATE_HW_TEMP_PROTECT);
+               fallthrough;
+
+       case STATE_HW_TEMP_PROTECT:
+               if (!di->events.main_thermal_prot &&
+                               !di->events.usb_thermal_prot)
+                       ab8500_chargalg_state_to(di, STATE_NORMAL_INIT);
+               break;
+
+       case STATE_OVV_PROTECT_INIT:
+               ab8500_chargalg_stop_charging(di);
+               ab8500_chargalg_state_to(di, STATE_OVV_PROTECT);
+               fallthrough;
+
+       case STATE_OVV_PROTECT:
+               if (!di->events.vbus_ovv &&
+                               !di->events.main_ovv &&
+                               !di->events.batt_ovv &&
+                               di->chg_info.usb_chg_ok &&
+                               di->chg_info.ac_chg_ok)
+                       ab8500_chargalg_state_to(di, STATE_NORMAL_INIT);
+               break;
+
+       case STATE_CHG_NOT_OK_INIT:
+               ab8500_chargalg_stop_charging(di);
+               ab8500_chargalg_state_to(di, STATE_CHG_NOT_OK);
+               fallthrough;
+
+       case STATE_CHG_NOT_OK:
+               if (!di->events.mainextchnotok &&
+                               !di->events.usbchargernotok)
+                       ab8500_chargalg_state_to(di, STATE_NORMAL_INIT);
+               break;
+
+       case STATE_SAFETY_TIMER_EXPIRED_INIT:
+               ab8500_chargalg_stop_charging(di);
+               ab8500_chargalg_state_to(di, STATE_SAFETY_TIMER_EXPIRED);
+               fallthrough;
+
+       case STATE_SAFETY_TIMER_EXPIRED:
+               /* We exit this state when charger is removed */
+               break;
+
+       case STATE_NORMAL_INIT:
+               if (di->curr_status.curr_step == CHARGALG_CURR_STEP_LOW)
+                       ab8500_chargalg_stop_charging(di);
+               else {
+                       curr_step_lvl = di->bm->bat_type[
+                               di->bm->batt_id].normal_cur_lvl
+                               * di->curr_status.curr_step
+                               / CHARGALG_CURR_STEP_HIGH;
+                       ab8500_chargalg_start_charging(di,
+                               di->bm->bat_type[di->bm->batt_id]
+                               .normal_vol_lvl, curr_step_lvl);
+               }
+
+               ab8500_chargalg_state_to(di, STATE_NORMAL);
+               ab8500_chargalg_start_safety_timer(di);
+               ab8500_chargalg_stop_maintenance_timer(di);
+               init_maxim_chg_curr(di);
+               di->charge_status = POWER_SUPPLY_STATUS_CHARGING;
+               di->eoc_cnt = 0;
+               di->maintenance_chg = false;
+               power_supply_changed(di->chargalg_psy);
+
+               break;
+
+       case STATE_NORMAL:
+               handle_maxim_chg_curr(di);
+               if (di->charge_status == POWER_SUPPLY_STATUS_FULL &&
+                       di->maintenance_chg) {
+                       if (di->bm->no_maintenance)
+                               ab8500_chargalg_state_to(di,
+                                       STATE_WAIT_FOR_RECHARGE_INIT);
+                       else
+                               ab8500_chargalg_state_to(di,
+                                       STATE_MAINTENANCE_A_INIT);
+               }
+               break;
+
+       /* This state will be used when the maintenance state is disabled */
+       case STATE_WAIT_FOR_RECHARGE_INIT:
+               ab8500_chargalg_hold_charging(di);
+               ab8500_chargalg_state_to(di, STATE_WAIT_FOR_RECHARGE);
+               fallthrough;
+
+       case STATE_WAIT_FOR_RECHARGE:
+               if (di->batt_data.percent <=
+                   di->bm->bat_type[di->bm->batt_id].recharge_cap)
+                       ab8500_chargalg_state_to(di, STATE_NORMAL_INIT);
+               break;
+
+       case STATE_MAINTENANCE_A_INIT:
+               ab8500_chargalg_stop_safety_timer(di);
+               ab8500_chargalg_start_maintenance_timer(di,
+                       di->bm->bat_type[
+                               di->bm->batt_id].maint_a_chg_timer_h);
+               ab8500_chargalg_start_charging(di,
+                       di->bm->bat_type[
+                               di->bm->batt_id].maint_a_vol_lvl,
+                       di->bm->bat_type[
+                               di->bm->batt_id].maint_a_cur_lvl);
+               ab8500_chargalg_state_to(di, STATE_MAINTENANCE_A);
+               power_supply_changed(di->chargalg_psy);
+               fallthrough;
+
+       case STATE_MAINTENANCE_A:
+               if (di->events.maintenance_timer_expired) {
+                       ab8500_chargalg_stop_maintenance_timer(di);
+                       ab8500_chargalg_state_to(di, STATE_MAINTENANCE_B_INIT);
+               }
+               break;
+
+       case STATE_MAINTENANCE_B_INIT:
+               ab8500_chargalg_start_maintenance_timer(di,
+                       di->bm->bat_type[
+                               di->bm->batt_id].maint_b_chg_timer_h);
+               ab8500_chargalg_start_charging(di,
+                       di->bm->bat_type[
+                               di->bm->batt_id].maint_b_vol_lvl,
+                       di->bm->bat_type[
+                               di->bm->batt_id].maint_b_cur_lvl);
+               ab8500_chargalg_state_to(di, STATE_MAINTENANCE_B);
+               power_supply_changed(di->chargalg_psy);
+               fallthrough;
+
+       case STATE_MAINTENANCE_B:
+               if (di->events.maintenance_timer_expired) {
+                       ab8500_chargalg_stop_maintenance_timer(di);
+                       ab8500_chargalg_state_to(di, STATE_NORMAL_INIT);
+               }
+               break;
+
+       case STATE_TEMP_LOWHIGH_INIT:
+               ab8500_chargalg_start_charging(di,
+                       di->bm->bat_type[
+                               di->bm->batt_id].low_high_vol_lvl,
+                       di->bm->bat_type[
+                               di->bm->batt_id].low_high_cur_lvl);
+               ab8500_chargalg_stop_maintenance_timer(di);
+               di->charge_status = POWER_SUPPLY_STATUS_CHARGING;
+               ab8500_chargalg_state_to(di, STATE_TEMP_LOWHIGH);
+               power_supply_changed(di->chargalg_psy);
+               fallthrough;
+
+       case STATE_TEMP_LOWHIGH:
+               if (!di->events.btemp_lowhigh)
+                       ab8500_chargalg_state_to(di, STATE_NORMAL_INIT);
+               break;
+
+       case STATE_WD_EXPIRED_INIT:
+               ab8500_chargalg_stop_charging(di);
+               ab8500_chargalg_state_to(di, STATE_WD_EXPIRED);
+               fallthrough;
+
+       case STATE_WD_EXPIRED:
+               if (!di->events.ac_wd_expired &&
+                               !di->events.usb_wd_expired)
+                       ab8500_chargalg_state_to(di, STATE_NORMAL_INIT);
+               break;
+
+       case STATE_TEMP_UNDEROVER_INIT:
+               ab8500_chargalg_stop_charging(di);
+               ab8500_chargalg_state_to(di, STATE_TEMP_UNDEROVER);
+               fallthrough;
+
+       case STATE_TEMP_UNDEROVER:
+               if (!di->events.btemp_underover)
+                       ab8500_chargalg_state_to(di, STATE_NORMAL_INIT);
+               break;
+       }
+
+       /* Start charging directly if the new state is a charge state */
+       if (di->charge_state == STATE_NORMAL_INIT ||
+                       di->charge_state == STATE_MAINTENANCE_A_INIT ||
+                       di->charge_state == STATE_MAINTENANCE_B_INIT)
+               queue_work(di->chargalg_wq, &di->chargalg_work);
+}
+
+/**
+ * ab8500_chargalg_periodic_work() - Periodic work for the algorithm
+ * @work:      pointer to the work_struct structure
+ *
+ * Work queue function for the charging algorithm.  Runs one pass of the
+ * state machine and then re-queues itself: the poll interval is short
+ * while a charger is connected and relaxed otherwise.
+ */
+static void ab8500_chargalg_periodic_work(struct work_struct *work)
+{
+       struct ab8500_chargalg *di = container_of(work,
+               struct ab8500_chargalg, chargalg_periodic_work.work);
+
+       ab8500_chargalg_algorithm(di);
+
+       /*
+        * If a charger is connected then the battery has to be monitored
+        * frequently, else the work can be delayed.
+        */
+       if (di->chg_info.conn_chg)
+               queue_delayed_work(di->chargalg_wq,
+                       &di->chargalg_periodic_work,
+                       di->bm->interval_charging * HZ);
+       else
+               queue_delayed_work(di->chargalg_wq,
+                       &di->chargalg_periodic_work,
+                       di->bm->interval_not_charging * HZ);
+}
+
+/**
+ * ab8500_chargalg_wd_work() - periodic work to kick the charger watchdog
+ * @work:      pointer to the work_struct structure
+ *
+ * Work queue function for kicking the charger watchdog.  A failed kick is
+ * only logged; the work is re-queued regardless so the watchdog gets
+ * another chance on the next CHG_WD_INTERVAL.
+ */
+static void ab8500_chargalg_wd_work(struct work_struct *work)
+{
+       int ret;
+       struct ab8500_chargalg *di = container_of(work,
+               struct ab8500_chargalg, chargalg_wd_work.work);
+
+       ret = ab8500_chargalg_kick_watchdog(di);
+       if (ret < 0)
+               dev_err(di->dev, "failed to kick watchdog\n");
+
+       queue_delayed_work(di->chargalg_wq,
+               &di->chargalg_wd_work, CHG_WD_INTERVAL);
+}
+
+/**
+ * ab8500_chargalg_work() - Work to run the charging algorithm instantly
+ * @work:      pointer to the work_struct structure
+ *
+ * Work queue function for calling the charging algorithm.  Queued when an
+ * event needs an immediate state-machine pass instead of waiting for the
+ * periodic work.
+ */
+static void ab8500_chargalg_work(struct work_struct *work)
+{
+       struct ab8500_chargalg *di = container_of(work,
+               struct ab8500_chargalg, chargalg_work);
+
+       ab8500_chargalg_algorithm(di);
+}
+
+/**
+ * ab8500_chargalg_get_property() - get the chargalg properties
+ * @psy:       pointer to the power_supply structure
+ * @psp:       pointer to the power_supply_property structure
+ * @val:       pointer to the power_supply_propval union
+ *
+ * This function gets called when an application tries to get the
+ * chargalg properties by reading the sysfs files.
+ * status:     charging/discharging/full/unknown
+ * health:     health of the battery
+ * Returns error code in case of failure else 0 on success
+ */
+static int ab8500_chargalg_get_property(struct power_supply *psy,
+       enum power_supply_property psp,
+       union power_supply_propval *val)
+{
+       struct ab8500_chargalg *di = power_supply_get_drvdata(psy);
+
+       switch (psp) {
+       case POWER_SUPPLY_PROP_STATUS:
+               val->intval = di->charge_status;
+               break;
+       case POWER_SUPPLY_PROP_HEALTH:
+               /* Health is derived from the event flags, most severe first */
+               if (di->events.batt_ovv) {
+                       val->intval = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
+               } else if (di->events.btemp_underover) {
+                       /* Distinguish cold from overheat via temp_under limit */
+                       if (di->batt_data.temp <= di->bm->temp_under)
+                               val->intval = POWER_SUPPLY_HEALTH_COLD;
+                       else
+                               val->intval = POWER_SUPPLY_HEALTH_OVERHEAT;
+               } else if (di->charge_state == STATE_SAFETY_TIMER_EXPIRED ||
+                          di->charge_state == STATE_SAFETY_TIMER_EXPIRED_INIT) {
+                       val->intval = POWER_SUPPLY_HEALTH_UNSPEC_FAILURE;
+               } else {
+                       val->intval = POWER_SUPPLY_HEALTH_GOOD;
+               }
+               break;
+       default:
+               return -EINVAL;
+       }
+       return 0;
+}
+
+/* Exposure to the sysfs interface */
+
+/**
+ * ab8500_chargalg_curr_step_show() - sysfs show for the charging current step
+ * @di:                pointer to the struct ab8500_chargalg
+ * @buf:       output buffer
+ *
+ * Returns the number of bytes written to @buf.
+ */
+static ssize_t ab8500_chargalg_curr_step_show(struct ab8500_chargalg *di,
+                                             char *buf)
+{
+       return sprintf(buf, "%d\n", di->curr_status.curr_step);
+}
+
+/**
+ * ab8500_chargalg_curr_step_store() - sysfs store for the charging current step
+ * @di:                pointer to the struct ab8500_chargalg
+ * @buf:       user supplied decimal string with the new step
+ * @length:    number of bytes in @buf
+ *
+ * Accepts a step in [CHARGALG_CURR_STEP_LOW, CHARGALG_CURR_STEP_HIGH] and
+ * kicks the charging algorithm so the new step takes effect.  Out-of-range
+ * input is rejected without modifying the current step (the original code
+ * stored the value before validating it, leaving bogus state behind).
+ * Returns the number of bytes consumed, or a negative errno on parse error.
+ */
+static ssize_t ab8500_chargalg_curr_step_store(struct ab8500_chargalg *di,
+                                              const char *buf, size_t length)
+{
+       long param;
+       int ret;
+
+       ret = kstrtol(buf, 10, &param);
+       if (ret < 0)
+               return ret;
+
+       /* Validate before committing: never store an out-of-range step */
+       if (param >= CHARGALG_CURR_STEP_LOW &&
+               param <= CHARGALG_CURR_STEP_HIGH) {
+               di->curr_status.curr_step = param;
+               di->curr_status.curr_step_change = true;
+               queue_work(di->chargalg_wq, &di->chargalg_work);
+       } else {
+               dev_info(di->dev, "Wrong current step\n"
+                       "Enter 0. Disable AC/USB Charging\n"
+                       "1--100. Set AC/USB charging current step\n"
+                       "100. Enable AC/USB Charging\n");
+       }
+
+       /* sysfs store contract: report the whole input as consumed */
+       return length;
+}
+
+
+/**
+ * ab8500_chargalg_en_show() - sysfs show for the charging suspend state
+ * @di:                pointer to the struct ab8500_chargalg
+ * @buf:       output buffer
+ *
+ * Prints 1 only when both AC and USB charging are suspended, else 0.
+ * Returns the number of bytes written to @buf.
+ */
+static ssize_t ab8500_chargalg_en_show(struct ab8500_chargalg *di,
+                                      char *buf)
+{
+       return sprintf(buf, "%d\n",
+                      di->susp_status.ac_suspended &&
+                      di->susp_status.usb_suspended);
+}
+
+/**
+ * ab8500_chargalg_en_store() - sysfs store to enable/disable charging
+ * @di:                pointer to the struct ab8500_chargalg
+ * @buf:       user supplied decimal string: 0, 1 or 2
+ * @length:    number of bytes in @buf
+ *
+ * 0 suspends both AC and USB charging, 1 re-enables AC charging and 2
+ * re-enables USB charging.  Every accepted value marks the suspend state
+ * as changed and queues the algorithm work (the per-case duplication of
+ * that trigger in the original is folded into one place here).
+ * Returns the number of bytes consumed, or a negative errno on parse error.
+ */
+static ssize_t ab8500_chargalg_en_store(struct ab8500_chargalg *di,
+       const char *buf, size_t length)
+{
+       long param;
+       int ret;
+
+       ret = kstrtol(buf, 10, &param);
+       if (ret < 0)
+               return ret;
+
+       switch (param) {
+       case 0:
+               /* Disable charging */
+               di->susp_status.ac_suspended = true;
+               di->susp_status.usb_suspended = true;
+               break;
+       case 1:
+               /* Enable AC Charging */
+               di->susp_status.ac_suspended = false;
+               break;
+       case 2:
+               /* Enable USB charging */
+               di->susp_status.usb_suspended = false;
+               break;
+       default:
+               dev_info(di->dev, "Wrong input\n"
+                       "Enter 0. Disable AC/USB Charging\n"
+                       "1. Enable AC charging\n"
+                       "2. Enable USB Charging\n");
+               return length;
+       }
+
+       /* Trigger a state change for the new suspend configuration */
+       di->susp_status.suspended_change = true;
+       queue_work(di->chargalg_wq, &di->chargalg_work);
+
+       return length;
+}
+
+/* "chargalg" attribute: enable/disable charging from user space */
+static struct ab8500_chargalg_sysfs_entry ab8500_chargalg_en_charger =
+       __ATTR(chargalg, 0644, ab8500_chargalg_en_show,
+                               ab8500_chargalg_en_store);
+
+/* "chargalg_curr_step" attribute: tune the charging current step */
+static struct ab8500_chargalg_sysfs_entry ab8500_chargalg_curr_step =
+       __ATTR(chargalg_curr_step, 0644, ab8500_chargalg_curr_step_show,
+                                       ab8500_chargalg_curr_step_store);
+
+/* sysfs_ops .show dispatcher: routes to the matching entry's show() hook */
+static ssize_t ab8500_chargalg_sysfs_show(struct kobject *kobj,
+       struct attribute *attr, char *buf)
+{
+       struct ab8500_chargalg_sysfs_entry *entry = container_of(attr,
+               struct ab8500_chargalg_sysfs_entry, attr);
+
+       struct ab8500_chargalg *di = container_of(kobj,
+               struct ab8500_chargalg, chargalg_kobject);
+
+       if (!entry->show)
+               return -EIO;
+
+       return entry->show(di, buf);
+}
+
+/*
+ * sysfs_ops .store dispatcher: routes to the matching entry's store() hook.
+ * NOTE(review): despite the "_charger" suffix this is the generic store
+ * callback for all chargalg attributes (see ab8500_chargalg_sysfs_ops).
+ */
+static ssize_t ab8500_chargalg_sysfs_charger(struct kobject *kobj,
+       struct attribute *attr, const char *buf, size_t length)
+{
+       struct ab8500_chargalg_sysfs_entry *entry = container_of(attr,
+               struct ab8500_chargalg_sysfs_entry, attr);
+
+       struct ab8500_chargalg *di = container_of(kobj,
+               struct ab8500_chargalg, chargalg_kobject);
+
+       if (!entry->store)
+               return -EIO;
+
+       return entry->store(di, buf, length);
+}
+
+/* Attributes exposed under the "ab8500_chargalg" kobject */
+static struct attribute *ab8500_chargalg_chg[] = {
+       &ab8500_chargalg_en_charger.attr,
+       &ab8500_chargalg_curr_step.attr,
+       NULL,
+};
+
+static const struct sysfs_ops ab8500_chargalg_sysfs_ops = {
+       .show = ab8500_chargalg_sysfs_show,
+       .store = ab8500_chargalg_sysfs_charger,
+};
+
+/*
+ * NOTE(review): no .release hook is provided; presumably safe because the
+ * kobject is embedded in the devm-allocated struct ab8500_chargalg -- confirm.
+ */
+static struct kobj_type ab8500_chargalg_ktype = {
+       .sysfs_ops = &ab8500_chargalg_sysfs_ops,
+       .default_attrs = ab8500_chargalg_chg,
+};
+
+/**
+ * ab8500_chargalg_sysfs_exit() - de-init of sysfs entry
+ * @di:                pointer to the struct ab8500_chargalg
+ *
+ * This function removes the entry in sysfs.
+ * NOTE(review): kobject_del() only unlinks from sysfs; a kobject_put()
+ * would also drop the reference taken at init -- confirm intended lifetime.
+ */
+static void ab8500_chargalg_sysfs_exit(struct ab8500_chargalg *di)
+{
+       kobject_del(&di->chargalg_kobject);
+}
+
+/**
+ * ab8500_chargalg_sysfs_init() - init of sysfs entry
+ * @di:                pointer to the struct ab8500_chargalg
+ *
+ * This function adds an entry in sysfs.
+ * Returns error code in case of failure else 0(on success)
+ */
+static int ab8500_chargalg_sysfs_init(struct ab8500_chargalg *di)
+{
+       int ret;
+
+       ret = kobject_init_and_add(&di->chargalg_kobject,
+               &ab8500_chargalg_ktype,
+               NULL, "ab8500_chargalg");
+       if (ret < 0) {
+               /*
+                * kobject_init_and_add() requires kobject_put() even on
+                * failure, to drop the reference taken by kobject_init().
+                */
+               kobject_put(&di->chargalg_kobject);
+               dev_err(di->dev, "failed to create sysfs entry\n");
+       }
+
+       return ret;
+}
+/* Exposure to the sysfs interface <<END>> */
+
+/* System resume: restart watchdog kicking and periodic monitoring */
+static int __maybe_unused ab8500_chargalg_resume(struct device *dev)
+{
+       struct ab8500_chargalg *di = dev_get_drvdata(dev);
+
+       /* Kick charger watchdog if charging (any charger online) */
+       if (di->chg_info.online_chg)
+               queue_delayed_work(di->chargalg_wq, &di->chargalg_wd_work, 0);
+
+       /*
+        * Run the charging algorithm directly to be sure we don't
+        * do it too seldom
+        */
+       queue_delayed_work(di->chargalg_wq, &di->chargalg_periodic_work, 0);
+
+       return 0;
+}
+
+/* System suspend: stop watchdog kicking and periodic monitoring */
+static int __maybe_unused ab8500_chargalg_suspend(struct device *dev)
+{
+       struct ab8500_chargalg *di = dev_get_drvdata(dev);
+
+       /* The watchdog work only runs while a charger is online */
+       if (di->chg_info.online_chg)
+               cancel_delayed_work_sync(&di->chargalg_wd_work);
+
+       cancel_delayed_work_sync(&di->chargalg_periodic_work);
+
+       return 0;
+}
+
+/* Supplicants notified when the chargalg power supply changes */
+static char *supply_interface[] = {
+       "ab8500_fg",
+};
+
+/* power_supply class description for the charging algorithm device */
+static const struct power_supply_desc ab8500_chargalg_desc = {
+       .name                   = "ab8500_chargalg",
+       .type                   = POWER_SUPPLY_TYPE_BATTERY,
+       .properties             = ab8500_chargalg_props,
+       .num_properties         = ARRAY_SIZE(ab8500_chargalg_props),
+       .get_property           = ab8500_chargalg_get_property,
+       .external_power_changed = ab8500_chargalg_external_power_changed,
+};
+
+/* Component bind: create the private ordered workqueue and start polling */
+static int ab8500_chargalg_bind(struct device *dev, struct device *master,
+                               void *data)
+{
+       struct ab8500_chargalg *di = dev_get_drvdata(dev);
+
+       /* Create a work queue for the chargalg */
+       di->chargalg_wq = alloc_ordered_workqueue("ab8500_chargalg_wq",
+                                                 WQ_MEM_RECLAIM);
+       if (di->chargalg_wq == NULL) {
+               dev_err(di->dev, "failed to create work queue\n");
+               return -ENOMEM;
+       }
+
+       /* Run the charging algorithm */
+       queue_delayed_work(di->chargalg_wq, &di->chargalg_periodic_work, 0);
+
+       return 0;
+}
+
+/* Component unbind: cancel timers and work, then tear down the workqueue */
+static void ab8500_chargalg_unbind(struct device *dev, struct device *master,
+                                  void *data)
+{
+       struct ab8500_chargalg *di = dev_get_drvdata(dev);
+
+       /* Stop all timers and work */
+       hrtimer_cancel(&di->safety_timer);
+       hrtimer_cancel(&di->maintenance_timer);
+
+       cancel_delayed_work_sync(&di->chargalg_periodic_work);
+       cancel_delayed_work_sync(&di->chargalg_wd_work);
+       cancel_work_sync(&di->chargalg_work);
+
+       /* Delete the work queue */
+       destroy_workqueue(di->chargalg_wq);
+       /*
+        * NOTE(review): all chargalg work runs on di->chargalg_wq, so
+        * flushing the system workqueue here looks redundant -- confirm.
+        */
+       flush_scheduled_work();
+}
+
+/* Bound/unbound as a component of the ab8500 charger master */
+static const struct component_ops ab8500_chargalg_component_ops = {
+       .bind = ab8500_chargalg_bind,
+       .unbind = ab8500_chargalg_unbind,
+};
+
+/* Platform probe: set up timers, work items, psy class and sysfs entry */
+static int ab8500_chargalg_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct power_supply_config psy_cfg = {};
+       struct ab8500_chargalg *di;
+       int ret = 0;
+
+       di = devm_kzalloc(dev, sizeof(*di), GFP_KERNEL);
+       if (!di)
+               return -ENOMEM;
+
+       di->bm = &ab8500_bm_data;
+
+       /* get device struct and parent */
+       di->dev = dev;
+       di->parent = dev_get_drvdata(pdev->dev.parent);
+
+       psy_cfg.supplied_to = supply_interface;
+       psy_cfg.num_supplicants = ARRAY_SIZE(supply_interface);
+       psy_cfg.drv_data = di;
+
+       /* Initialize safety timer */
+       hrtimer_init(&di->safety_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
+       di->safety_timer.function = ab8500_chargalg_safety_timer_expired;
+
+       /* Initialize maintenance timer */
+       hrtimer_init(&di->maintenance_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
+       di->maintenance_timer.function =
+               ab8500_chargalg_maintenance_timer_expired;
+
+       /* Init work for chargalg */
+       INIT_DEFERRABLE_WORK(&di->chargalg_periodic_work,
+               ab8500_chargalg_periodic_work);
+       INIT_DEFERRABLE_WORK(&di->chargalg_wd_work,
+               ab8500_chargalg_wd_work);
+
+       /* Init work for chargalg */
+       INIT_WORK(&di->chargalg_work, ab8500_chargalg_work);
+
+       /* To detect charger at startup */
+       di->chg_info.prev_conn_chg = -1;
+
+       /* Register chargalg power supply class */
+       di->chargalg_psy = devm_power_supply_register(di->dev,
+                                                &ab8500_chargalg_desc,
+                                                &psy_cfg);
+       if (IS_ERR(di->chargalg_psy)) {
+               dev_err(di->dev, "failed to register chargalg psy\n");
+               return PTR_ERR(di->chargalg_psy);
+       }
+
+       platform_set_drvdata(pdev, di);
+
+       /* sysfs interface to enable/disable charging from user space */
+       ret = ab8500_chargalg_sysfs_init(di);
+       if (ret) {
+               /* NOTE(review): sysfs_init already logs this -- duplicate */
+               dev_err(di->dev, "failed to create sysfs entry\n");
+               return ret;
+       }
+       /* Default to full charging current until user space tunes it */
+       di->curr_status.curr_step = CHARGALG_CURR_STEP_HIGH;
+
+       dev_info(di->dev, "probe success\n");
+       return component_add(dev, &ab8500_chargalg_component_ops);
+}
+
+/* Platform remove: detach the component and tear down the sysfs entry */
+static int ab8500_chargalg_remove(struct platform_device *pdev)
+{
+       struct ab8500_chargalg *di = platform_get_drvdata(pdev);
+
+       component_del(&pdev->dev, &ab8500_chargalg_component_ops);
+
+       /* sysfs interface to enable/disable charging from user space */
+       ab8500_chargalg_sysfs_exit(di);
+
+       return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(ab8500_chargalg_pm_ops, ab8500_chargalg_suspend, ab8500_chargalg_resume);
+
+/* Device tree match table */
+static const struct of_device_id ab8500_chargalg_match[] = {
+       { .compatible = "stericsson,ab8500-chargalg", },
+       { },
+};
+
+/* Non-static: registered by the ab8500 charger master component driver */
+struct platform_driver ab8500_chargalg_driver = {
+       .probe = ab8500_chargalg_probe,
+       .remove = ab8500_chargalg_remove,
+       .driver = {
+               .name = "ab8500_chargalg",
+               .of_match_table = ab8500_chargalg_match,
+               .pm = &ab8500_chargalg_pm_ops,
+       },
+};
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Johan Palsson, Karl Komierowski");
+MODULE_ALIAS("platform:ab8500-chargalg");
+MODULE_DESCRIPTION("ab8500 battery charging algorithm");
index fa49e12..15eadaf 100644 (file)
@@ -292,7 +292,7 @@ struct ab8500_charger {
        struct iio_channel *adc_main_charger_c;
        struct iio_channel *adc_vbus_v;
        struct iio_channel *adc_usb_charger_c;
-       struct abx500_bm_data *bm;
+       struct ab8500_bm_data *bm;
        struct ab8500_charger_event_flags flags;
        struct ab8500_charger_usb_state usb_state;
        struct ab8500_charger_max_usb_in_curr max_usb_in_curr;
@@ -3388,7 +3388,7 @@ static const struct component_master_ops ab8500_charger_comp_ops = {
 static struct platform_driver *const ab8500_charger_component_drivers[] = {
        &ab8500_fg_driver,
        &ab8500_btemp_driver,
-       &abx500_chargalg_driver,
+       &ab8500_chargalg_driver,
 };
 
 static int ab8500_charger_compare_dev(struct device *dev, void *data)
index a6ebdb2..05fe972 100644 (file)
@@ -34,6 +34,7 @@
 #include <linux/mfd/abx500/ab8500.h>
 #include <linux/iio/consumer.h>
 #include <linux/kernel.h>
+#include <linux/fixp-arith.h>
 
 #include "ab8500-bm.h"
 
@@ -56,9 +57,6 @@
 /* FG constants */
 #define BATT_OVV                       0x01
 
-#define interpolate(x, x1, y1, x2, y2) \
-       ((y1) + ((((y2) - (y1)) * ((x) - (x1))) / ((x2) - (x1))));
-
 /**
  * struct ab8500_fg_interrupts - ab8500 fg interrupts
  * @name:      name of the interrupt
@@ -227,7 +225,7 @@ struct ab8500_fg {
        struct ab8500_fg_avg_cap avg_cap;
        struct ab8500 *parent;
        struct iio_channel *main_bat_v;
-       struct abx500_bm_data *bm;
+       struct ab8500_bm_data *bm;
        struct power_supply *fg_psy;
        struct workqueue_struct *fg_wq;
        struct delayed_work fg_periodic_work;
@@ -856,7 +854,7 @@ static int ab8500_fg_bat_voltage(struct ab8500_fg *di)
 static int ab8500_fg_volt_to_capacity(struct ab8500_fg *di, int voltage)
 {
        int i, tbl_size;
-       const struct abx500_v_to_cap *tbl;
+       const struct ab8500_v_to_cap *tbl;
        int cap = 0;
 
        tbl = di->bm->bat_type[di->bm->batt_id].v_to_cap_tbl;
@@ -868,11 +866,12 @@ static int ab8500_fg_volt_to_capacity(struct ab8500_fg *di, int voltage)
        }
 
        if ((i > 0) && (i < tbl_size)) {
-               cap = interpolate(voltage,
+               cap = fixp_linear_interpolate(
                        tbl[i].voltage,
                        tbl[i].capacity * 10,
                        tbl[i-1].voltage,
-                       tbl[i-1].capacity * 10);
+                       tbl[i-1].capacity * 10,
+                       voltage);
        } else if (i == 0) {
                cap = 1000;
        } else {
@@ -920,11 +919,12 @@ static int ab8500_fg_battery_resistance(struct ab8500_fg *di)
        }
 
        if ((i > 0) && (i < tbl_size)) {
-               resist = interpolate(di->bat_temp / 10,
+               resist = fixp_linear_interpolate(
                        tbl[i].temp,
                        tbl[i].resist,
                        tbl[i-1].temp,
-                       tbl[i-1].resist);
+                       tbl[i-1].resist,
+                       di->bat_temp / 10);
        } else if (i == 0) {
                resist = tbl[0].resist;
        } else {
@@ -2235,7 +2235,7 @@ static int ab8500_fg_get_ext_psy_data(struct device *dev, void *data)
                        case POWER_SUPPLY_TYPE_BATTERY:
                                if (!di->flags.batt_id_received &&
                                    di->bm->batt_id != BATTERY_UNKNOWN) {
-                                       const struct abx500_battery_type *b;
+                                       const struct ab8500_battery_type *b;
 
                                        b = &(di->bm->bat_type[di->bm->batt_id]);
 
diff --git a/drivers/power/supply/abx500_chargalg.c b/drivers/power/supply/abx500_chargalg.c
deleted file mode 100644 (file)
index b72826c..0000000
+++ /dev/null
@@ -1,2099 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) ST-Ericsson SA 2012
- * Copyright (c) 2012 Sony Mobile Communications AB
- *
- * Charging algorithm driver for abx500 variants
- *
- * Authors:
- *     Johan Palsson <johan.palsson@stericsson.com>
- *     Karl Komierowski <karl.komierowski@stericsson.com>
- *     Arun R Murthy <arun.murthy@stericsson.com>
- *     Author: Imre Sunyi <imre.sunyi@sonymobile.com>
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/component.h>
-#include <linux/hrtimer.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/platform_device.h>
-#include <linux/power_supply.h>
-#include <linux/completion.h>
-#include <linux/workqueue.h>
-#include <linux/kobject.h>
-#include <linux/of.h>
-#include <linux/mfd/core.h>
-#include <linux/mfd/abx500.h>
-#include <linux/mfd/abx500/ab8500.h>
-#include <linux/notifier.h>
-
-#include "ab8500-bm.h"
-#include "ab8500-chargalg.h"
-
-/* Watchdog kick interval */
-#define CHG_WD_INTERVAL                        (6 * HZ)
-
-/* End-of-charge criteria counter */
-#define EOC_COND_CNT                   10
-
-/* One hour expressed in seconds */
-#define ONE_HOUR_IN_SECONDS            3600
-
-/* Five minutes expressed in seconds */
-#define FIVE_MINUTES_IN_SECONDS        300
-
-#define CHARGALG_CURR_STEP_LOW         0
-#define CHARGALG_CURR_STEP_HIGH        100
-
-enum abx500_chargers {
-       NO_CHG,
-       AC_CHG,
-       USB_CHG,
-};
-
-struct abx500_chargalg_charger_info {
-       enum abx500_chargers conn_chg;
-       enum abx500_chargers prev_conn_chg;
-       enum abx500_chargers online_chg;
-       enum abx500_chargers prev_online_chg;
-       enum abx500_chargers charger_type;
-       bool usb_chg_ok;
-       bool ac_chg_ok;
-       int usb_volt;
-       int usb_curr;
-       int ac_volt;
-       int ac_curr;
-       int usb_vset;
-       int usb_iset;
-       int ac_vset;
-       int ac_iset;
-};
-
-struct abx500_chargalg_suspension_status {
-       bool suspended_change;
-       bool ac_suspended;
-       bool usb_suspended;
-};
-
-struct abx500_chargalg_current_step_status {
-       bool curr_step_change;
-       int curr_step;
-};
-
-struct abx500_chargalg_battery_data {
-       int temp;
-       int volt;
-       int avg_curr;
-       int inst_curr;
-       int percent;
-};
-
-enum abx500_chargalg_states {
-       STATE_HANDHELD_INIT,
-       STATE_HANDHELD,
-       STATE_CHG_NOT_OK_INIT,
-       STATE_CHG_NOT_OK,
-       STATE_HW_TEMP_PROTECT_INIT,
-       STATE_HW_TEMP_PROTECT,
-       STATE_NORMAL_INIT,
-       STATE_NORMAL,
-       STATE_WAIT_FOR_RECHARGE_INIT,
-       STATE_WAIT_FOR_RECHARGE,
-       STATE_MAINTENANCE_A_INIT,
-       STATE_MAINTENANCE_A,
-       STATE_MAINTENANCE_B_INIT,
-       STATE_MAINTENANCE_B,
-       STATE_TEMP_UNDEROVER_INIT,
-       STATE_TEMP_UNDEROVER,
-       STATE_TEMP_LOWHIGH_INIT,
-       STATE_TEMP_LOWHIGH,
-       STATE_SUSPENDED_INIT,
-       STATE_SUSPENDED,
-       STATE_OVV_PROTECT_INIT,
-       STATE_OVV_PROTECT,
-       STATE_SAFETY_TIMER_EXPIRED_INIT,
-       STATE_SAFETY_TIMER_EXPIRED,
-       STATE_BATT_REMOVED_INIT,
-       STATE_BATT_REMOVED,
-       STATE_WD_EXPIRED_INIT,
-       STATE_WD_EXPIRED,
-};
-
-static const char *states[] = {
-       "HANDHELD_INIT",
-       "HANDHELD",
-       "CHG_NOT_OK_INIT",
-       "CHG_NOT_OK",
-       "HW_TEMP_PROTECT_INIT",
-       "HW_TEMP_PROTECT",
-       "NORMAL_INIT",
-       "NORMAL",
-       "WAIT_FOR_RECHARGE_INIT",
-       "WAIT_FOR_RECHARGE",
-       "MAINTENANCE_A_INIT",
-       "MAINTENANCE_A",
-       "MAINTENANCE_B_INIT",
-       "MAINTENANCE_B",
-       "TEMP_UNDEROVER_INIT",
-       "TEMP_UNDEROVER",
-       "TEMP_LOWHIGH_INIT",
-       "TEMP_LOWHIGH",
-       "SUSPENDED_INIT",
-       "SUSPENDED",
-       "OVV_PROTECT_INIT",
-       "OVV_PROTECT",
-       "SAFETY_TIMER_EXPIRED_INIT",
-       "SAFETY_TIMER_EXPIRED",
-       "BATT_REMOVED_INIT",
-       "BATT_REMOVED",
-       "WD_EXPIRED_INIT",
-       "WD_EXPIRED",
-};
-
-struct abx500_chargalg_events {
-       bool batt_unknown;
-       bool mainextchnotok;
-       bool batt_ovv;
-       bool batt_rem;
-       bool btemp_underover;
-       bool btemp_lowhigh;
-       bool main_thermal_prot;
-       bool usb_thermal_prot;
-       bool main_ovv;
-       bool vbus_ovv;
-       bool usbchargernotok;
-       bool safety_timer_expired;
-       bool maintenance_timer_expired;
-       bool ac_wd_expired;
-       bool usb_wd_expired;
-       bool ac_cv_active;
-       bool usb_cv_active;
-       bool vbus_collapsed;
-};
-
-/**
- * struct abx500_charge_curr_maximization - Charger maximization parameters
- * @original_iset:     the non optimized/maximised charger current
- * @current_iset:      the charging current used at this moment
- * @test_delta_i:      the delta between the current we want to charge and the
-                       current that is really going into the battery
- * @condition_cnt:     number of iterations needed before a new charger current
-                       is set
- * @max_current:       maximum charger current
- * @wait_cnt:          to avoid too fast current step down in case of charger
- *                     voltage collapse, we insert this delay between step
- *                     down
- * @level:             tells in how many steps the charging current has been
-                       increased
- */
-struct abx500_charge_curr_maximization {
-       int original_iset;
-       int current_iset;
-       int test_delta_i;
-       int condition_cnt;
-       int max_current;
-       int wait_cnt;
-       u8 level;
-};
-
-enum maxim_ret {
-       MAXIM_RET_NOACTION,
-       MAXIM_RET_CHANGE,
-       MAXIM_RET_IBAT_TOO_HIGH,
-};
-
-/**
- * struct abx500_chargalg - abx500 Charging algorithm device information
- * @dev:               pointer to the structure device
- * @charge_status:     battery operating status
- * @eoc_cnt:           counter used to determine end-of_charge
- * @maintenance_chg:   indicate if maintenance charge is active
- * @t_hyst_norm                temperature hysteresis when the temperature has been
- *                     over or under normal limits
- * @t_hyst_lowhigh     temperature hysteresis when the temperature has been
- *                     over or under the high or low limits
- * @charge_state:      current state of the charging algorithm
- * @ccm                        charging current maximization parameters
- * @chg_info:          information about connected charger types
- * @batt_data:         data of the battery
- * @susp_status:       current charger suspension status
- * @bm:                Platform specific battery management information
- * @curr_status:       Current step status for over-current protection
- * @parent:            pointer to the struct abx500
- * @chargalg_psy:      structure that holds the battery properties exposed by
- *                     the charging algorithm
- * @events:            structure for information about events triggered
- * @chargalg_wq:               work queue for running the charging algorithm
- * @chargalg_periodic_work:    work to run the charging algorithm periodically
- * @chargalg_wd_work:          work to kick the charger watchdog periodically
- * @chargalg_work:             work to run the charging algorithm instantly
- * @safety_timer:              charging safety timer
- * @maintenance_timer:         maintenance charging timer
- * @chargalg_kobject:          structure of type kobject
- */
-struct abx500_chargalg {
-       struct device *dev;
-       int charge_status;
-       int eoc_cnt;
-       bool maintenance_chg;
-       int t_hyst_norm;
-       int t_hyst_lowhigh;
-       enum abx500_chargalg_states charge_state;
-       struct abx500_charge_curr_maximization ccm;
-       struct abx500_chargalg_charger_info chg_info;
-       struct abx500_chargalg_battery_data batt_data;
-       struct abx500_chargalg_suspension_status susp_status;
-       struct ab8500 *parent;
-       struct abx500_chargalg_current_step_status curr_status;
-       struct abx500_bm_data *bm;
-       struct power_supply *chargalg_psy;
-       struct ux500_charger *ac_chg;
-       struct ux500_charger *usb_chg;
-       struct abx500_chargalg_events events;
-       struct workqueue_struct *chargalg_wq;
-       struct delayed_work chargalg_periodic_work;
-       struct delayed_work chargalg_wd_work;
-       struct work_struct chargalg_work;
-       struct hrtimer safety_timer;
-       struct hrtimer maintenance_timer;
-       struct kobject chargalg_kobject;
-};
-
-/*External charger prepare notifier*/
-BLOCKING_NOTIFIER_HEAD(charger_notifier_list);
-
-/* Main battery properties */
-static enum power_supply_property abx500_chargalg_props[] = {
-       POWER_SUPPLY_PROP_STATUS,
-       POWER_SUPPLY_PROP_HEALTH,
-};
-
-struct abx500_chargalg_sysfs_entry {
-       struct attribute attr;
-       ssize_t (*show)(struct abx500_chargalg *, char *);
-       ssize_t (*store)(struct abx500_chargalg *, const char *, size_t);
-};
-
-/**
- * abx500_chargalg_safety_timer_expired() - Expiration of the safety timer
- * @timer:     pointer to the hrtimer structure
- *
- * This function gets called when the safety timer for the charger
- * expires
- */
-static enum hrtimer_restart
-abx500_chargalg_safety_timer_expired(struct hrtimer *timer)
-{
-       struct abx500_chargalg *di = container_of(timer, struct abx500_chargalg,
-                                                 safety_timer);
-       dev_err(di->dev, "Safety timer expired\n");
-       di->events.safety_timer_expired = true;
-
-       /* Trigger execution of the algorithm instantly */
-       queue_work(di->chargalg_wq, &di->chargalg_work);
-
-       return HRTIMER_NORESTART;
-}
-
-/**
- * abx500_chargalg_maintenance_timer_expired() - Expiration of
- * the maintenance timer
- * @timer:     pointer to the timer structure
- *
- * This function gets called when the maintenence timer
- * expires
- */
-static enum hrtimer_restart
-abx500_chargalg_maintenance_timer_expired(struct hrtimer *timer)
-{
-
-       struct abx500_chargalg *di = container_of(timer, struct abx500_chargalg,
-                                                 maintenance_timer);
-
-       dev_dbg(di->dev, "Maintenance timer expired\n");
-       di->events.maintenance_timer_expired = true;
-
-       /* Trigger execution of the algorithm instantly */
-       queue_work(di->chargalg_wq, &di->chargalg_work);
-
-       return HRTIMER_NORESTART;
-}
-
-/**
- * abx500_chargalg_state_to() - Change charge state
- * @di:                pointer to the abx500_chargalg structure
- *
- * This function gets called when a charge state change should occur
- */
-static void abx500_chargalg_state_to(struct abx500_chargalg *di,
-       enum abx500_chargalg_states state)
-{
-       dev_dbg(di->dev,
-               "State changed: %s (From state: [%d] %s =to=> [%d] %s )\n",
-               di->charge_state == state ? "NO" : "YES",
-               di->charge_state,
-               states[di->charge_state],
-               state,
-               states[state]);
-
-       di->charge_state = state;
-}
-
-static int abx500_chargalg_check_charger_enable(struct abx500_chargalg *di)
-{
-       switch (di->charge_state) {
-       case STATE_NORMAL:
-       case STATE_MAINTENANCE_A:
-       case STATE_MAINTENANCE_B:
-               break;
-       default:
-               return 0;
-       }
-
-       if (di->chg_info.charger_type & USB_CHG) {
-               return di->usb_chg->ops.check_enable(di->usb_chg,
-                       di->bm->bat_type[di->bm->batt_id].normal_vol_lvl,
-                       di->bm->bat_type[di->bm->batt_id].normal_cur_lvl);
-       } else if ((di->chg_info.charger_type & AC_CHG) &&
-                  !(di->ac_chg->external)) {
-               return di->ac_chg->ops.check_enable(di->ac_chg,
-                       di->bm->bat_type[di->bm->batt_id].normal_vol_lvl,
-                       di->bm->bat_type[di->bm->batt_id].normal_cur_lvl);
-       }
-       return 0;
-}
-
-/**
- * abx500_chargalg_check_charger_connection() - Check charger connection change
- * @di:                pointer to the abx500_chargalg structure
- *
- * This function will check if there is a change in the charger connection
- * and change charge state accordingly. AC has precedence over USB.
- */
-static int abx500_chargalg_check_charger_connection(struct abx500_chargalg *di)
-{
-       if (di->chg_info.conn_chg != di->chg_info.prev_conn_chg ||
-               di->susp_status.suspended_change) {
-               /*
-                * Charger state changed or suspension
-                * has changed since last update
-                */
-               if ((di->chg_info.conn_chg & AC_CHG) &&
-                       !di->susp_status.ac_suspended) {
-                       dev_dbg(di->dev, "Charging source is AC\n");
-                       if (di->chg_info.charger_type != AC_CHG) {
-                               di->chg_info.charger_type = AC_CHG;
-                               abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
-                       }
-               } else if ((di->chg_info.conn_chg & USB_CHG) &&
-                       !di->susp_status.usb_suspended) {
-                       dev_dbg(di->dev, "Charging source is USB\n");
-                       di->chg_info.charger_type = USB_CHG;
-                       abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
-               } else if (di->chg_info.conn_chg &&
-                       (di->susp_status.ac_suspended ||
-                       di->susp_status.usb_suspended)) {
-                       dev_dbg(di->dev, "Charging is suspended\n");
-                       di->chg_info.charger_type = NO_CHG;
-                       abx500_chargalg_state_to(di, STATE_SUSPENDED_INIT);
-               } else {
-                       dev_dbg(di->dev, "Charging source is OFF\n");
-                       di->chg_info.charger_type = NO_CHG;
-                       abx500_chargalg_state_to(di, STATE_HANDHELD_INIT);
-               }
-               di->chg_info.prev_conn_chg = di->chg_info.conn_chg;
-               di->susp_status.suspended_change = false;
-       }
-       return di->chg_info.conn_chg;
-}
-
-/**
- * abx500_chargalg_check_current_step_status() - Check charging current
- * step status.
- * @di:                pointer to the abx500_chargalg structure
- *
- * This function will check if there is a change in the charging current step
- * and change charge state accordingly.
- */
-static void abx500_chargalg_check_current_step_status
-       (struct abx500_chargalg *di)
-{
-       if (di->curr_status.curr_step_change)
-               abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
-       di->curr_status.curr_step_change = false;
-}
-
-/**
- * abx500_chargalg_start_safety_timer() - Start charging safety timer
- * @di:                pointer to the abx500_chargalg structure
- *
- * The safety timer is used to avoid overcharging of old or bad batteries.
- * There are different timers for AC and USB
- */
-static void abx500_chargalg_start_safety_timer(struct abx500_chargalg *di)
-{
-       /* Charger-dependent expiration time in hours*/
-       int timer_expiration = 0;
-
-       switch (di->chg_info.charger_type) {
-       case AC_CHG:
-               timer_expiration = di->bm->main_safety_tmr_h;
-               break;
-
-       case USB_CHG:
-               timer_expiration = di->bm->usb_safety_tmr_h;
-               break;
-
-       default:
-               dev_err(di->dev, "Unknown charger to charge from\n");
-               break;
-       }
-
-       di->events.safety_timer_expired = false;
-       hrtimer_set_expires_range(&di->safety_timer,
-               ktime_set(timer_expiration * ONE_HOUR_IN_SECONDS, 0),
-               ktime_set(FIVE_MINUTES_IN_SECONDS, 0));
-       hrtimer_start_expires(&di->safety_timer, HRTIMER_MODE_REL);
-}
-
-/**
- * abx500_chargalg_stop_safety_timer() - Stop charging safety timer
- * @di:                pointer to the abx500_chargalg structure
- *
- * The safety timer is stopped whenever the NORMAL state is exited
- */
-static void abx500_chargalg_stop_safety_timer(struct abx500_chargalg *di)
-{
-       if (hrtimer_try_to_cancel(&di->safety_timer) >= 0)
-               di->events.safety_timer_expired = false;
-}
-
-/**
- * abx500_chargalg_start_maintenance_timer() - Start charging maintenance timer
- * @di:                pointer to the abx500_chargalg structure
- * @duration:  duration of ther maintenance timer in hours
- *
- * The maintenance timer is used to maintain the charge in the battery once
- * the battery is considered full. These timers are chosen to match the
- * discharge curve of the battery
- */
-static void abx500_chargalg_start_maintenance_timer(struct abx500_chargalg *di,
-       int duration)
-{
-       hrtimer_set_expires_range(&di->maintenance_timer,
-               ktime_set(duration * ONE_HOUR_IN_SECONDS, 0),
-               ktime_set(FIVE_MINUTES_IN_SECONDS, 0));
-       di->events.maintenance_timer_expired = false;
-       hrtimer_start_expires(&di->maintenance_timer, HRTIMER_MODE_REL);
-}
-
-/**
- * abx500_chargalg_stop_maintenance_timer() - Stop maintenance timer
- * @di:                pointer to the abx500_chargalg structure
- *
- * The maintenance timer is stopped whenever maintenance ends or when another
- * state is entered
- */
-static void abx500_chargalg_stop_maintenance_timer(struct abx500_chargalg *di)
-{
-       if (hrtimer_try_to_cancel(&di->maintenance_timer) >= 0)
-               di->events.maintenance_timer_expired = false;
-}
-
-/**
- * abx500_chargalg_kick_watchdog() - Kick charger watchdog
- * @di:                pointer to the abx500_chargalg structure
- *
- * The charger watchdog have to be kicked periodically whenever the charger is
- * on, else the ABB will reset the system
- */
-static int abx500_chargalg_kick_watchdog(struct abx500_chargalg *di)
-{
-       /* Check if charger exists and kick watchdog if charging */
-       if (di->ac_chg && di->ac_chg->ops.kick_wd &&
-           di->chg_info.online_chg & AC_CHG) {
-               /*
-                * If AB charger watchdog expired, pm2xxx charging
-                * gets disabled. To be safe, kick both AB charger watchdog
-                * and pm2xxx watchdog.
-                */
-               if (di->ac_chg->external &&
-                   di->usb_chg && di->usb_chg->ops.kick_wd)
-                       di->usb_chg->ops.kick_wd(di->usb_chg);
-
-               return di->ac_chg->ops.kick_wd(di->ac_chg);
-       }
-       else if (di->usb_chg && di->usb_chg->ops.kick_wd &&
-                       di->chg_info.online_chg & USB_CHG)
-               return di->usb_chg->ops.kick_wd(di->usb_chg);
-
-       return -ENXIO;
-}
-
-/**
- * abx500_chargalg_ac_en() - Turn on/off the AC charger
- * @di:                pointer to the abx500_chargalg structure
- * @enable:    charger on/off
- * @vset:      requested charger output voltage
- * @iset:      requested charger output current
- *
- * The AC charger will be turned on/off with the requested charge voltage and
- * current
- */
-static int abx500_chargalg_ac_en(struct abx500_chargalg *di, int enable,
-       int vset, int iset)
-{
-       static int abx500_chargalg_ex_ac_enable_toggle;
-
-       if (!di->ac_chg || !di->ac_chg->ops.enable)
-               return -ENXIO;
-
-       /* Select maximum of what both the charger and the battery supports */
-       if (di->ac_chg->max_out_volt)
-               vset = min(vset, di->ac_chg->max_out_volt);
-       if (di->ac_chg->max_out_curr)
-               iset = min(iset, di->ac_chg->max_out_curr);
-
-       di->chg_info.ac_iset = iset;
-       di->chg_info.ac_vset = vset;
-
-       /* Enable external charger */
-       if (enable && di->ac_chg->external &&
-           !abx500_chargalg_ex_ac_enable_toggle) {
-               blocking_notifier_call_chain(&charger_notifier_list,
-                                            0, di->dev);
-               abx500_chargalg_ex_ac_enable_toggle++;
-       }
-
-       return di->ac_chg->ops.enable(di->ac_chg, enable, vset, iset);
-}
-
-/**
- * abx500_chargalg_usb_en() - Turn on/off the USB charger
- * @di:                pointer to the abx500_chargalg structure
- * @enable:    charger on/off
- * @vset:      requested charger output voltage
- * @iset:      requested charger output current
- *
- * The USB charger will be turned on/off with the requested charge voltage and
- * current
- */
-static int abx500_chargalg_usb_en(struct abx500_chargalg *di, int enable,
-       int vset, int iset)
-{
-       if (!di->usb_chg || !di->usb_chg->ops.enable)
-               return -ENXIO;
-
-       /* Select maximum of what both the charger and the battery supports */
-       if (di->usb_chg->max_out_volt)
-               vset = min(vset, di->usb_chg->max_out_volt);
-       if (di->usb_chg->max_out_curr)
-               iset = min(iset, di->usb_chg->max_out_curr);
-
-       di->chg_info.usb_iset = iset;
-       di->chg_info.usb_vset = vset;
-
-       return di->usb_chg->ops.enable(di->usb_chg, enable, vset, iset);
-}
-
-/**
- * abx500_chargalg_update_chg_curr() - Update charger current
- * @di:                pointer to the abx500_chargalg structure
- * @iset:      requested charger output current
- *
- * The charger output current will be updated for the charger
- * that is currently in use
- */
-static int abx500_chargalg_update_chg_curr(struct abx500_chargalg *di,
-               int iset)
-{
-       /* Check if charger exists and update current if charging */
-       if (di->ac_chg && di->ac_chg->ops.update_curr &&
-                       di->chg_info.charger_type & AC_CHG) {
-               /*
-                * Select maximum of what both the charger
-                * and the battery supports
-                */
-               if (di->ac_chg->max_out_curr)
-                       iset = min(iset, di->ac_chg->max_out_curr);
-
-               di->chg_info.ac_iset = iset;
-
-               return di->ac_chg->ops.update_curr(di->ac_chg, iset);
-       } else if (di->usb_chg && di->usb_chg->ops.update_curr &&
-                       di->chg_info.charger_type & USB_CHG) {
-               /*
-                * Select maximum of what both the charger
-                * and the battery supports
-                */
-               if (di->usb_chg->max_out_curr)
-                       iset = min(iset, di->usb_chg->max_out_curr);
-
-               di->chg_info.usb_iset = iset;
-
-               return di->usb_chg->ops.update_curr(di->usb_chg, iset);
-       }
-
-       return -ENXIO;
-}
-
-/**
- * abx500_chargalg_stop_charging() - Stop charging
- * @di:                pointer to the abx500_chargalg structure
- *
- * This function is called from any state where charging should be stopped.
- * All charging is disabled and all status parameters and timers are changed
- * accordingly
- */
-static void abx500_chargalg_stop_charging(struct abx500_chargalg *di)
-{
-       abx500_chargalg_ac_en(di, false, 0, 0);
-       abx500_chargalg_usb_en(di, false, 0, 0);
-       abx500_chargalg_stop_safety_timer(di);
-       abx500_chargalg_stop_maintenance_timer(di);
-       di->charge_status = POWER_SUPPLY_STATUS_NOT_CHARGING;
-       di->maintenance_chg = false;
-       cancel_delayed_work(&di->chargalg_wd_work);
-       power_supply_changed(di->chargalg_psy);
-}
-
-/**
- * abx500_chargalg_hold_charging() - Pauses charging
- * @di:                pointer to the abx500_chargalg structure
- *
- * This function is called in the case where maintenance charging has been
- * disabled and instead a battery voltage mode is entered to check when the
- * battery voltage has reached a certain recharge voltage
- */
-static void abx500_chargalg_hold_charging(struct abx500_chargalg *di)
-{
-       abx500_chargalg_ac_en(di, false, 0, 0);
-       abx500_chargalg_usb_en(di, false, 0, 0);
-       abx500_chargalg_stop_safety_timer(di);
-       abx500_chargalg_stop_maintenance_timer(di);
-       di->charge_status = POWER_SUPPLY_STATUS_CHARGING;
-       di->maintenance_chg = false;
-       cancel_delayed_work(&di->chargalg_wd_work);
-       power_supply_changed(di->chargalg_psy);
-}
-
-/**
- * abx500_chargalg_start_charging() - Start the charger
- * @di:                pointer to the abx500_chargalg structure
- * @vset:      requested charger output voltage
- * @iset:      requested charger output current
- *
- * A charger will be enabled depending on the requested charger type that was
- * detected previously.
- */
-static void abx500_chargalg_start_charging(struct abx500_chargalg *di,
-       int vset, int iset)
-{
-       switch (di->chg_info.charger_type) {
-       case AC_CHG:
-               dev_dbg(di->dev,
-                       "AC parameters: Vset %d, Ich %d\n", vset, iset);
-               abx500_chargalg_usb_en(di, false, 0, 0);
-               abx500_chargalg_ac_en(di, true, vset, iset);
-               break;
-
-       case USB_CHG:
-               dev_dbg(di->dev,
-                       "USB parameters: Vset %d, Ich %d\n", vset, iset);
-               abx500_chargalg_ac_en(di, false, 0, 0);
-               abx500_chargalg_usb_en(di, true, vset, iset);
-               break;
-
-       default:
-               dev_err(di->dev, "Unknown charger to charge from\n");
-               break;
-       }
-}
-
-/**
- * abx500_chargalg_check_temp() - Check battery temperature ranges
- * @di:                pointer to the abx500_chargalg structure
- *
- * The battery temperature is checked against the predefined limits and the
- * charge state is changed accordingly
- */
-static void abx500_chargalg_check_temp(struct abx500_chargalg *di)
-{
-       if (di->batt_data.temp > (di->bm->temp_low + di->t_hyst_norm) &&
-               di->batt_data.temp < (di->bm->temp_high - di->t_hyst_norm)) {
-               /* Temp OK! */
-               di->events.btemp_underover = false;
-               di->events.btemp_lowhigh = false;
-               di->t_hyst_norm = 0;
-               di->t_hyst_lowhigh = 0;
-       } else {
-               if (((di->batt_data.temp >= di->bm->temp_high) &&
-                       (di->batt_data.temp <
-                               (di->bm->temp_over - di->t_hyst_lowhigh))) ||
-                       ((di->batt_data.temp >
-                               (di->bm->temp_under + di->t_hyst_lowhigh)) &&
-                       (di->batt_data.temp <= di->bm->temp_low))) {
-                       /* TEMP minor!!!!! */
-                       di->events.btemp_underover = false;
-                       di->events.btemp_lowhigh = true;
-                       di->t_hyst_norm = di->bm->temp_hysteresis;
-                       di->t_hyst_lowhigh = 0;
-               } else if (di->batt_data.temp <= di->bm->temp_under ||
-                       di->batt_data.temp >= di->bm->temp_over) {
-                       /* TEMP major!!!!! */
-                       di->events.btemp_underover = true;
-                       di->events.btemp_lowhigh = false;
-                       di->t_hyst_norm = 0;
-                       di->t_hyst_lowhigh = di->bm->temp_hysteresis;
-               } else {
-               /* Within hysteresis */
-               dev_dbg(di->dev, "Within hysteresis limit temp: %d "
-                               "hyst_lowhigh %d, hyst normal %d\n",
-                               di->batt_data.temp, di->t_hyst_lowhigh,
-                               di->t_hyst_norm);
-               }
-       }
-}
-
-/**
- * abx500_chargalg_check_charger_voltage() - Check charger voltage
- * @di:                pointer to the abx500_chargalg structure
- *
- * Charger voltage is checked against maximum limit
- */
-static void abx500_chargalg_check_charger_voltage(struct abx500_chargalg *di)
-{
-       if (di->chg_info.usb_volt > di->bm->chg_params->usb_volt_max)
-               di->chg_info.usb_chg_ok = false;
-       else
-               di->chg_info.usb_chg_ok = true;
-
-       if (di->chg_info.ac_volt > di->bm->chg_params->ac_volt_max)
-               di->chg_info.ac_chg_ok = false;
-       else
-               di->chg_info.ac_chg_ok = true;
-
-}
-
-/**
- * abx500_chargalg_end_of_charge() - Check if end-of-charge criteria is fulfilled
- * @di:                pointer to the abx500_chargalg structure
- *
- * End-of-charge criteria is fulfilled when the battery voltage is above a
- * certain limit and the battery current is below a certain limit for a
- * predefined number of consecutive seconds. If true, the battery is full
- */
-static void abx500_chargalg_end_of_charge(struct abx500_chargalg *di)
-{
-       if (di->charge_status == POWER_SUPPLY_STATUS_CHARGING &&
-               di->charge_state == STATE_NORMAL &&
-               !di->maintenance_chg && (di->batt_data.volt >=
-               di->bm->bat_type[di->bm->batt_id].termination_vol ||
-               di->events.usb_cv_active || di->events.ac_cv_active) &&
-               di->batt_data.avg_curr <
-               di->bm->bat_type[di->bm->batt_id].termination_curr &&
-               di->batt_data.avg_curr > 0) {
-               if (++di->eoc_cnt >= EOC_COND_CNT) {
-                       di->eoc_cnt = 0;
-                       di->charge_status = POWER_SUPPLY_STATUS_FULL;
-                       di->maintenance_chg = true;
-                       dev_dbg(di->dev, "EOC reached!\n");
-                       power_supply_changed(di->chargalg_psy);
-               } else {
-                       dev_dbg(di->dev,
-                               " EOC limit reached for the %d"
-                               " time, out of %d before EOC\n",
-                               di->eoc_cnt,
-                               EOC_COND_CNT);
-               }
-       } else {
-               di->eoc_cnt = 0;
-       }
-}
-
-static void init_maxim_chg_curr(struct abx500_chargalg *di)
-{
-       di->ccm.original_iset =
-               di->bm->bat_type[di->bm->batt_id].normal_cur_lvl;
-       di->ccm.current_iset =
-               di->bm->bat_type[di->bm->batt_id].normal_cur_lvl;
-       di->ccm.test_delta_i = di->bm->maxi->charger_curr_step;
-       di->ccm.max_current = di->bm->maxi->chg_curr;
-       di->ccm.condition_cnt = di->bm->maxi->wait_cycles;
-       di->ccm.level = 0;
-}
-
-/**
- * abx500_chargalg_chg_curr_maxim - increases the charger current to
- *                     compensate for the system load
- * @di         pointer to the abx500_chargalg structure
- *
- * This maximization function is used to raise the charger current to get the
- * battery current as close to the optimal value as possible. The battery
- * current during charging is affected by the system load
- */
-static enum maxim_ret abx500_chargalg_chg_curr_maxim(struct abx500_chargalg *di)
-{
-       int delta_i;
-
-       if (!di->bm->maxi->ena_maxi)
-               return MAXIM_RET_NOACTION;
-
-       delta_i = di->ccm.original_iset - di->batt_data.inst_curr;
-
-       if (di->events.vbus_collapsed) {
-               dev_dbg(di->dev, "Charger voltage has collapsed %d\n",
-                               di->ccm.wait_cnt);
-               if (di->ccm.wait_cnt == 0) {
-                       dev_dbg(di->dev, "lowering current\n");
-                       di->ccm.wait_cnt++;
-                       di->ccm.condition_cnt = di->bm->maxi->wait_cycles;
-                       di->ccm.max_current =
-                               di->ccm.current_iset - di->ccm.test_delta_i;
-                       di->ccm.current_iset = di->ccm.max_current;
-                       di->ccm.level--;
-                       return MAXIM_RET_CHANGE;
-               } else {
-                       dev_dbg(di->dev, "waiting\n");
-                       /* Let's go in here twice before lowering curr again */
-                       di->ccm.wait_cnt = (di->ccm.wait_cnt + 1) % 3;
-                       return MAXIM_RET_NOACTION;
-               }
-       }
-
-       di->ccm.wait_cnt = 0;
-
-       if ((di->batt_data.inst_curr > di->ccm.original_iset)) {
-               dev_dbg(di->dev, " Maximization Ibat (%dmA) too high"
-                       " (limit %dmA) (current iset: %dmA)!\n",
-                       di->batt_data.inst_curr, di->ccm.original_iset,
-                       di->ccm.current_iset);
-
-               if (di->ccm.current_iset == di->ccm.original_iset)
-                       return MAXIM_RET_NOACTION;
-
-               di->ccm.condition_cnt = di->bm->maxi->wait_cycles;
-               di->ccm.current_iset = di->ccm.original_iset;
-               di->ccm.level = 0;
-
-               return MAXIM_RET_IBAT_TOO_HIGH;
-       }
-
-       if (delta_i > di->ccm.test_delta_i &&
-               (di->ccm.current_iset + di->ccm.test_delta_i) <
-               di->ccm.max_current) {
-               if (di->ccm.condition_cnt-- == 0) {
-                       /* Increse the iset with cco.test_delta_i */
-                       di->ccm.condition_cnt = di->bm->maxi->wait_cycles;
-                       di->ccm.current_iset += di->ccm.test_delta_i;
-                       di->ccm.level++;
-                       dev_dbg(di->dev, " Maximization needed, increase"
-                               " with %d mA to %dmA (Optimal ibat: %d)"
-                               " Level %d\n",
-                               di->ccm.test_delta_i,
-                               di->ccm.current_iset,
-                               di->ccm.original_iset,
-                               di->ccm.level);
-                       return MAXIM_RET_CHANGE;
-               } else {
-                       return MAXIM_RET_NOACTION;
-               }
-       }  else {
-               di->ccm.condition_cnt = di->bm->maxi->wait_cycles;
-               return MAXIM_RET_NOACTION;
-       }
-}
-
-static void handle_maxim_chg_curr(struct abx500_chargalg *di)
-{
-       enum maxim_ret ret;
-       int result;
-
-       ret = abx500_chargalg_chg_curr_maxim(di);
-       switch (ret) {
-       case MAXIM_RET_CHANGE:
-               result = abx500_chargalg_update_chg_curr(di,
-                       di->ccm.current_iset);
-               if (result)
-                       dev_err(di->dev, "failed to set chg curr\n");
-               break;
-       case MAXIM_RET_IBAT_TOO_HIGH:
-               result = abx500_chargalg_update_chg_curr(di,
-                       di->bm->bat_type[di->bm->batt_id].normal_cur_lvl);
-               if (result)
-                       dev_err(di->dev, "failed to set chg curr\n");
-               break;
-
-       case MAXIM_RET_NOACTION:
-       default:
-               /* Do nothing..*/
-               break;
-       }
-}
-
-static int abx500_chargalg_get_ext_psy_data(struct device *dev, void *data)
-{
-       struct power_supply *psy;
-       struct power_supply *ext = dev_get_drvdata(dev);
-       const char **supplicants = (const char **)ext->supplied_to;
-       struct abx500_chargalg *di;
-       union power_supply_propval ret;
-       int j;
-       bool capacity_updated = false;
-
-       psy = (struct power_supply *)data;
-       di = power_supply_get_drvdata(psy);
-       /* For all psy where the driver name appears in any supplied_to */
-       j = match_string(supplicants, ext->num_supplicants, psy->desc->name);
-       if (j < 0)
-               return 0;
-
-       /*
-        *  If external is not registering 'POWER_SUPPLY_PROP_CAPACITY' to its
-        * property because of handling that sysfs entry on its own, this is
-        * the place to get the battery capacity.
-        */
-       if (!power_supply_get_property(ext, POWER_SUPPLY_PROP_CAPACITY, &ret)) {
-               di->batt_data.percent = ret.intval;
-               capacity_updated = true;
-       }
-
-       /* Go through all properties for the psy */
-       for (j = 0; j < ext->desc->num_properties; j++) {
-               enum power_supply_property prop;
-               prop = ext->desc->properties[j];
-
-               /*
-                * Initialize chargers if not already done.
-                * The ab8500_charger*/
-               if (!di->ac_chg &&
-                       ext->desc->type == POWER_SUPPLY_TYPE_MAINS)
-                       di->ac_chg = psy_to_ux500_charger(ext);
-               else if (!di->usb_chg &&
-                       ext->desc->type == POWER_SUPPLY_TYPE_USB)
-                       di->usb_chg = psy_to_ux500_charger(ext);
-
-               if (power_supply_get_property(ext, prop, &ret))
-                       continue;
-               switch (prop) {
-               case POWER_SUPPLY_PROP_PRESENT:
-                       switch (ext->desc->type) {
-                       case POWER_SUPPLY_TYPE_BATTERY:
-                               /* Battery present */
-                               if (ret.intval)
-                                       di->events.batt_rem = false;
-                               /* Battery removed */
-                               else
-                                       di->events.batt_rem = true;
-                               break;
-                       case POWER_SUPPLY_TYPE_MAINS:
-                               /* AC disconnected */
-                               if (!ret.intval &&
-                                       (di->chg_info.conn_chg & AC_CHG)) {
-                                       di->chg_info.prev_conn_chg =
-                                               di->chg_info.conn_chg;
-                                       di->chg_info.conn_chg &= ~AC_CHG;
-                               }
-                               /* AC connected */
-                               else if (ret.intval &&
-                                       !(di->chg_info.conn_chg & AC_CHG)) {
-                                       di->chg_info.prev_conn_chg =
-                                               di->chg_info.conn_chg;
-                                       di->chg_info.conn_chg |= AC_CHG;
-                               }
-                               break;
-                       case POWER_SUPPLY_TYPE_USB:
-                               /* USB disconnected */
-                               if (!ret.intval &&
-                                       (di->chg_info.conn_chg & USB_CHG)) {
-                                       di->chg_info.prev_conn_chg =
-                                               di->chg_info.conn_chg;
-                                       di->chg_info.conn_chg &= ~USB_CHG;
-                               }
-                               /* USB connected */
-                               else if (ret.intval &&
-                                       !(di->chg_info.conn_chg & USB_CHG)) {
-                                       di->chg_info.prev_conn_chg =
-                                               di->chg_info.conn_chg;
-                                       di->chg_info.conn_chg |= USB_CHG;
-                               }
-                               break;
-                       default:
-                               break;
-                       }
-                       break;
-
-               case POWER_SUPPLY_PROP_ONLINE:
-                       switch (ext->desc->type) {
-                       case POWER_SUPPLY_TYPE_BATTERY:
-                               break;
-                       case POWER_SUPPLY_TYPE_MAINS:
-                               /* AC offline */
-                               if (!ret.intval &&
-                                       (di->chg_info.online_chg & AC_CHG)) {
-                                       di->chg_info.prev_online_chg =
-                                               di->chg_info.online_chg;
-                                       di->chg_info.online_chg &= ~AC_CHG;
-                               }
-                               /* AC online */
-                               else if (ret.intval &&
-                                       !(di->chg_info.online_chg & AC_CHG)) {
-                                       di->chg_info.prev_online_chg =
-                                               di->chg_info.online_chg;
-                                       di->chg_info.online_chg |= AC_CHG;
-                                       queue_delayed_work(di->chargalg_wq,
-                                               &di->chargalg_wd_work, 0);
-                               }
-                               break;
-                       case POWER_SUPPLY_TYPE_USB:
-                               /* USB offline */
-                               if (!ret.intval &&
-                                       (di->chg_info.online_chg & USB_CHG)) {
-                                       di->chg_info.prev_online_chg =
-                                               di->chg_info.online_chg;
-                                       di->chg_info.online_chg &= ~USB_CHG;
-                               }
-                               /* USB online */
-                               else if (ret.intval &&
-                                       !(di->chg_info.online_chg & USB_CHG)) {
-                                       di->chg_info.prev_online_chg =
-                                               di->chg_info.online_chg;
-                                       di->chg_info.online_chg |= USB_CHG;
-                                       queue_delayed_work(di->chargalg_wq,
-                                               &di->chargalg_wd_work, 0);
-                               }
-                               break;
-                       default:
-                               break;
-                       }
-                       break;
-
-               case POWER_SUPPLY_PROP_HEALTH:
-                       switch (ext->desc->type) {
-                       case POWER_SUPPLY_TYPE_BATTERY:
-                               break;
-                       case POWER_SUPPLY_TYPE_MAINS:
-                               switch (ret.intval) {
-                               case POWER_SUPPLY_HEALTH_UNSPEC_FAILURE:
-                                       di->events.mainextchnotok = true;
-                                       di->events.main_thermal_prot = false;
-                                       di->events.main_ovv = false;
-                                       di->events.ac_wd_expired = false;
-                                       break;
-                               case POWER_SUPPLY_HEALTH_DEAD:
-                                       di->events.ac_wd_expired = true;
-                                       di->events.mainextchnotok = false;
-                                       di->events.main_ovv = false;
-                                       di->events.main_thermal_prot = false;
-                                       break;
-                               case POWER_SUPPLY_HEALTH_COLD:
-                               case POWER_SUPPLY_HEALTH_OVERHEAT:
-                                       di->events.main_thermal_prot = true;
-                                       di->events.mainextchnotok = false;
-                                       di->events.main_ovv = false;
-                                       di->events.ac_wd_expired = false;
-                                       break;
-                               case POWER_SUPPLY_HEALTH_OVERVOLTAGE:
-                                       di->events.main_ovv = true;
-                                       di->events.mainextchnotok = false;
-                                       di->events.main_thermal_prot = false;
-                                       di->events.ac_wd_expired = false;
-                                       break;
-                               case POWER_SUPPLY_HEALTH_GOOD:
-                                       di->events.main_thermal_prot = false;
-                                       di->events.mainextchnotok = false;
-                                       di->events.main_ovv = false;
-                                       di->events.ac_wd_expired = false;
-                                       break;
-                               default:
-                                       break;
-                               }
-                               break;
-
-                       case POWER_SUPPLY_TYPE_USB:
-                               switch (ret.intval) {
-                               case POWER_SUPPLY_HEALTH_UNSPEC_FAILURE:
-                                       di->events.usbchargernotok = true;
-                                       di->events.usb_thermal_prot = false;
-                                       di->events.vbus_ovv = false;
-                                       di->events.usb_wd_expired = false;
-                                       break;
-                               case POWER_SUPPLY_HEALTH_DEAD:
-                                       di->events.usb_wd_expired = true;
-                                       di->events.usbchargernotok = false;
-                                       di->events.usb_thermal_prot = false;
-                                       di->events.vbus_ovv = false;
-                                       break;
-                               case POWER_SUPPLY_HEALTH_COLD:
-                               case POWER_SUPPLY_HEALTH_OVERHEAT:
-                                       di->events.usb_thermal_prot = true;
-                                       di->events.usbchargernotok = false;
-                                       di->events.vbus_ovv = false;
-                                       di->events.usb_wd_expired = false;
-                                       break;
-                               case POWER_SUPPLY_HEALTH_OVERVOLTAGE:
-                                       di->events.vbus_ovv = true;
-                                       di->events.usbchargernotok = false;
-                                       di->events.usb_thermal_prot = false;
-                                       di->events.usb_wd_expired = false;
-                                       break;
-                               case POWER_SUPPLY_HEALTH_GOOD:
-                                       di->events.usbchargernotok = false;
-                                       di->events.usb_thermal_prot = false;
-                                       di->events.vbus_ovv = false;
-                                       di->events.usb_wd_expired = false;
-                                       break;
-                               default:
-                                       break;
-                               }
-                               break;
-                       default:
-                               break;
-                       }
-                       break;
-
-               case POWER_SUPPLY_PROP_VOLTAGE_NOW:
-                       switch (ext->desc->type) {
-                       case POWER_SUPPLY_TYPE_BATTERY:
-                               di->batt_data.volt = ret.intval / 1000;
-                               break;
-                       case POWER_SUPPLY_TYPE_MAINS:
-                               di->chg_info.ac_volt = ret.intval / 1000;
-                               break;
-                       case POWER_SUPPLY_TYPE_USB:
-                               di->chg_info.usb_volt = ret.intval / 1000;
-                               break;
-                       default:
-                               break;
-                       }
-                       break;
-
-               case POWER_SUPPLY_PROP_VOLTAGE_AVG:
-                       switch (ext->desc->type) {
-                       case POWER_SUPPLY_TYPE_MAINS:
-                               /* AVG is used to indicate when we are
-                                * in CV mode */
-                               if (ret.intval)
-                                       di->events.ac_cv_active = true;
-                               else
-                                       di->events.ac_cv_active = false;
-
-                               break;
-                       case POWER_SUPPLY_TYPE_USB:
-                               /* AVG is used to indicate when we are
-                                * in CV mode */
-                               if (ret.intval)
-                                       di->events.usb_cv_active = true;
-                               else
-                                       di->events.usb_cv_active = false;
-
-                               break;
-                       default:
-                               break;
-                       }
-                       break;
-
-               case POWER_SUPPLY_PROP_TECHNOLOGY:
-                       switch (ext->desc->type) {
-                       case POWER_SUPPLY_TYPE_BATTERY:
-                               if (ret.intval)
-                                       di->events.batt_unknown = false;
-                               else
-                                       di->events.batt_unknown = true;
-
-                               break;
-                       default:
-                               break;
-                       }
-                       break;
-
-               case POWER_SUPPLY_PROP_TEMP:
-                       di->batt_data.temp = ret.intval / 10;
-                       break;
-
-               case POWER_SUPPLY_PROP_CURRENT_NOW:
-                       switch (ext->desc->type) {
-                       case POWER_SUPPLY_TYPE_MAINS:
-                                       di->chg_info.ac_curr =
-                                               ret.intval / 1000;
-                                       break;
-                       case POWER_SUPPLY_TYPE_USB:
-                                       di->chg_info.usb_curr =
-                                               ret.intval / 1000;
-                               break;
-                       case POWER_SUPPLY_TYPE_BATTERY:
-                               di->batt_data.inst_curr = ret.intval / 1000;
-                               break;
-                       default:
-                               break;
-                       }
-                       break;
-
-               case POWER_SUPPLY_PROP_CURRENT_AVG:
-                       switch (ext->desc->type) {
-                       case POWER_SUPPLY_TYPE_BATTERY:
-                               di->batt_data.avg_curr = ret.intval / 1000;
-                               break;
-                       case POWER_SUPPLY_TYPE_USB:
-                               if (ret.intval)
-                                       di->events.vbus_collapsed = true;
-                               else
-                                       di->events.vbus_collapsed = false;
-                               break;
-                       default:
-                               break;
-                       }
-                       break;
-               case POWER_SUPPLY_PROP_CAPACITY:
-                       if (!capacity_updated)
-                               di->batt_data.percent = ret.intval;
-                       break;
-               default:
-                       break;
-               }
-       }
-       return 0;
-}
-
-/**
- * abx500_chargalg_external_power_changed() - callback for power supply changes
- * @psy:       pointer to the structure power_supply
- *
- * This function is the entry point of the pointer external_power_changed
- * of the structure power_supply.
- * This function gets executed when there is a change in any external power
- * supply that this driver needs to be notified of.
- */
-static void abx500_chargalg_external_power_changed(struct power_supply *psy)
-{
-       struct abx500_chargalg *di = power_supply_get_drvdata(psy);
-
-       /*
-        * Trigger execution of the algorithm instantly and read
-        * all power_supply properties there instead
-        */
-       queue_work(di->chargalg_wq, &di->chargalg_work);
-}
-
-/**
- * abx500_chargalg_algorithm() - Main function for the algorithm
- * @di:                pointer to the abx500_chargalg structure
- *
- * This is the main control function for the charging algorithm.
- * It is called periodically or when something happens that will
- * trigger a state change
- */
-static void abx500_chargalg_algorithm(struct abx500_chargalg *di)
-{
-       int charger_status;
-       int ret;
-       int curr_step_lvl;
-
-       /* Collect data from all power_supply class devices */
-       class_for_each_device(power_supply_class, NULL,
-               di->chargalg_psy, abx500_chargalg_get_ext_psy_data);
-
-       abx500_chargalg_end_of_charge(di);
-       abx500_chargalg_check_temp(di);
-       abx500_chargalg_check_charger_voltage(di);
-
-       charger_status = abx500_chargalg_check_charger_connection(di);
-       abx500_chargalg_check_current_step_status(di);
-
-       if (is_ab8500(di->parent)) {
-               ret = abx500_chargalg_check_charger_enable(di);
-               if (ret < 0)
-                       dev_err(di->dev, "Checking charger is enabled error"
-                                       ": Returned Value %d\n", ret);
-       }
-
-       /*
-        * First check if we have a charger connected.
-        * Also we don't allow charging of unknown batteries if configured
-        * this way
-        */
-       if (!charger_status ||
-               (di->events.batt_unknown && !di->bm->chg_unknown_bat)) {
-               if (di->charge_state != STATE_HANDHELD) {
-                       di->events.safety_timer_expired = false;
-                       abx500_chargalg_state_to(di, STATE_HANDHELD_INIT);
-               }
-       }
-
-       /* If suspended, we should not continue checking the flags */
-       else if (di->charge_state == STATE_SUSPENDED_INIT ||
-               di->charge_state == STATE_SUSPENDED) {
-               /* We don't do anything here, just don,t continue */
-       }
-
-       /* Safety timer expiration */
-       else if (di->events.safety_timer_expired) {
-               if (di->charge_state != STATE_SAFETY_TIMER_EXPIRED)
-                       abx500_chargalg_state_to(di,
-                               STATE_SAFETY_TIMER_EXPIRED_INIT);
-       }
-       /*
-        * Check if any interrupts has occured
-        * that will prevent us from charging
-        */
-
-       /* Battery removed */
-       else if (di->events.batt_rem) {
-               if (di->charge_state != STATE_BATT_REMOVED)
-                       abx500_chargalg_state_to(di, STATE_BATT_REMOVED_INIT);
-       }
-       /* Main or USB charger not ok. */
-       else if (di->events.mainextchnotok || di->events.usbchargernotok) {
-               /*
-                * If vbus_collapsed is set, we have to lower the charger
-                * current, which is done in the normal state below
-                */
-               if (di->charge_state != STATE_CHG_NOT_OK &&
-                               !di->events.vbus_collapsed)
-                       abx500_chargalg_state_to(di, STATE_CHG_NOT_OK_INIT);
-       }
-       /* VBUS, Main or VBAT OVV. */
-       else if (di->events.vbus_ovv ||
-                       di->events.main_ovv ||
-                       di->events.batt_ovv ||
-                       !di->chg_info.usb_chg_ok ||
-                       !di->chg_info.ac_chg_ok) {
-               if (di->charge_state != STATE_OVV_PROTECT)
-                       abx500_chargalg_state_to(di, STATE_OVV_PROTECT_INIT);
-       }
-       /* USB Thermal, stop charging */
-       else if (di->events.main_thermal_prot ||
-               di->events.usb_thermal_prot) {
-               if (di->charge_state != STATE_HW_TEMP_PROTECT)
-                       abx500_chargalg_state_to(di,
-                               STATE_HW_TEMP_PROTECT_INIT);
-       }
-       /* Battery temp over/under */
-       else if (di->events.btemp_underover) {
-               if (di->charge_state != STATE_TEMP_UNDEROVER)
-                       abx500_chargalg_state_to(di,
-                               STATE_TEMP_UNDEROVER_INIT);
-       }
-       /* Watchdog expired */
-       else if (di->events.ac_wd_expired ||
-               di->events.usb_wd_expired) {
-               if (di->charge_state != STATE_WD_EXPIRED)
-                       abx500_chargalg_state_to(di, STATE_WD_EXPIRED_INIT);
-       }
-       /* Battery temp high/low */
-       else if (di->events.btemp_lowhigh) {
-               if (di->charge_state != STATE_TEMP_LOWHIGH)
-                       abx500_chargalg_state_to(di, STATE_TEMP_LOWHIGH_INIT);
-       }
-
-       dev_dbg(di->dev,
-               "[CHARGALG] Vb %d Ib_avg %d Ib_inst %d Tb %d Cap %d Maint %d "
-               "State %s Active_chg %d Chg_status %d AC %d USB %d "
-               "AC_online %d USB_online %d AC_CV %d USB_CV %d AC_I %d "
-               "USB_I %d AC_Vset %d AC_Iset %d USB_Vset %d USB_Iset %d\n",
-               di->batt_data.volt,
-               di->batt_data.avg_curr,
-               di->batt_data.inst_curr,
-               di->batt_data.temp,
-               di->batt_data.percent,
-               di->maintenance_chg,
-               states[di->charge_state],
-               di->chg_info.charger_type,
-               di->charge_status,
-               di->chg_info.conn_chg & AC_CHG,
-               di->chg_info.conn_chg & USB_CHG,
-               di->chg_info.online_chg & AC_CHG,
-               di->chg_info.online_chg & USB_CHG,
-               di->events.ac_cv_active,
-               di->events.usb_cv_active,
-               di->chg_info.ac_curr,
-               di->chg_info.usb_curr,
-               di->chg_info.ac_vset,
-               di->chg_info.ac_iset,
-               di->chg_info.usb_vset,
-               di->chg_info.usb_iset);
-
-       switch (di->charge_state) {
-       case STATE_HANDHELD_INIT:
-               abx500_chargalg_stop_charging(di);
-               di->charge_status = POWER_SUPPLY_STATUS_DISCHARGING;
-               abx500_chargalg_state_to(di, STATE_HANDHELD);
-               fallthrough;
-
-       case STATE_HANDHELD:
-               break;
-
-       case STATE_SUSPENDED_INIT:
-               if (di->susp_status.ac_suspended)
-                       abx500_chargalg_ac_en(di, false, 0, 0);
-               if (di->susp_status.usb_suspended)
-                       abx500_chargalg_usb_en(di, false, 0, 0);
-               abx500_chargalg_stop_safety_timer(di);
-               abx500_chargalg_stop_maintenance_timer(di);
-               di->charge_status = POWER_SUPPLY_STATUS_NOT_CHARGING;
-               di->maintenance_chg = false;
-               abx500_chargalg_state_to(di, STATE_SUSPENDED);
-               power_supply_changed(di->chargalg_psy);
-               fallthrough;
-
-       case STATE_SUSPENDED:
-               /* CHARGING is suspended */
-               break;
-
-       case STATE_BATT_REMOVED_INIT:
-               abx500_chargalg_stop_charging(di);
-               abx500_chargalg_state_to(di, STATE_BATT_REMOVED);
-               fallthrough;
-
-       case STATE_BATT_REMOVED:
-               if (!di->events.batt_rem)
-                       abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
-               break;
-
-       case STATE_HW_TEMP_PROTECT_INIT:
-               abx500_chargalg_stop_charging(di);
-               abx500_chargalg_state_to(di, STATE_HW_TEMP_PROTECT);
-               fallthrough;
-
-       case STATE_HW_TEMP_PROTECT:
-               if (!di->events.main_thermal_prot &&
-                               !di->events.usb_thermal_prot)
-                       abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
-               break;
-
-       case STATE_OVV_PROTECT_INIT:
-               abx500_chargalg_stop_charging(di);
-               abx500_chargalg_state_to(di, STATE_OVV_PROTECT);
-               fallthrough;
-
-       case STATE_OVV_PROTECT:
-               if (!di->events.vbus_ovv &&
-                               !di->events.main_ovv &&
-                               !di->events.batt_ovv &&
-                               di->chg_info.usb_chg_ok &&
-                               di->chg_info.ac_chg_ok)
-                       abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
-               break;
-
-       case STATE_CHG_NOT_OK_INIT:
-               abx500_chargalg_stop_charging(di);
-               abx500_chargalg_state_to(di, STATE_CHG_NOT_OK);
-               fallthrough;
-
-       case STATE_CHG_NOT_OK:
-               if (!di->events.mainextchnotok &&
-                               !di->events.usbchargernotok)
-                       abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
-               break;
-
-       case STATE_SAFETY_TIMER_EXPIRED_INIT:
-               abx500_chargalg_stop_charging(di);
-               abx500_chargalg_state_to(di, STATE_SAFETY_TIMER_EXPIRED);
-               fallthrough;
-
-       case STATE_SAFETY_TIMER_EXPIRED:
-               /* We exit this state when charger is removed */
-               break;
-
-       case STATE_NORMAL_INIT:
-               if (di->curr_status.curr_step == CHARGALG_CURR_STEP_LOW)
-                       abx500_chargalg_stop_charging(di);
-               else {
-                       curr_step_lvl = di->bm->bat_type[
-                               di->bm->batt_id].normal_cur_lvl
-                               * di->curr_status.curr_step
-                               / CHARGALG_CURR_STEP_HIGH;
-                       abx500_chargalg_start_charging(di,
-                               di->bm->bat_type[di->bm->batt_id]
-                               .normal_vol_lvl, curr_step_lvl);
-               }
-
-               abx500_chargalg_state_to(di, STATE_NORMAL);
-               abx500_chargalg_start_safety_timer(di);
-               abx500_chargalg_stop_maintenance_timer(di);
-               init_maxim_chg_curr(di);
-               di->charge_status = POWER_SUPPLY_STATUS_CHARGING;
-               di->eoc_cnt = 0;
-               di->maintenance_chg = false;
-               power_supply_changed(di->chargalg_psy);
-
-               break;
-
-       case STATE_NORMAL:
-               handle_maxim_chg_curr(di);
-               if (di->charge_status == POWER_SUPPLY_STATUS_FULL &&
-                       di->maintenance_chg) {
-                       if (di->bm->no_maintenance)
-                               abx500_chargalg_state_to(di,
-                                       STATE_WAIT_FOR_RECHARGE_INIT);
-                       else
-                               abx500_chargalg_state_to(di,
-                                       STATE_MAINTENANCE_A_INIT);
-               }
-               break;
-
-       /* This state will be used when the maintenance state is disabled */
-       case STATE_WAIT_FOR_RECHARGE_INIT:
-               abx500_chargalg_hold_charging(di);
-               abx500_chargalg_state_to(di, STATE_WAIT_FOR_RECHARGE);
-               fallthrough;
-
-       case STATE_WAIT_FOR_RECHARGE:
-               if (di->batt_data.percent <=
-                   di->bm->bat_type[di->bm->batt_id].
-                   recharge_cap)
-                       abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
-               break;
-
-       case STATE_MAINTENANCE_A_INIT:
-               abx500_chargalg_stop_safety_timer(di);
-               abx500_chargalg_start_maintenance_timer(di,
-                       di->bm->bat_type[
-                               di->bm->batt_id].maint_a_chg_timer_h);
-               abx500_chargalg_start_charging(di,
-                       di->bm->bat_type[
-                               di->bm->batt_id].maint_a_vol_lvl,
-                       di->bm->bat_type[
-                               di->bm->batt_id].maint_a_cur_lvl);
-               abx500_chargalg_state_to(di, STATE_MAINTENANCE_A);
-               power_supply_changed(di->chargalg_psy);
-               fallthrough;
-
-       case STATE_MAINTENANCE_A:
-               if (di->events.maintenance_timer_expired) {
-                       abx500_chargalg_stop_maintenance_timer(di);
-                       abx500_chargalg_state_to(di, STATE_MAINTENANCE_B_INIT);
-               }
-               break;
-
-       case STATE_MAINTENANCE_B_INIT:
-               abx500_chargalg_start_maintenance_timer(di,
-                       di->bm->bat_type[
-                               di->bm->batt_id].maint_b_chg_timer_h);
-               abx500_chargalg_start_charging(di,
-                       di->bm->bat_type[
-                               di->bm->batt_id].maint_b_vol_lvl,
-                       di->bm->bat_type[
-                               di->bm->batt_id].maint_b_cur_lvl);
-               abx500_chargalg_state_to(di, STATE_MAINTENANCE_B);
-               power_supply_changed(di->chargalg_psy);
-               fallthrough;
-
-       case STATE_MAINTENANCE_B:
-               if (di->events.maintenance_timer_expired) {
-                       abx500_chargalg_stop_maintenance_timer(di);
-                       abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
-               }
-               break;
-
-       case STATE_TEMP_LOWHIGH_INIT:
-               abx500_chargalg_start_charging(di,
-                       di->bm->bat_type[
-                               di->bm->batt_id].low_high_vol_lvl,
-                       di->bm->bat_type[
-                               di->bm->batt_id].low_high_cur_lvl);
-               abx500_chargalg_stop_maintenance_timer(di);
-               di->charge_status = POWER_SUPPLY_STATUS_CHARGING;
-               abx500_chargalg_state_to(di, STATE_TEMP_LOWHIGH);
-               power_supply_changed(di->chargalg_psy);
-               fallthrough;
-
-       case STATE_TEMP_LOWHIGH:
-               if (!di->events.btemp_lowhigh)
-                       abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
-               break;
-
-       case STATE_WD_EXPIRED_INIT:
-               abx500_chargalg_stop_charging(di);
-               abx500_chargalg_state_to(di, STATE_WD_EXPIRED);
-               fallthrough;
-
-       case STATE_WD_EXPIRED:
-               if (!di->events.ac_wd_expired &&
-                               !di->events.usb_wd_expired)
-                       abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
-               break;
-
-       case STATE_TEMP_UNDEROVER_INIT:
-               abx500_chargalg_stop_charging(di);
-               abx500_chargalg_state_to(di, STATE_TEMP_UNDEROVER);
-               fallthrough;
-
-       case STATE_TEMP_UNDEROVER:
-               if (!di->events.btemp_underover)
-                       abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
-               break;
-       }
-
-       /* Start charging directly if the new state is a charge state */
-       if (di->charge_state == STATE_NORMAL_INIT ||
-                       di->charge_state == STATE_MAINTENANCE_A_INIT ||
-                       di->charge_state == STATE_MAINTENANCE_B_INIT)
-               queue_work(di->chargalg_wq, &di->chargalg_work);
-}
-
-/**
- * abx500_chargalg_periodic_work() - Periodic work for the algorithm
- * @work:      pointer to the work_struct structure
- *
- * Work queue function for the charging algorithm
- */
-static void abx500_chargalg_periodic_work(struct work_struct *work)
-{
-       struct abx500_chargalg *di = container_of(work,
-               struct abx500_chargalg, chargalg_periodic_work.work);
-
-       abx500_chargalg_algorithm(di);
-
-       /*
-        * If a charger is connected then the battery has to be monitored
-        * frequently, else the work can be delayed.
-        */
-       if (di->chg_info.conn_chg)
-               queue_delayed_work(di->chargalg_wq,
-                       &di->chargalg_periodic_work,
-                       di->bm->interval_charging * HZ);
-       else
-               queue_delayed_work(di->chargalg_wq,
-                       &di->chargalg_periodic_work,
-                       di->bm->interval_not_charging * HZ);
-}
-
-/**
- * abx500_chargalg_wd_work() - periodic work to kick the charger watchdog
- * @work:      pointer to the work_struct structure
- *
- * Work queue function for kicking the charger watchdog
- */
-static void abx500_chargalg_wd_work(struct work_struct *work)
-{
-       int ret;
-       struct abx500_chargalg *di = container_of(work,
-               struct abx500_chargalg, chargalg_wd_work.work);
-
-       dev_dbg(di->dev, "abx500_chargalg_wd_work\n");
-
-       ret = abx500_chargalg_kick_watchdog(di);
-       if (ret < 0)
-               dev_err(di->dev, "failed to kick watchdog\n");
-
-       queue_delayed_work(di->chargalg_wq,
-               &di->chargalg_wd_work, CHG_WD_INTERVAL);
-}
-
-/**
- * abx500_chargalg_work() - Work to run the charging algorithm instantly
- * @work:      pointer to the work_struct structure
- *
- * Work queue function for calling the charging algorithm
- */
-static void abx500_chargalg_work(struct work_struct *work)
-{
-       struct abx500_chargalg *di = container_of(work,
-               struct abx500_chargalg, chargalg_work);
-
-       abx500_chargalg_algorithm(di);
-}
-
-/**
- * abx500_chargalg_get_property() - get the chargalg properties
- * @psy:       pointer to the power_supply structure
- * @psp:       pointer to the power_supply_property structure
- * @val:       pointer to the power_supply_propval union
- *
- * This function gets called when an application tries to get the
- * chargalg properties by reading the sysfs files.
- * status:     charging/discharging/full/unknown
- * health:     health of the battery
- * Returns error code in case of failure else 0 on success
- */
-static int abx500_chargalg_get_property(struct power_supply *psy,
-       enum power_supply_property psp,
-       union power_supply_propval *val)
-{
-       struct abx500_chargalg *di = power_supply_get_drvdata(psy);
-
-       switch (psp) {
-       case POWER_SUPPLY_PROP_STATUS:
-               val->intval = di->charge_status;
-               break;
-       case POWER_SUPPLY_PROP_HEALTH:
-               if (di->events.batt_ovv) {
-                       val->intval = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
-               } else if (di->events.btemp_underover) {
-                       if (di->batt_data.temp <= di->bm->temp_under)
-                               val->intval = POWER_SUPPLY_HEALTH_COLD;
-                       else
-                               val->intval = POWER_SUPPLY_HEALTH_OVERHEAT;
-               } else if (di->charge_state == STATE_SAFETY_TIMER_EXPIRED ||
-                          di->charge_state == STATE_SAFETY_TIMER_EXPIRED_INIT) {
-                       val->intval = POWER_SUPPLY_HEALTH_UNSPEC_FAILURE;
-               } else {
-                       val->intval = POWER_SUPPLY_HEALTH_GOOD;
-               }
-               break;
-       default:
-               return -EINVAL;
-       }
-       return 0;
-}
-
-/* Exposure to the sysfs interface */
-
-static ssize_t abx500_chargalg_curr_step_show(struct abx500_chargalg *di,
-                                             char *buf)
-{
-       return sprintf(buf, "%d\n", di->curr_status.curr_step);
-}
-
-static ssize_t abx500_chargalg_curr_step_store(struct abx500_chargalg *di,
-                                              const char *buf, size_t length)
-{
-       long int param;
-       int ret;
-
-       ret = kstrtol(buf, 10, &param);
-       if (ret < 0)
-               return ret;
-
-       di->curr_status.curr_step = param;
-       if (di->curr_status.curr_step >= CHARGALG_CURR_STEP_LOW &&
-               di->curr_status.curr_step <= CHARGALG_CURR_STEP_HIGH) {
-               di->curr_status.curr_step_change = true;
-               queue_work(di->chargalg_wq, &di->chargalg_work);
-       } else
-               dev_info(di->dev, "Wrong current step\n"
-                       "Enter 0. Disable AC/USB Charging\n"
-                       "1--100. Set AC/USB charging current step\n"
-                       "100. Enable AC/USB Charging\n");
-
-       return strlen(buf);
-}
-
-
-static ssize_t abx500_chargalg_en_show(struct abx500_chargalg *di,
-                                      char *buf)
-{
-       return sprintf(buf, "%d\n",
-                      di->susp_status.ac_suspended &&
-                      di->susp_status.usb_suspended);
-}
-
-static ssize_t abx500_chargalg_en_store(struct abx500_chargalg *di,
-       const char *buf, size_t length)
-{
-       long int param;
-       int ac_usb;
-       int ret;
-
-       ret = kstrtol(buf, 10, &param);
-       if (ret < 0)
-               return ret;
-
-       ac_usb = param;
-       switch (ac_usb) {
-       case 0:
-               /* Disable charging */
-               di->susp_status.ac_suspended = true;
-               di->susp_status.usb_suspended = true;
-               di->susp_status.suspended_change = true;
-               /* Trigger a state change */
-               queue_work(di->chargalg_wq,
-                       &di->chargalg_work);
-               break;
-       case 1:
-               /* Enable AC Charging */
-               di->susp_status.ac_suspended = false;
-               di->susp_status.suspended_change = true;
-               /* Trigger a state change */
-               queue_work(di->chargalg_wq,
-                       &di->chargalg_work);
-               break;
-       case 2:
-               /* Enable USB charging */
-               di->susp_status.usb_suspended = false;
-               di->susp_status.suspended_change = true;
-               /* Trigger a state change */
-               queue_work(di->chargalg_wq,
-                       &di->chargalg_work);
-               break;
-       default:
-               dev_info(di->dev, "Wrong input\n"
-                       "Enter 0. Disable AC/USB Charging\n"
-                       "1. Enable AC charging\n"
-                       "2. Enable USB Charging\n");
-       }
-       return strlen(buf);
-}
-
-static struct abx500_chargalg_sysfs_entry abx500_chargalg_en_charger =
-       __ATTR(chargalg, 0644, abx500_chargalg_en_show,
-                               abx500_chargalg_en_store);
-
-static struct abx500_chargalg_sysfs_entry abx500_chargalg_curr_step =
-       __ATTR(chargalg_curr_step, 0644, abx500_chargalg_curr_step_show,
-                                       abx500_chargalg_curr_step_store);
-
-static ssize_t abx500_chargalg_sysfs_show(struct kobject *kobj,
-       struct attribute *attr, char *buf)
-{
-       struct abx500_chargalg_sysfs_entry *entry = container_of(attr,
-               struct abx500_chargalg_sysfs_entry, attr);
-
-       struct abx500_chargalg *di = container_of(kobj,
-               struct abx500_chargalg, chargalg_kobject);
-
-       if (!entry->show)
-               return -EIO;
-
-       return entry->show(di, buf);
-}
-
-static ssize_t abx500_chargalg_sysfs_charger(struct kobject *kobj,
-       struct attribute *attr, const char *buf, size_t length)
-{
-       struct abx500_chargalg_sysfs_entry *entry = container_of(attr,
-               struct abx500_chargalg_sysfs_entry, attr);
-
-       struct abx500_chargalg *di = container_of(kobj,
-               struct abx500_chargalg, chargalg_kobject);
-
-       if (!entry->store)
-               return -EIO;
-
-       return entry->store(di, buf, length);
-}
-
-static struct attribute *abx500_chargalg_chg[] = {
-       &abx500_chargalg_en_charger.attr,
-       &abx500_chargalg_curr_step.attr,
-       NULL,
-};
-
-static const struct sysfs_ops abx500_chargalg_sysfs_ops = {
-       .show = abx500_chargalg_sysfs_show,
-       .store = abx500_chargalg_sysfs_charger,
-};
-
-static struct kobj_type abx500_chargalg_ktype = {
-       .sysfs_ops = &abx500_chargalg_sysfs_ops,
-       .default_attrs = abx500_chargalg_chg,
-};
-
-/**
- * abx500_chargalg_sysfs_exit() - de-init of sysfs entry
- * @di:                pointer to the struct abx500_chargalg
- *
- * This function removes the entry in sysfs.
- */
-static void abx500_chargalg_sysfs_exit(struct abx500_chargalg *di)
-{
-       kobject_del(&di->chargalg_kobject);
-}
-
-/**
- * abx500_chargalg_sysfs_init() - init of sysfs entry
- * @di:                pointer to the struct abx500_chargalg
- *
- * This function adds an entry in sysfs.
- * Returns error code in case of failure else 0(on success)
- */
-static int abx500_chargalg_sysfs_init(struct abx500_chargalg *di)
-{
-       int ret = 0;
-
-       ret = kobject_init_and_add(&di->chargalg_kobject,
-               &abx500_chargalg_ktype,
-               NULL, "abx500_chargalg");
-       if (ret < 0)
-               dev_err(di->dev, "failed to create sysfs entry\n");
-
-       return ret;
-}
-/* Exposure to the sysfs interface <<END>> */
-
-static int __maybe_unused abx500_chargalg_resume(struct device *dev)
-{
-       struct abx500_chargalg *di = dev_get_drvdata(dev);
-
-       /* Kick charger watchdog if charging (any charger online) */
-       if (di->chg_info.online_chg)
-               queue_delayed_work(di->chargalg_wq, &di->chargalg_wd_work, 0);
-
-       /*
-        * Run the charging algorithm directly to be sure we don't
-        * do it too seldom
-        */
-       queue_delayed_work(di->chargalg_wq, &di->chargalg_periodic_work, 0);
-
-       return 0;
-}
-
-static int __maybe_unused abx500_chargalg_suspend(struct device *dev)
-{
-       struct abx500_chargalg *di = dev_get_drvdata(dev);
-
-       if (di->chg_info.online_chg)
-               cancel_delayed_work_sync(&di->chargalg_wd_work);
-
-       cancel_delayed_work_sync(&di->chargalg_periodic_work);
-
-       return 0;
-}
-
-static char *supply_interface[] = {
-       "ab8500_fg",
-};
-
-static const struct power_supply_desc abx500_chargalg_desc = {
-       .name                   = "abx500_chargalg",
-       .type                   = POWER_SUPPLY_TYPE_BATTERY,
-       .properties             = abx500_chargalg_props,
-       .num_properties         = ARRAY_SIZE(abx500_chargalg_props),
-       .get_property           = abx500_chargalg_get_property,
-       .external_power_changed = abx500_chargalg_external_power_changed,
-};
-
-static int abx500_chargalg_bind(struct device *dev, struct device *master,
-                               void *data)
-{
-       struct abx500_chargalg *di = dev_get_drvdata(dev);
-
-       /* Create a work queue for the chargalg */
-       di->chargalg_wq = alloc_ordered_workqueue("abx500_chargalg_wq",
-                                                 WQ_MEM_RECLAIM);
-       if (di->chargalg_wq == NULL) {
-               dev_err(di->dev, "failed to create work queue\n");
-               return -ENOMEM;
-       }
-
-       /* Run the charging algorithm */
-       queue_delayed_work(di->chargalg_wq, &di->chargalg_periodic_work, 0);
-
-       return 0;
-}
-
-static void abx500_chargalg_unbind(struct device *dev, struct device *master,
-                                  void *data)
-{
-       struct abx500_chargalg *di = dev_get_drvdata(dev);
-
-       /* Stop all timers and work */
-       hrtimer_cancel(&di->safety_timer);
-       hrtimer_cancel(&di->maintenance_timer);
-
-       cancel_delayed_work_sync(&di->chargalg_periodic_work);
-       cancel_delayed_work_sync(&di->chargalg_wd_work);
-       cancel_work_sync(&di->chargalg_work);
-
-       /* Delete the work queue */
-       destroy_workqueue(di->chargalg_wq);
-       flush_scheduled_work();
-}
-
-static const struct component_ops abx500_chargalg_component_ops = {
-       .bind = abx500_chargalg_bind,
-       .unbind = abx500_chargalg_unbind,
-};
-
-static int abx500_chargalg_probe(struct platform_device *pdev)
-{
-       struct device *dev = &pdev->dev;
-       struct power_supply_config psy_cfg = {};
-       struct abx500_chargalg *di;
-       int ret = 0;
-
-       di = devm_kzalloc(dev, sizeof(*di), GFP_KERNEL);
-       if (!di)
-               return -ENOMEM;
-
-       di->bm = &ab8500_bm_data;
-
-       /* get device struct and parent */
-       di->dev = dev;
-       di->parent = dev_get_drvdata(pdev->dev.parent);
-
-       psy_cfg.supplied_to = supply_interface;
-       psy_cfg.num_supplicants = ARRAY_SIZE(supply_interface);
-       psy_cfg.drv_data = di;
-
-       /* Initilialize safety timer */
-       hrtimer_init(&di->safety_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
-       di->safety_timer.function = abx500_chargalg_safety_timer_expired;
-
-       /* Initilialize maintenance timer */
-       hrtimer_init(&di->maintenance_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
-       di->maintenance_timer.function =
-               abx500_chargalg_maintenance_timer_expired;
-
-       /* Init work for chargalg */
-       INIT_DEFERRABLE_WORK(&di->chargalg_periodic_work,
-               abx500_chargalg_periodic_work);
-       INIT_DEFERRABLE_WORK(&di->chargalg_wd_work,
-               abx500_chargalg_wd_work);
-
-       /* Init work for chargalg */
-       INIT_WORK(&di->chargalg_work, abx500_chargalg_work);
-
-       /* To detect charger at startup */
-       di->chg_info.prev_conn_chg = -1;
-
-       /* Register chargalg power supply class */
-       di->chargalg_psy = devm_power_supply_register(di->dev,
-                                                &abx500_chargalg_desc,
-                                                &psy_cfg);
-       if (IS_ERR(di->chargalg_psy)) {
-               dev_err(di->dev, "failed to register chargalg psy\n");
-               return PTR_ERR(di->chargalg_psy);
-       }
-
-       platform_set_drvdata(pdev, di);
-
-       /* sysfs interface to enable/disable charging from user space */
-       ret = abx500_chargalg_sysfs_init(di);
-       if (ret) {
-               dev_err(di->dev, "failed to create sysfs entry\n");
-               return ret;
-       }
-       di->curr_status.curr_step = CHARGALG_CURR_STEP_HIGH;
-
-       dev_info(di->dev, "probe success\n");
-       return component_add(dev, &abx500_chargalg_component_ops);
-}
-
-static int abx500_chargalg_remove(struct platform_device *pdev)
-{
-       struct abx500_chargalg *di = platform_get_drvdata(pdev);
-
-       component_del(&pdev->dev, &abx500_chargalg_component_ops);
-
-       /* sysfs interface to enable/disable charging from user space */
-       abx500_chargalg_sysfs_exit(di);
-
-       return 0;
-}
-
-static SIMPLE_DEV_PM_OPS(abx500_chargalg_pm_ops, abx500_chargalg_suspend, abx500_chargalg_resume);
-
-static const struct of_device_id ab8500_chargalg_match[] = {
-       { .compatible = "stericsson,ab8500-chargalg", },
-       { },
-};
-
-struct platform_driver abx500_chargalg_driver = {
-       .probe = abx500_chargalg_probe,
-       .remove = abx500_chargalg_remove,
-       .driver = {
-               .name = "ab8500-chargalg",
-               .of_match_table = ab8500_chargalg_match,
-               .pm = &abx500_chargalg_pm_ops,
-       },
-};
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Johan Palsson, Karl Komierowski");
-MODULE_ALIAS("platform:abx500-chargalg");
-MODULE_DESCRIPTION("abx500 battery charging algorithm");
index a4df1ea..b9553be 100644 (file)
@@ -813,7 +813,7 @@ static int axp288_charger_probe(struct platform_device *pdev)
        if (val == 0)
                return -ENODEV;
 
-       info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
+       info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
        if (!info)
                return -ENOMEM;
 
@@ -823,7 +823,7 @@ static int axp288_charger_probe(struct platform_device *pdev)
 
        info->cable.edev = extcon_get_extcon_dev(AXP288_EXTCON_DEV_NAME);
        if (info->cable.edev == NULL) {
-               dev_dbg(&pdev->dev, "%s is not ready, probe deferred\n",
+               dev_dbg(dev, "%s is not ready, probe deferred\n",
                        AXP288_EXTCON_DEV_NAME);
                return -EPROBE_DEFER;
        }
@@ -834,8 +834,7 @@ static int axp288_charger_probe(struct platform_device *pdev)
                        dev_dbg(dev, "EXTCON_USB_HOST is not ready, probe deferred\n");
                        return -EPROBE_DEFER;
                }
-               dev_info(&pdev->dev,
-                        "Using " USB_HOST_EXTCON_HID " extcon for usb-id\n");
+               dev_info(dev, "Using " USB_HOST_EXTCON_HID " extcon for usb-id\n");
        }
 
        platform_set_drvdata(pdev, info);
@@ -874,7 +873,7 @@ static int axp288_charger_probe(struct platform_device *pdev)
        INIT_WORK(&info->otg.work, axp288_charger_otg_evt_worker);
        info->otg.id_nb.notifier_call = axp288_charger_handle_otg_evt;
        if (info->otg.cable) {
-               ret = devm_extcon_register_notifier(&pdev->dev, info->otg.cable,
+               ret = devm_extcon_register_notifier(dev, info->otg.cable,
                                        EXTCON_USB_HOST, &info->otg.id_nb);
                if (ret) {
                        dev_err(dev, "failed to register EXTCON_USB_HOST notifier\n");
@@ -899,7 +898,7 @@ static int axp288_charger_probe(struct platform_device *pdev)
                                        NULL, axp288_charger_irq_thread_handler,
                                        IRQF_ONESHOT, info->pdev->name, info);
                if (ret) {
-                       dev_err(&pdev->dev, "failed to request interrupt=%d\n",
+                       dev_err(dev, "failed to request interrupt=%d\n",
                                                                info->irq[i]);
                        return ret;
                }
index 2ba2d8d..c1da217 100644 (file)
@@ -2,7 +2,8 @@
 /*
  * axp288_fuel_gauge.c - Xpower AXP288 PMIC Fuel Gauge Driver
  *
- * Copyright (C) 2016-2017 Hans de Goede <hdegoede@redhat.com>
+ * Copyright (C) 2020-2021 Andrejus Basovas <xxx@yyy.tld>
+ * Copyright (C) 2016-2021 Hans de Goede <hdegoede@redhat.com>
  * Copyright (C) 2014 Intel Corporation
  *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 #include <linux/platform_device.h>
 #include <linux/power_supply.h>
 #include <linux/iio/consumer.h>
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
 #include <asm/unaligned.h>
+#include <asm/iosf_mbi.h>
 
-#define PS_STAT_VBUS_TRIGGER           (1 << 0)
-#define PS_STAT_BAT_CHRG_DIR           (1 << 2)
-#define PS_STAT_VBAT_ABOVE_VHOLD       (1 << 3)
-#define PS_STAT_VBUS_VALID             (1 << 4)
-#define PS_STAT_VBUS_PRESENT           (1 << 5)
+#define PS_STAT_VBUS_TRIGGER                   (1 << 0)
+#define PS_STAT_BAT_CHRG_DIR                   (1 << 2)
+#define PS_STAT_VBAT_ABOVE_VHOLD               (1 << 3)
+#define PS_STAT_VBUS_VALID                     (1 << 4)
+#define PS_STAT_VBUS_PRESENT                   (1 << 5)
 
-#define CHRG_STAT_BAT_SAFE_MODE                (1 << 3)
+#define CHRG_STAT_BAT_SAFE_MODE                        (1 << 3)
 #define CHRG_STAT_BAT_VALID                    (1 << 4)
-#define CHRG_STAT_BAT_PRESENT          (1 << 5)
+#define CHRG_STAT_BAT_PRESENT                  (1 << 5)
 #define CHRG_STAT_CHARGING                     (1 << 6)
 #define CHRG_STAT_PMIC_OTP                     (1 << 7)
 
 #define CHRG_CCCV_CC_MASK                      0xf     /* 4 bits */
-#define CHRG_CCCV_CC_BIT_POS           0
+#define CHRG_CCCV_CC_BIT_POS                   0
 #define CHRG_CCCV_CC_OFFSET                    200     /* 200mA */
-#define CHRG_CCCV_CC_LSB_RES           200     /* 200mA */
+#define CHRG_CCCV_CC_LSB_RES                   200     /* 200mA */
 #define CHRG_CCCV_ITERM_20P                    (1 << 4)    /* 20% of CC */
 #define CHRG_CCCV_CV_MASK                      0x60        /* 2 bits */
-#define CHRG_CCCV_CV_BIT_POS           5
+#define CHRG_CCCV_CV_BIT_POS                   5
 #define CHRG_CCCV_CV_4100MV                    0x0     /* 4.10V */
 #define CHRG_CCCV_CV_4150MV                    0x1     /* 4.15V */
 #define CHRG_CCCV_CV_4200MV                    0x2     /* 4.20V */
 #define CHRG_CCCV_CV_4350MV                    0x3     /* 4.35V */
 #define CHRG_CCCV_CHG_EN                       (1 << 7)
 
-#define FG_CNTL_OCV_ADJ_STAT           (1 << 2)
+#define FG_CNTL_OCV_ADJ_STAT                   (1 << 2)
 #define FG_CNTL_OCV_ADJ_EN                     (1 << 3)
-#define FG_CNTL_CAP_ADJ_STAT           (1 << 4)
+#define FG_CNTL_CAP_ADJ_STAT                   (1 << 4)
 #define FG_CNTL_CAP_ADJ_EN                     (1 << 5)
 #define FG_CNTL_CC_EN                          (1 << 6)
 #define FG_CNTL_GAUGE_EN                       (1 << 7)
 #define FG_CC_CAP_VALID                                (1 << 7)
 #define FG_CC_CAP_VAL_MASK                     0x7F
 
-#define FG_LOW_CAP_THR1_MASK           0xf0    /* 5% tp 20% */
+#define FG_LOW_CAP_THR1_MASK                   0xf0    /* 5% tp 20% */
 #define FG_LOW_CAP_THR1_VAL                    0xa0    /* 15 perc */
-#define FG_LOW_CAP_THR2_MASK           0x0f    /* 0% to 15% */
+#define FG_LOW_CAP_THR2_MASK                   0x0f    /* 0% to 15% */
 #define FG_LOW_CAP_WARN_THR                    14  /* 14 perc */
 #define FG_LOW_CAP_CRIT_THR                    4   /* 4 perc */
 #define FG_LOW_CAP_SHDN_THR                    0   /* 0 perc */
 
-#define NR_RETRY_CNT    3
-#define DEV_NAME       "axp288_fuel_gauge"
+#define DEV_NAME                               "axp288_fuel_gauge"
 
 /* 1.1mV per LSB expressed in uV */
 #define VOLTAGE_FROM_ADC(a)                    ((a * 11) / 10)
 /* properties converted to uV, uA */
-#define PROP_VOLT(a)           ((a) * 1000)
-#define PROP_CURR(a)           ((a) * 1000)
+#define PROP_VOLT(a)                           ((a) * 1000)
+#define PROP_CURR(a)                           ((a) * 1000)
 
-#define AXP288_FG_INTR_NUM     6
+#define AXP288_REG_UPDATE_INTERVAL             (60 * HZ)
+#define AXP288_FG_INTR_NUM                     6
 enum {
        QWBTU_IRQ = 0,
        WBTU_IRQ,
@@ -98,9 +98,6 @@ enum {
 };
 
 enum {
-       BAT_TEMP = 0,
-       PMIC_TEMP,
-       SYSTEM_TEMP,
        BAT_CHRG_CURR,
        BAT_D_CURR,
        BAT_VOLT,
@@ -108,7 +105,7 @@ enum {
 };
 
 struct axp288_fg_info {
-       struct platform_device *pdev;
+       struct device *dev;
        struct regmap *regmap;
        struct regmap_irq_chip_data *regmap_irqc;
        int irq[AXP288_FG_INTR_NUM];
@@ -117,7 +114,21 @@ struct axp288_fg_info {
        struct mutex lock;
        int status;
        int max_volt;
+       int pwr_op;
+       int low_cap;
        struct dentry *debug_file;
+
+       char valid;                 /* zero until following fields are valid */
+       unsigned long last_updated; /* in jiffies */
+
+       int pwr_stat;
+       int fg_res;
+       int bat_volt;
+       int d_curr;
+       int c_curr;
+       int ocv;
+       int fg_cc_mtr1;
+       int fg_des_cap1;
 };
 
 static enum power_supply_property fuel_gauge_props[] = {
@@ -137,17 +148,12 @@ static enum power_supply_property fuel_gauge_props[] = {
 
 static int fuel_gauge_reg_readb(struct axp288_fg_info *info, int reg)
 {
-       int ret, i;
        unsigned int val;
+       int ret;
 
-       for (i = 0; i < NR_RETRY_CNT; i++) {
-               ret = regmap_read(info->regmap, reg, &val);
-               if (ret != -EBUSY)
-                       break;
-       }
-
+       ret = regmap_read(info->regmap, reg, &val);
        if (ret < 0) {
-               dev_err(&info->pdev->dev, "axp288 reg read err:%d\n", ret);
+               dev_err(info->dev, "Error reading reg 0x%02x err: %d\n", reg, ret);
                return ret;
        }
 
@@ -161,7 +167,7 @@ static int fuel_gauge_reg_writeb(struct axp288_fg_info *info, int reg, u8 val)
        ret = regmap_write(info->regmap, reg, (unsigned int)val);
 
        if (ret < 0)
-               dev_err(&info->pdev->dev, "axp288 reg write err:%d\n", ret);
+               dev_err(info->dev, "Error writing reg 0x%02x err: %d\n", reg, ret);
 
        return ret;
 }
@@ -173,15 +179,13 @@ static int fuel_gauge_read_15bit_word(struct axp288_fg_info *info, int reg)
 
        ret = regmap_bulk_read(info->regmap, reg, buf, 2);
        if (ret < 0) {
-               dev_err(&info->pdev->dev, "Error reading reg 0x%02x err: %d\n",
-                       reg, ret);
+               dev_err(info->dev, "Error reading reg 0x%02x err: %d\n", reg, ret);
                return ret;
        }
 
        ret = get_unaligned_be16(buf);
        if (!(ret & FG_15BIT_WORD_VALID)) {
-               dev_err(&info->pdev->dev, "Error reg 0x%02x contents not valid\n",
-                       reg);
+               dev_err(info->dev, "Error reg 0x%02x contents not valid\n", reg);
                return -ENXIO;
        }
 
@@ -195,8 +199,7 @@ static int fuel_gauge_read_12bit_word(struct axp288_fg_info *info, int reg)
 
        ret = regmap_bulk_read(info->regmap, reg, buf, 2);
        if (ret < 0) {
-               dev_err(&info->pdev->dev, "Error reading reg 0x%02x err: %d\n",
-                       reg, ret);
+               dev_err(info->dev, "Error reading reg 0x%02x err: %d\n", reg, ret);
                return ret;
        }
 
@@ -204,139 +207,78 @@ static int fuel_gauge_read_12bit_word(struct axp288_fg_info *info, int reg)
        return (buf[0] << 4) | ((buf[1] >> 4) & 0x0f);
 }
 
-#ifdef CONFIG_DEBUG_FS
-static int fuel_gauge_debug_show(struct seq_file *s, void *data)
+static int fuel_gauge_update_registers(struct axp288_fg_info *info)
 {
-       struct axp288_fg_info *info = s->private;
-       int raw_val, ret;
-
-       seq_printf(s, " PWR_STATUS[%02x] : %02x\n",
-               AXP20X_PWR_INPUT_STATUS,
-               fuel_gauge_reg_readb(info, AXP20X_PWR_INPUT_STATUS));
-       seq_printf(s, "PWR_OP_MODE[%02x] : %02x\n",
-               AXP20X_PWR_OP_MODE,
-               fuel_gauge_reg_readb(info, AXP20X_PWR_OP_MODE));
-       seq_printf(s, " CHRG_CTRL1[%02x] : %02x\n",
-               AXP20X_CHRG_CTRL1,
-               fuel_gauge_reg_readb(info, AXP20X_CHRG_CTRL1));
-       seq_printf(s, "       VLTF[%02x] : %02x\n",
-               AXP20X_V_LTF_DISCHRG,
-               fuel_gauge_reg_readb(info, AXP20X_V_LTF_DISCHRG));
-       seq_printf(s, "       VHTF[%02x] : %02x\n",
-               AXP20X_V_HTF_DISCHRG,
-               fuel_gauge_reg_readb(info, AXP20X_V_HTF_DISCHRG));
-       seq_printf(s, "    CC_CTRL[%02x] : %02x\n",
-               AXP20X_CC_CTRL,
-               fuel_gauge_reg_readb(info, AXP20X_CC_CTRL));
-       seq_printf(s, "BATTERY CAP[%02x] : %02x\n",
-               AXP20X_FG_RES,
-               fuel_gauge_reg_readb(info, AXP20X_FG_RES));
-       seq_printf(s, "    FG_RDC1[%02x] : %02x\n",
-               AXP288_FG_RDC1_REG,
-               fuel_gauge_reg_readb(info, AXP288_FG_RDC1_REG));
-       seq_printf(s, "    FG_RDC0[%02x] : %02x\n",
-               AXP288_FG_RDC0_REG,
-               fuel_gauge_reg_readb(info, AXP288_FG_RDC0_REG));
-       seq_printf(s, "     FG_OCV[%02x] : %04x\n",
-               AXP288_FG_OCVH_REG,
-               fuel_gauge_read_12bit_word(info, AXP288_FG_OCVH_REG));
-       seq_printf(s, " FG_DES_CAP[%02x] : %04x\n",
-               AXP288_FG_DES_CAP1_REG,
-               fuel_gauge_read_15bit_word(info, AXP288_FG_DES_CAP1_REG));
-       seq_printf(s, "  FG_CC_MTR[%02x] : %04x\n",
-               AXP288_FG_CC_MTR1_REG,
-               fuel_gauge_read_15bit_word(info, AXP288_FG_CC_MTR1_REG));
-       seq_printf(s, " FG_OCV_CAP[%02x] : %02x\n",
-               AXP288_FG_OCV_CAP_REG,
-               fuel_gauge_reg_readb(info, AXP288_FG_OCV_CAP_REG));
-       seq_printf(s, "  FG_CC_CAP[%02x] : %02x\n",
-               AXP288_FG_CC_CAP_REG,
-               fuel_gauge_reg_readb(info, AXP288_FG_CC_CAP_REG));
-       seq_printf(s, " FG_LOW_CAP[%02x] : %02x\n",
-               AXP288_FG_LOW_CAP_REG,
-               fuel_gauge_reg_readb(info, AXP288_FG_LOW_CAP_REG));
-       seq_printf(s, "TUNING_CTL0[%02x] : %02x\n",
-               AXP288_FG_TUNE0,
-               fuel_gauge_reg_readb(info, AXP288_FG_TUNE0));
-       seq_printf(s, "TUNING_CTL1[%02x] : %02x\n",
-               AXP288_FG_TUNE1,
-               fuel_gauge_reg_readb(info, AXP288_FG_TUNE1));
-       seq_printf(s, "TUNING_CTL2[%02x] : %02x\n",
-               AXP288_FG_TUNE2,
-               fuel_gauge_reg_readb(info, AXP288_FG_TUNE2));
-       seq_printf(s, "TUNING_CTL3[%02x] : %02x\n",
-               AXP288_FG_TUNE3,
-               fuel_gauge_reg_readb(info, AXP288_FG_TUNE3));
-       seq_printf(s, "TUNING_CTL4[%02x] : %02x\n",
-               AXP288_FG_TUNE4,
-               fuel_gauge_reg_readb(info, AXP288_FG_TUNE4));
-       seq_printf(s, "TUNING_CTL5[%02x] : %02x\n",
-               AXP288_FG_TUNE5,
-               fuel_gauge_reg_readb(info, AXP288_FG_TUNE5));
-
-       ret = iio_read_channel_raw(info->iio_channel[BAT_TEMP], &raw_val);
-       if (ret >= 0)
-               seq_printf(s, "axp288-batttemp : %d\n", raw_val);
-       ret = iio_read_channel_raw(info->iio_channel[PMIC_TEMP], &raw_val);
-       if (ret >= 0)
-               seq_printf(s, "axp288-pmictemp : %d\n", raw_val);
-       ret = iio_read_channel_raw(info->iio_channel[SYSTEM_TEMP], &raw_val);
-       if (ret >= 0)
-               seq_printf(s, "axp288-systtemp : %d\n", raw_val);
-       ret = iio_read_channel_raw(info->iio_channel[BAT_CHRG_CURR], &raw_val);
-       if (ret >= 0)
-               seq_printf(s, "axp288-chrgcurr : %d\n", raw_val);
-       ret = iio_read_channel_raw(info->iio_channel[BAT_D_CURR], &raw_val);
-       if (ret >= 0)
-               seq_printf(s, "axp288-dchrgcur : %d\n", raw_val);
-       ret = iio_read_channel_raw(info->iio_channel[BAT_VOLT], &raw_val);
-       if (ret >= 0)
-               seq_printf(s, "axp288-battvolt : %d\n", raw_val);
+       int ret;
 
-       return 0;
-}
+       if (info->valid && time_before(jiffies, info->last_updated + AXP288_REG_UPDATE_INTERVAL))
+               return 0;
 
-DEFINE_SHOW_ATTRIBUTE(fuel_gauge_debug);
+       dev_dbg(info->dev, "Fuel Gauge updating register values...\n");
 
-static void fuel_gauge_create_debugfs(struct axp288_fg_info *info)
-{
-       info->debug_file = debugfs_create_file("fuelgauge", 0666, NULL,
-               info, &fuel_gauge_debug_fops);
-}
+       ret = iosf_mbi_block_punit_i2c_access();
+       if (ret < 0)
+               return ret;
 
-static void fuel_gauge_remove_debugfs(struct axp288_fg_info *info)
-{
-       debugfs_remove(info->debug_file);
-}
-#else
-static inline void fuel_gauge_create_debugfs(struct axp288_fg_info *info)
-{
-}
-static inline void fuel_gauge_remove_debugfs(struct axp288_fg_info *info)
-{
+       ret = fuel_gauge_reg_readb(info, AXP20X_PWR_INPUT_STATUS);
+       if (ret < 0)
+               goto out;
+       info->pwr_stat = ret;
+
+       ret = fuel_gauge_reg_readb(info, AXP20X_FG_RES);
+       if (ret < 0)
+               goto out;
+       info->fg_res = ret;
+
+       ret = iio_read_channel_raw(info->iio_channel[BAT_VOLT], &info->bat_volt);
+       if (ret < 0)
+               goto out;
+
+       if (info->pwr_stat & PS_STAT_BAT_CHRG_DIR) {
+               info->d_curr = 0;
+               ret = iio_read_channel_raw(info->iio_channel[BAT_CHRG_CURR], &info->c_curr);
+               if (ret < 0)
+                       goto out;
+       } else {
+               info->c_curr = 0;
+               ret = iio_read_channel_raw(info->iio_channel[BAT_D_CURR], &info->d_curr);
+               if (ret < 0)
+                       goto out;
+       }
+
+       ret = fuel_gauge_read_12bit_word(info, AXP288_FG_OCVH_REG);
+       if (ret < 0)
+               goto out;
+       info->ocv = ret;
+
+       ret = fuel_gauge_read_15bit_word(info, AXP288_FG_CC_MTR1_REG);
+       if (ret < 0)
+               goto out;
+       info->fg_cc_mtr1 = ret;
+
+       ret = fuel_gauge_read_15bit_word(info, AXP288_FG_DES_CAP1_REG);
+       if (ret < 0)
+               goto out;
+       info->fg_des_cap1 = ret;
+
+       info->last_updated = jiffies;
+       info->valid = 1;
+       ret = 0;
+out:
+       iosf_mbi_unblock_punit_i2c_access();
+       return ret;
 }
-#endif
 
 static void fuel_gauge_get_status(struct axp288_fg_info *info)
 {
-       int pwr_stat, fg_res, curr, ret;
-
-       pwr_stat = fuel_gauge_reg_readb(info, AXP20X_PWR_INPUT_STATUS);
-       if (pwr_stat < 0) {
-               dev_err(&info->pdev->dev,
-                       "PWR STAT read failed:%d\n", pwr_stat);
-               return;
-       }
+       int pwr_stat = info->pwr_stat;
+       int fg_res = info->fg_res;
+       int curr = info->d_curr;
 
        /* Report full if Vbus is valid and the reported capacity is 100% */
        if (!(pwr_stat & PS_STAT_VBUS_VALID))
                goto not_full;
 
-       fg_res = fuel_gauge_reg_readb(info, AXP20X_FG_RES);
-       if (fg_res < 0) {
-               dev_err(&info->pdev->dev, "FG RES read failed: %d\n", fg_res);
-               return;
-       }
        if (!(fg_res & FG_REP_CAP_VALID))
                goto not_full;
 
@@ -354,11 +296,6 @@ static void fuel_gauge_get_status(struct axp288_fg_info *info)
        if (fg_res < 90 || (pwr_stat & PS_STAT_BAT_CHRG_DIR))
                goto not_full;
 
-       ret = iio_read_channel_raw(info->iio_channel[BAT_D_CURR], &curr);
-       if (ret < 0) {
-               dev_err(&info->pdev->dev, "FG get current failed: %d\n", ret);
-               return;
-       }
        if (curr == 0) {
                info->status = POWER_SUPPLY_STATUS_FULL;
                return;
@@ -371,61 +308,16 @@ not_full:
                info->status = POWER_SUPPLY_STATUS_DISCHARGING;
 }
 
-static int fuel_gauge_get_vbatt(struct axp288_fg_info *info, int *vbatt)
-{
-       int ret = 0, raw_val;
-
-       ret = iio_read_channel_raw(info->iio_channel[BAT_VOLT], &raw_val);
-       if (ret < 0)
-               goto vbatt_read_fail;
-
-       *vbatt = VOLTAGE_FROM_ADC(raw_val);
-vbatt_read_fail:
-       return ret;
-}
-
-static int fuel_gauge_get_current(struct axp288_fg_info *info, int *cur)
-{
-       int ret, discharge;
-
-       /* First check discharge current, so that we do only 1 read on bat. */
-       ret = iio_read_channel_raw(info->iio_channel[BAT_D_CURR], &discharge);
-       if (ret < 0)
-               return ret;
-
-       if (discharge > 0) {
-               *cur = -1 * discharge;
-               return 0;
-       }
-
-       return iio_read_channel_raw(info->iio_channel[BAT_CHRG_CURR], cur);
-}
-
-static int fuel_gauge_get_vocv(struct axp288_fg_info *info, int *vocv)
-{
-       int ret;
-
-       ret = fuel_gauge_read_12bit_word(info, AXP288_FG_OCVH_REG);
-       if (ret >= 0)
-               *vocv = VOLTAGE_FROM_ADC(ret);
-
-       return ret;
-}
-
 static int fuel_gauge_battery_health(struct axp288_fg_info *info)
 {
-       int ret, vocv, health = POWER_SUPPLY_HEALTH_UNKNOWN;
-
-       ret = fuel_gauge_get_vocv(info, &vocv);
-       if (ret < 0)
-               goto health_read_fail;
+       int vocv = VOLTAGE_FROM_ADC(info->ocv);
+       int health = POWER_SUPPLY_HEALTH_UNKNOWN;
 
        if (vocv > info->max_volt)
                health = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
        else
                health = POWER_SUPPLY_HEALTH_GOOD;
 
-health_read_fail:
        return health;
 }
 
@@ -434,9 +326,14 @@ static int fuel_gauge_get_property(struct power_supply *ps,
                union power_supply_propval *val)
 {
        struct axp288_fg_info *info = power_supply_get_drvdata(ps);
-       int ret = 0, value;
+       int ret, value;
 
        mutex_lock(&info->lock);
+
+       ret = fuel_gauge_update_registers(info);
+       if (ret < 0)
+               goto out;
+
        switch (prop) {
        case POWER_SUPPLY_PROP_STATUS:
                fuel_gauge_get_status(info);
@@ -446,78 +343,52 @@ static int fuel_gauge_get_property(struct power_supply *ps,
                val->intval = fuel_gauge_battery_health(info);
                break;
        case POWER_SUPPLY_PROP_VOLTAGE_NOW:
-               ret = fuel_gauge_get_vbatt(info, &value);
-               if (ret < 0)
-                       goto fuel_gauge_read_err;
+               value = VOLTAGE_FROM_ADC(info->bat_volt);
                val->intval = PROP_VOLT(value);
                break;
        case POWER_SUPPLY_PROP_VOLTAGE_OCV:
-               ret = fuel_gauge_get_vocv(info, &value);
-               if (ret < 0)
-                       goto fuel_gauge_read_err;
+               value = VOLTAGE_FROM_ADC(info->ocv);
                val->intval = PROP_VOLT(value);
                break;
        case POWER_SUPPLY_PROP_CURRENT_NOW:
-               ret = fuel_gauge_get_current(info, &value);
-               if (ret < 0)
-                       goto fuel_gauge_read_err;
+               if (info->d_curr > 0)
+                       value = -1 * info->d_curr;
+               else
+                       value = info->c_curr;
+
                val->intval = PROP_CURR(value);
                break;
        case POWER_SUPPLY_PROP_PRESENT:
-               ret = fuel_gauge_reg_readb(info, AXP20X_PWR_OP_MODE);
-               if (ret < 0)
-                       goto fuel_gauge_read_err;
-
-               if (ret & CHRG_STAT_BAT_PRESENT)
+               if (info->pwr_op & CHRG_STAT_BAT_PRESENT)
                        val->intval = 1;
                else
                        val->intval = 0;
                break;
        case POWER_SUPPLY_PROP_CAPACITY:
-               ret = fuel_gauge_reg_readb(info, AXP20X_FG_RES);
-               if (ret < 0)
-                       goto fuel_gauge_read_err;
-
-               if (!(ret & FG_REP_CAP_VALID))
-                       dev_err(&info->pdev->dev,
-                               "capacity measurement not valid\n");
-               val->intval = (ret & FG_REP_CAP_VAL_MASK);
+               if (!(info->fg_res & FG_REP_CAP_VALID))
+                       dev_err(info->dev, "capacity measurement not valid\n");
+               val->intval = (info->fg_res & FG_REP_CAP_VAL_MASK);
                break;
        case POWER_SUPPLY_PROP_CAPACITY_ALERT_MIN:
-               ret = fuel_gauge_reg_readb(info, AXP288_FG_LOW_CAP_REG);
-               if (ret < 0)
-                       goto fuel_gauge_read_err;
-               val->intval = (ret & 0x0f);
+               val->intval = (info->low_cap & 0x0f);
                break;
        case POWER_SUPPLY_PROP_TECHNOLOGY:
                val->intval = POWER_SUPPLY_TECHNOLOGY_LION;
                break;
        case POWER_SUPPLY_PROP_CHARGE_NOW:
-               ret = fuel_gauge_read_15bit_word(info, AXP288_FG_CC_MTR1_REG);
-               if (ret < 0)
-                       goto fuel_gauge_read_err;
-
-               val->intval = ret * FG_DES_CAP_RES_LSB;
+               val->intval = info->fg_cc_mtr1 * FG_DES_CAP_RES_LSB;
                break;
        case POWER_SUPPLY_PROP_CHARGE_FULL:
-               ret = fuel_gauge_read_15bit_word(info, AXP288_FG_DES_CAP1_REG);
-               if (ret < 0)
-                       goto fuel_gauge_read_err;
-
-               val->intval = ret * FG_DES_CAP_RES_LSB;
+               val->intval = info->fg_des_cap1 * FG_DES_CAP_RES_LSB;
                break;
        case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
                val->intval = PROP_VOLT(info->max_volt);
                break;
        default:
-               mutex_unlock(&info->lock);
-               return -EINVAL;
+               ret = -EINVAL;
        }
 
-       mutex_unlock(&info->lock);
-       return 0;
-
-fuel_gauge_read_err:
+out:
        mutex_unlock(&info->lock);
        return ret;
 }
@@ -527,7 +398,7 @@ static int fuel_gauge_set_property(struct power_supply *ps,
                const union power_supply_propval *val)
 {
        struct axp288_fg_info *info = power_supply_get_drvdata(ps);
-       int ret = 0;
+       int new_low_cap, ret = 0;
 
        mutex_lock(&info->lock);
        switch (prop) {
@@ -536,12 +407,12 @@ static int fuel_gauge_set_property(struct power_supply *ps,
                        ret = -EINVAL;
                        break;
                }
-               ret = fuel_gauge_reg_readb(info, AXP288_FG_LOW_CAP_REG);
-               if (ret < 0)
-                       break;
-               ret &= 0xf0;
-               ret |= (val->intval & 0xf);
-               ret = fuel_gauge_reg_writeb(info, AXP288_FG_LOW_CAP_REG, ret);
+               new_low_cap = info->low_cap;
+               new_low_cap &= 0xf0;
+               new_low_cap |= (val->intval & 0xf);
+               ret = fuel_gauge_reg_writeb(info, AXP288_FG_LOW_CAP_REG, new_low_cap);
+               if (ret == 0)
+                       info->low_cap = new_low_cap;
                break;
        default:
                ret = -EINVAL;
@@ -579,37 +450,35 @@ static irqreturn_t fuel_gauge_thread_handler(int irq, void *dev)
        }
 
        if (i >= AXP288_FG_INTR_NUM) {
-               dev_warn(&info->pdev->dev, "spurious interrupt!!\n");
+               dev_warn(info->dev, "spurious interrupt!!\n");
                return IRQ_NONE;
        }
 
        switch (i) {
        case QWBTU_IRQ:
-               dev_info(&info->pdev->dev,
-                       "Quit Battery under temperature in work mode IRQ (QWBTU)\n");
+               dev_info(info->dev, "Quit Battery under temperature in work mode IRQ (QWBTU)\n");
                break;
        case WBTU_IRQ:
-               dev_info(&info->pdev->dev,
-                       "Battery under temperature in work mode IRQ (WBTU)\n");
+               dev_info(info->dev, "Battery under temperature in work mode IRQ (WBTU)\n");
                break;
        case QWBTO_IRQ:
-               dev_info(&info->pdev->dev,
-                       "Quit Battery over temperature in work mode IRQ (QWBTO)\n");
+               dev_info(info->dev, "Quit Battery over temperature in work mode IRQ (QWBTO)\n");
                break;
        case WBTO_IRQ:
-               dev_info(&info->pdev->dev,
-                       "Battery over temperature in work mode IRQ (WBTO)\n");
+               dev_info(info->dev, "Battery over temperature in work mode IRQ (WBTO)\n");
                break;
        case WL2_IRQ:
-               dev_info(&info->pdev->dev, "Low Batt Warning(2) INTR\n");
+               dev_info(info->dev, "Low Batt Warning(2) INTR\n");
                break;
        case WL1_IRQ:
-               dev_info(&info->pdev->dev, "Low Batt Warning(1) INTR\n");
+               dev_info(info->dev, "Low Batt Warning(1) INTR\n");
                break;
        default:
-               dev_warn(&info->pdev->dev, "Spurious Interrupt!!!\n");
+               dev_warn(info->dev, "Spurious Interrupt!!!\n");
        }
 
+       info->valid = 0; /* Force updating of the cached registers */
+
        power_supply_changed(info->bat);
        return IRQ_HANDLED;
 }
@@ -618,6 +487,7 @@ static void fuel_gauge_external_power_changed(struct power_supply *psy)
 {
        struct axp288_fg_info *info = power_supply_get_drvdata(psy);
 
+       info->valid = 0; /* Force updating of the cached registers */
        power_supply_changed(info->bat);
 }
 
@@ -632,16 +502,15 @@ static const struct power_supply_desc fuel_gauge_desc = {
        .external_power_changed = fuel_gauge_external_power_changed,
 };
 
-static void fuel_gauge_init_irq(struct axp288_fg_info *info)
+static void fuel_gauge_init_irq(struct axp288_fg_info *info, struct platform_device *pdev)
 {
        int ret, i, pirq;
 
        for (i = 0; i < AXP288_FG_INTR_NUM; i++) {
-               pirq = platform_get_irq(info->pdev, i);
+               pirq = platform_get_irq(pdev, i);
                info->irq[i] = regmap_irq_get_virq(info->regmap_irqc, pirq);
                if (info->irq[i] < 0) {
-                       dev_warn(&info->pdev->dev,
-                               "regmap_irq get virq failed for IRQ %d: %d\n",
+                       dev_warn(info->dev, "regmap_irq get virq failed for IRQ %d: %d\n",
                                pirq, info->irq[i]);
                        info->irq[i] = -1;
                        goto intr_failed;
@@ -650,14 +519,10 @@ static void fuel_gauge_init_irq(struct axp288_fg_info *info)
                                NULL, fuel_gauge_thread_handler,
                                IRQF_ONESHOT, DEV_NAME, info);
                if (ret) {
-                       dev_warn(&info->pdev->dev,
-                               "request irq failed for IRQ %d: %d\n",
+                       dev_warn(info->dev, "request irq failed for IRQ %d: %d\n",
                                pirq, info->irq[i]);
                        info->irq[i] = -1;
                        goto intr_failed;
-               } else {
-                       dev_info(&info->pdev->dev, "HW IRQ %d -> VIRQ %d\n",
-                               pirq, info->irq[i]);
                }
        }
        return;
@@ -753,9 +618,6 @@ static int axp288_fuel_gauge_probe(struct platform_device *pdev)
        struct axp20x_dev *axp20x = dev_get_drvdata(pdev->dev.parent);
        struct power_supply_config psy_cfg = {};
        static const char * const iio_chan_name[] = {
-               [BAT_TEMP] = "axp288-batt-temp",
-               [PMIC_TEMP] = "axp288-pmic-temp",
-               [SYSTEM_TEMP] = "axp288-system-temp",
                [BAT_CHRG_CURR] = "axp288-chrg-curr",
                [BAT_D_CURR] = "axp288-chrg-d-curr",
                [BAT_VOLT] = "axp288-batt-volt",
@@ -765,24 +627,15 @@ static int axp288_fuel_gauge_probe(struct platform_device *pdev)
        if (dmi_check_system(axp288_no_battery_list))
                return -ENODEV;
 
-       /*
-        * On some devices the fuelgauge and charger parts of the axp288 are
-        * not used, check that the fuelgauge is enabled (CC_CTRL != 0).
-        */
-       ret = regmap_read(axp20x->regmap, AXP20X_CC_CTRL, &val);
-       if (ret < 0)
-               return ret;
-       if (val == 0)
-               return -ENODEV;
-
        info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
        if (!info)
                return -ENOMEM;
 
-       info->pdev = pdev;
+       info->dev = &pdev->dev;
        info->regmap = axp20x->regmap;
        info->regmap_irqc = axp20x->regmap_irqc;
        info->status = POWER_SUPPLY_STATUS_UNKNOWN;
+       info->valid = 0;
 
        platform_set_drvdata(pdev, info);
 
@@ -808,19 +661,35 @@ static int axp288_fuel_gauge_probe(struct platform_device *pdev)
                }
        }
 
-       ret = fuel_gauge_reg_readb(info, AXP288_FG_DES_CAP1_REG);
+       ret = iosf_mbi_block_punit_i2c_access();
        if (ret < 0)
                goto out_free_iio_chan;
 
+       /*
+        * On some devices the fuelgauge and charger parts of the axp288 are
+        * not used, check that the fuelgauge is enabled (CC_CTRL != 0).
+        */
+       ret = regmap_read(axp20x->regmap, AXP20X_CC_CTRL, &val);
+       if (ret < 0)
+               goto unblock_punit_i2c_access;
+       if (val == 0) {
+               ret = -ENODEV;
+               goto unblock_punit_i2c_access;
+       }
+
+       ret = fuel_gauge_reg_readb(info, AXP288_FG_DES_CAP1_REG);
+       if (ret < 0)
+               goto unblock_punit_i2c_access;
+
        if (!(ret & FG_DES_CAP1_VALID)) {
                dev_err(&pdev->dev, "axp288 not configured by firmware\n");
                ret = -ENODEV;
-               goto out_free_iio_chan;
+               goto unblock_punit_i2c_access;
        }
 
        ret = fuel_gauge_reg_readb(info, AXP20X_CHRG_CTRL1);
        if (ret < 0)
-               goto out_free_iio_chan;
+               goto unblock_punit_i2c_access;
        switch ((ret & CHRG_CCCV_CV_MASK) >> CHRG_CCCV_CV_BIT_POS) {
        case CHRG_CCCV_CV_4100MV:
                info->max_volt = 4100;
@@ -836,6 +705,22 @@ static int axp288_fuel_gauge_probe(struct platform_device *pdev)
                break;
        }
 
+       ret = fuel_gauge_reg_readb(info, AXP20X_PWR_OP_MODE);
+       if (ret < 0)
+               goto unblock_punit_i2c_access;
+       info->pwr_op = ret;
+
+       ret = fuel_gauge_reg_readb(info, AXP288_FG_LOW_CAP_REG);
+       if (ret < 0)
+               goto unblock_punit_i2c_access;
+       info->low_cap = ret;
+
+unblock_punit_i2c_access:
+       iosf_mbi_unblock_punit_i2c_access();
+       /* In case we arrive here by goto because of a register access error */
+       if (ret < 0)
+               goto out_free_iio_chan;
+
        psy_cfg.drv_data = info;
        info->bat = power_supply_register(&pdev->dev, &fuel_gauge_desc, &psy_cfg);
        if (IS_ERR(info->bat)) {
@@ -844,8 +729,7 @@ static int axp288_fuel_gauge_probe(struct platform_device *pdev)
                goto out_free_iio_chan;
        }
 
-       fuel_gauge_create_debugfs(info);
-       fuel_gauge_init_irq(info);
+       fuel_gauge_init_irq(info, pdev);
 
        return 0;
 
@@ -869,7 +753,6 @@ static int axp288_fuel_gauge_remove(struct platform_device *pdev)
        int i;
 
        power_supply_unregister(info->bat);
-       fuel_gauge_remove_debugfs(info);
 
        for (i = 0; i < AXP288_FG_INTR_NUM; i++)
                if (info->irq[i] >= 0)
index b5d619d..3ce36d0 100644 (file)
@@ -31,9 +31,8 @@
 
 #include <linux/power/bq24735-charger.h>
 
-#define BQ24735_CHG_OPT                        0x12
-#define BQ24735_CHG_OPT_CHARGE_DISABLE (1 << 0)
-#define BQ24735_CHG_OPT_AC_PRESENT     (1 << 4)
+/* BQ24735 available commands and their respective masks */
+#define BQ24735_CHARGE_OPT             0x12
 #define BQ24735_CHARGE_CURRENT         0x14
 #define BQ24735_CHARGE_CURRENT_MASK    0x1fc0
 #define BQ24735_CHARGE_VOLTAGE         0x15
 #define BQ24735_MANUFACTURER_ID                0xfe
 #define BQ24735_DEVICE_ID              0xff
 
+/* ChargeOptions bits of interest */
+#define BQ24735_CHARGE_OPT_CHG_DISABLE (1 << 0)
+#define BQ24735_CHARGE_OPT_AC_PRESENT  (1 << 4)
+
 struct bq24735 {
        struct power_supply             *charger;
        struct power_supply_desc        charger_desc;
@@ -167,8 +170,8 @@ static inline int bq24735_enable_charging(struct bq24735 *charger)
        if (ret)
                return ret;
 
-       return bq24735_update_word(charger->client, BQ24735_CHG_OPT,
-                                  BQ24735_CHG_OPT_CHARGE_DISABLE, 0);
+       return bq24735_update_word(charger->client, BQ24735_CHARGE_OPT,
+                                  BQ24735_CHARGE_OPT_CHG_DISABLE, 0);
 }
 
 static inline int bq24735_disable_charging(struct bq24735 *charger)
@@ -176,9 +179,9 @@ static inline int bq24735_disable_charging(struct bq24735 *charger)
        if (charger->pdata->ext_control)
                return 0;
 
-       return bq24735_update_word(charger->client, BQ24735_CHG_OPT,
-                                  BQ24735_CHG_OPT_CHARGE_DISABLE,
-                                  BQ24735_CHG_OPT_CHARGE_DISABLE);
+       return bq24735_update_word(charger->client, BQ24735_CHARGE_OPT,
+                                  BQ24735_CHARGE_OPT_CHG_DISABLE,
+                                  BQ24735_CHARGE_OPT_CHG_DISABLE);
 }
 
 static bool bq24735_charger_is_present(struct bq24735 *charger)
@@ -188,14 +191,14 @@ static bool bq24735_charger_is_present(struct bq24735 *charger)
        } else {
                int ac = 0;
 
-               ac = bq24735_read_word(charger->client, BQ24735_CHG_OPT);
+               ac = bq24735_read_word(charger->client, BQ24735_CHARGE_OPT);
                if (ac < 0) {
                        dev_dbg(&charger->client->dev,
                                "Failed to read charger options : %d\n",
                                ac);
                        return false;
                }
-               return (ac & BQ24735_CHG_OPT_AC_PRESENT) ? true : false;
+               return (ac & BQ24735_CHARGE_OPT_AC_PRESENT) ? true : false;
        }
 
        return false;
@@ -208,11 +211,11 @@ static int bq24735_charger_is_charging(struct bq24735 *charger)
        if (!bq24735_charger_is_present(charger))
                return 0;
 
-       ret  = bq24735_read_word(charger->client, BQ24735_CHG_OPT);
+       ret  = bq24735_read_word(charger->client, BQ24735_CHARGE_OPT);
        if (ret < 0)
                return ret;
 
-       return !(ret & BQ24735_CHG_OPT_CHARGE_DISABLE);
+       return !(ret & BQ24735_CHARGE_OPT_CHG_DISABLE);
 }
 
 static void bq24735_update(struct bq24735 *charger)
diff --git a/drivers/power/supply/cros_peripheral_charger.c b/drivers/power/supply/cros_peripheral_charger.c
new file mode 100644 (file)
index 0000000..305f10d
--- /dev/null
@@ -0,0 +1,386 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Power supply driver for ChromeOS EC based Peripheral Device Charger.
+ *
+ * Copyright 2020 Google LLC.
+ */
+
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/platform_data/cros_ec_commands.h>
+#include <linux/platform_data/cros_ec_proto.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/slab.h>
+#include <linux/stringify.h>
+#include <linux/types.h>
+
+#define DRV_NAME               "cros-ec-pchg"
+#define PCHG_DIR_PREFIX                "peripheral"
+#define PCHG_DIR_NAME          PCHG_DIR_PREFIX "%d"
+#define PCHG_DIR_NAME_LENGTH \
+               sizeof(PCHG_DIR_PREFIX __stringify(EC_PCHG_MAX_PORTS))
+#define PCHG_CACHE_UPDATE_DELAY        msecs_to_jiffies(500)
+
+struct port_data {
+       int port_number;
+       char name[PCHG_DIR_NAME_LENGTH];
+       struct power_supply *psy;
+       struct power_supply_desc psy_desc;
+       int psy_status;
+       int battery_percentage;
+       int charge_type;
+       struct charger_data *charger;
+       unsigned long last_update;
+};
+
+struct charger_data {
+       struct device *dev;
+       struct cros_ec_dev *ec_dev;
+       struct cros_ec_device *ec_device;
+       int num_registered_psy;
+       struct port_data *ports[EC_PCHG_MAX_PORTS];
+       struct notifier_block notifier;
+};
+
+static enum power_supply_property cros_pchg_props[] = {
+       POWER_SUPPLY_PROP_STATUS,
+       POWER_SUPPLY_PROP_CHARGE_TYPE,
+       POWER_SUPPLY_PROP_CAPACITY,
+       POWER_SUPPLY_PROP_SCOPE,
+};
+
+static int cros_pchg_ec_command(const struct charger_data *charger,
+                               unsigned int version,
+                               unsigned int command,
+                               const void *outdata,
+                               unsigned int outsize,
+                               void *indata,
+                               unsigned int insize)
+{
+       struct cros_ec_dev *ec_dev = charger->ec_dev;
+       struct cros_ec_command *msg;
+       int ret;
+
+       msg = kzalloc(sizeof(*msg) + max(outsize, insize), GFP_KERNEL);
+       if (!msg)
+               return -ENOMEM;
+
+       msg->version = version;
+       msg->command = ec_dev->cmd_offset + command;
+       msg->outsize = outsize;
+       msg->insize = insize;
+
+       if (outsize)
+               memcpy(msg->data, outdata, outsize);
+
+       ret = cros_ec_cmd_xfer_status(charger->ec_device, msg);
+       if (ret >= 0 && insize)
+               memcpy(indata, msg->data, insize);
+
+       kfree(msg);
+       return ret;
+}
+
+static const unsigned int pchg_cmd_version = 1;
+
+static bool cros_pchg_cmd_ver_check(const struct charger_data *charger)
+{
+       struct ec_params_get_cmd_versions_v1 req;
+       struct ec_response_get_cmd_versions rsp;
+       int ret;
+
+       req.cmd = EC_CMD_PCHG;
+       ret = cros_pchg_ec_command(charger, 1, EC_CMD_GET_CMD_VERSIONS,
+                                  &req, sizeof(req), &rsp, sizeof(rsp));
+       if (ret < 0) {
+               dev_warn(charger->dev,
+                        "Unable to get versions of EC_CMD_PCHG (err:%d)\n",
+                        ret);
+               return false;
+       }
+
+       return !!(rsp.version_mask & BIT(pchg_cmd_version));
+}
+
+static int cros_pchg_port_count(const struct charger_data *charger)
+{
+       struct ec_response_pchg_count rsp;
+       int ret;
+
+       ret = cros_pchg_ec_command(charger, 0, EC_CMD_PCHG_COUNT,
+                                  NULL, 0, &rsp, sizeof(rsp));
+       if (ret < 0) {
+               dev_warn(charger->dev,
+                        "Unable to get number or ports (err:%d)\n", ret);
+               return ret;
+       }
+
+       return rsp.port_count;
+}
+
+static int cros_pchg_get_status(struct port_data *port)
+{
+       struct charger_data *charger = port->charger;
+       struct ec_params_pchg req;
+       struct ec_response_pchg rsp;
+       struct device *dev = charger->dev;
+       int old_status = port->psy_status;
+       int old_percentage = port->battery_percentage;
+       int ret;
+
+       req.port = port->port_number;
+       ret = cros_pchg_ec_command(charger, pchg_cmd_version, EC_CMD_PCHG,
+                                  &req, sizeof(req), &rsp, sizeof(rsp));
+       if (ret < 0) {
+               dev_err(dev, "Unable to get port.%d status (err:%d)\n",
+                       port->port_number, ret);
+               return ret;
+       }
+
+       switch (rsp.state) {
+       case PCHG_STATE_RESET:
+       case PCHG_STATE_INITIALIZED:
+       case PCHG_STATE_ENABLED:
+       default:
+               port->psy_status = POWER_SUPPLY_STATUS_UNKNOWN;
+               port->charge_type = POWER_SUPPLY_CHARGE_TYPE_NONE;
+               break;
+       case PCHG_STATE_DETECTED:
+               port->psy_status = POWER_SUPPLY_STATUS_CHARGING;
+               port->charge_type = POWER_SUPPLY_CHARGE_TYPE_TRICKLE;
+               break;
+       case PCHG_STATE_CHARGING:
+               port->psy_status = POWER_SUPPLY_STATUS_CHARGING;
+               port->charge_type = POWER_SUPPLY_CHARGE_TYPE_STANDARD;
+               break;
+       case PCHG_STATE_FULL:
+               port->psy_status = POWER_SUPPLY_STATUS_FULL;
+               port->charge_type = POWER_SUPPLY_CHARGE_TYPE_NONE;
+               break;
+       }
+
+       port->battery_percentage = rsp.battery_percentage;
+
+       if (port->psy_status != old_status ||
+                       port->battery_percentage != old_percentage)
+               power_supply_changed(port->psy);
+
+       dev_dbg(dev,
+               "Port %d: state=%d battery=%d%%\n",
+               port->port_number, rsp.state, rsp.battery_percentage);
+
+       return 0;
+}
+
+static int cros_pchg_get_port_status(struct port_data *port, bool ratelimit)
+{
+       int ret;
+
+       if (ratelimit &&
+           time_is_after_jiffies(port->last_update + PCHG_CACHE_UPDATE_DELAY))
+               return 0;
+
+       ret = cros_pchg_get_status(port);
+       if (ret < 0)
+               return ret;
+
+       port->last_update = jiffies;
+
+       return ret;
+}
+
+static int cros_pchg_get_prop(struct power_supply *psy,
+                             enum power_supply_property psp,
+                             union power_supply_propval *val)
+{
+       struct port_data *port = power_supply_get_drvdata(psy);
+
+       switch (psp) {
+       case POWER_SUPPLY_PROP_STATUS:
+       case POWER_SUPPLY_PROP_CAPACITY:
+       case POWER_SUPPLY_PROP_CHARGE_TYPE:
+               cros_pchg_get_port_status(port, true);
+               break;
+       default:
+               break;
+       }
+
+       switch (psp) {
+       case POWER_SUPPLY_PROP_STATUS:
+               val->intval = port->psy_status;
+               break;
+       case POWER_SUPPLY_PROP_CAPACITY:
+               val->intval = port->battery_percentage;
+               break;
+       case POWER_SUPPLY_PROP_CHARGE_TYPE:
+               val->intval = port->charge_type;
+               break;
+       case POWER_SUPPLY_PROP_SCOPE:
+               val->intval = POWER_SUPPLY_SCOPE_DEVICE;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int cros_pchg_event(const struct charger_data *charger,
+                          unsigned long host_event)
+{
+       int i;
+
+       for (i = 0; i < charger->num_registered_psy; i++)
+               cros_pchg_get_port_status(charger->ports[i], false);
+
+       return NOTIFY_OK;
+}
+
+static u32 cros_get_device_event(const struct charger_data *charger)
+{
+       struct ec_params_device_event req;
+       struct ec_response_device_event rsp;
+       struct device *dev = charger->dev;
+       int ret;
+
+       req.param = EC_DEVICE_EVENT_PARAM_GET_CURRENT_EVENTS;
+       ret = cros_pchg_ec_command(charger, 0, EC_CMD_DEVICE_EVENT,
+                                  &req, sizeof(req), &rsp, sizeof(rsp));
+       if (ret < 0) {
+               dev_warn(dev, "Unable to get device events (err:%d)\n", ret);
+               return 0;
+       }
+
+       return rsp.event_mask;
+}
+
+static int cros_ec_notify(struct notifier_block *nb,
+                         unsigned long queued_during_suspend,
+                         void *data)
+{
+       struct cros_ec_device *ec_dev = (struct cros_ec_device *)data;
+       u32 host_event = cros_ec_get_host_event(ec_dev);
+       struct charger_data *charger =
+                       container_of(nb, struct charger_data, notifier);
+       u32 device_event_mask;
+
+       if (!host_event)
+               return NOTIFY_DONE;
+
+       if (!(host_event & EC_HOST_EVENT_MASK(EC_HOST_EVENT_DEVICE)))
+               return NOTIFY_DONE;
+
+       /*
+        * TODO: Retrieve the device event mask in a common place
+        * (e.g. cros_ec_proto.c).
+        */
+       device_event_mask = cros_get_device_event(charger);
+       if (!(device_event_mask & EC_DEVICE_EVENT_MASK(EC_DEVICE_EVENT_WLC)))
+               return NOTIFY_DONE;
+
+       return cros_pchg_event(charger, host_event);
+}
+
+static int cros_pchg_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct cros_ec_dev *ec_dev = dev_get_drvdata(dev->parent);
+       struct cros_ec_device *ec_device = ec_dev->ec_dev;
+       struct power_supply_desc *psy_desc;
+       struct charger_data *charger;
+       struct power_supply *psy;
+       struct port_data *port;
+       struct notifier_block *nb;
+       int num_ports;
+       int ret;
+       int i;
+
+       charger = devm_kzalloc(dev, sizeof(*charger), GFP_KERNEL);
+       if (!charger)
+               return -ENOMEM;
+
+       charger->dev = dev;
+       charger->ec_dev = ec_dev;
+       charger->ec_device = ec_device;
+
+       ret = cros_pchg_port_count(charger);
+       if (ret <= 0) {
+               /*
+                * This feature is enabled by the EC and the kernel driver is
+                * included by default for CrOS devices. No need to be loud
+                * since this error can be normal.
+                */
+               dev_info(dev, "No peripheral charge ports (err:%d)\n", ret);
+               return -ENODEV;
+       }
+
+       if (!cros_pchg_cmd_ver_check(charger)) {
+               dev_err(dev, "EC_CMD_PCHG version %d isn't available.\n",
+                       pchg_cmd_version);
+               return -EOPNOTSUPP;
+       }
+
+       num_ports = ret;
+       if (num_ports > EC_PCHG_MAX_PORTS) {
+               dev_err(dev, "Too many peripheral charge ports (%d)\n",
+                       num_ports);
+               return -ENOBUFS;
+       }
+
+       dev_info(dev, "%d peripheral charge ports found\n", num_ports);
+
+       for (i = 0; i < num_ports; i++) {
+               struct power_supply_config psy_cfg = {};
+
+               port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
+               if (!port)
+                       return -ENOMEM;
+
+               port->charger = charger;
+               port->port_number = i;
+               snprintf(port->name, sizeof(port->name), PCHG_DIR_NAME, i);
+
+               psy_desc = &port->psy_desc;
+               psy_desc->name = port->name;
+               psy_desc->type = POWER_SUPPLY_TYPE_BATTERY;
+               psy_desc->get_property = cros_pchg_get_prop;
+               psy_desc->external_power_changed = NULL;
+               psy_desc->properties = cros_pchg_props;
+               psy_desc->num_properties = ARRAY_SIZE(cros_pchg_props);
+               psy_cfg.drv_data = port;
+
+               psy = devm_power_supply_register(dev, psy_desc, &psy_cfg);
+               if (IS_ERR(psy))
+                       return dev_err_probe(dev, PTR_ERR(psy),
+                                       "Failed to register power supply\n");
+               port->psy = psy;
+
+               charger->ports[charger->num_registered_psy++] = port;
+       }
+
+       if (!charger->num_registered_psy)
+               return -ENODEV;
+
+       nb = &charger->notifier;
+       nb->notifier_call = cros_ec_notify;
+       ret = blocking_notifier_chain_register(&ec_dev->ec_dev->event_notifier,
+                                              nb);
+       if (ret < 0)
+               dev_err(dev, "Failed to register notifier (err:%d)\n", ret);
+
+       return 0;
+}
+
+static struct platform_driver cros_pchg_driver = {
+       .driver = {
+               .name = DRV_NAME,
+       },
+       .probe = cros_pchg_probe
+};
+
+module_platform_driver(cros_pchg_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ChromeOS EC peripheral device charger");
+MODULE_ALIAS("platform:" DRV_NAME);
index d110597..091868e 100644 (file)
@@ -679,7 +679,9 @@ static int cw_bat_probe(struct i2c_client *client)
                                                    &cw2015_bat_desc,
                                                    &psy_cfg);
        if (IS_ERR(cw_bat->rk_bat)) {
-               dev_err(cw_bat->dev, "Failed to register power supply\n");
+               /* try again if this happens */
+               dev_err_probe(&client->dev, PTR_ERR(cw_bat->rk_bat),
+                       "Failed to register power supply\n");
                return PTR_ERR(cw_bat->rk_bat);
        }
 
index ce2041b..8dffae7 100644 (file)
@@ -36,8 +36,6 @@
 
 /* Interrupt mask bits */
 #define CONFIG_ALRT_BIT_ENBL   (1 << 2)
-#define STATUS_INTR_SOCMIN_BIT (1 << 10)
-#define STATUS_INTR_SOCMAX_BIT (1 << 14)
 
 #define VFSOC0_LOCK            0x0000
 #define VFSOC0_UNLOCK          0x0080
@@ -285,8 +283,6 @@ static int max17042_get_property(struct power_supply *psy,
        case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
                if (chip->chip_type == MAXIM_DEVICE_TYPE_MAX17042)
                        ret = regmap_read(map, MAX17042_V_empty, &data);
-               else if (chip->chip_type == MAXIM_DEVICE_TYPE_MAX17055)
-                       ret = regmap_read(map, MAX17055_V_empty, &data);
                else
                        ret = regmap_read(map, MAX17047_V_empty, &data);
                if (ret < 0)
@@ -748,7 +744,7 @@ static inline void max17042_override_por_values(struct max17042_chip *chip)
        struct max17042_config_data *config = chip->pdata->config_data;
 
        max17042_override_por(map, MAX17042_TGAIN, config->tgain);
-       max17042_override_por(map, MAx17042_TOFF, config->toff);
+       max17042_override_por(map, MAX17042_TOFF, config->toff);
        max17042_override_por(map, MAX17042_CGAIN, config->cgain);
        max17042_override_por(map, MAX17042_COFF, config->coff);
 
@@ -767,36 +763,36 @@ static inline void max17042_override_por_values(struct max17042_chip *chip)
        max17042_override_por(map, MAX17042_FilterCFG, config->filter_cfg);
        max17042_override_por(map, MAX17042_RelaxCFG, config->relax_cfg);
        max17042_override_por(map, MAX17042_MiscCFG, config->misc_cfg);
-       max17042_override_por(map, MAX17042_MaskSOC, config->masksoc);
 
        max17042_override_por(map, MAX17042_FullCAP, config->fullcap);
        max17042_override_por(map, MAX17042_FullCAPNom, config->fullcapnom);
-       if (chip->chip_type == MAXIM_DEVICE_TYPE_MAX17042)
-               max17042_override_por(map, MAX17042_SOC_empty,
-                                               config->socempty);
-       max17042_override_por(map, MAX17042_LAvg_empty, config->lavg_empty);
        max17042_override_por(map, MAX17042_dQacc, config->dqacc);
        max17042_override_por(map, MAX17042_dPacc, config->dpacc);
 
-       if (chip->chip_type == MAXIM_DEVICE_TYPE_MAX17042)
-               max17042_override_por(map, MAX17042_V_empty, config->vempty);
-       if (chip->chip_type == MAXIM_DEVICE_TYPE_MAX17055)
-               max17042_override_por(map, MAX17055_V_empty, config->vempty);
-       else
-               max17042_override_por(map, MAX17047_V_empty, config->vempty);
-       max17042_override_por(map, MAX17042_TempNom, config->temp_nom);
-       max17042_override_por(map, MAX17042_TempLim, config->temp_lim);
-       max17042_override_por(map, MAX17042_FCTC, config->fctc);
        max17042_override_por(map, MAX17042_RCOMP0, config->rcomp0);
        max17042_override_por(map, MAX17042_TempCo, config->tcompc0);
-       if (chip->chip_type &&
-           ((chip->chip_type == MAXIM_DEVICE_TYPE_MAX17042) ||
+
+       if (chip->chip_type == MAXIM_DEVICE_TYPE_MAX17042) {
+               max17042_override_por(map, MAX17042_MaskSOC, config->masksoc);
+               max17042_override_por(map, MAX17042_SOC_empty, config->socempty);
+               max17042_override_por(map, MAX17042_V_empty, config->vempty);
+               max17042_override_por(map, MAX17042_EmptyTempCo, config->empty_tempco);
+               max17042_override_por(map, MAX17042_K_empty0, config->kempty0);
+       }
+
+       if ((chip->chip_type == MAXIM_DEVICE_TYPE_MAX17042) ||
            (chip->chip_type == MAXIM_DEVICE_TYPE_MAX17047) ||
-           (chip->chip_type == MAXIM_DEVICE_TYPE_MAX17050))) {
-               max17042_override_por(map, MAX17042_EmptyTempCo,
-                                               config->empty_tempco);
-               max17042_override_por(map, MAX17042_K_empty0,
-                                               config->kempty0);
+           (chip->chip_type == MAXIM_DEVICE_TYPE_MAX17050)) {
+               max17042_override_por(map, MAX17042_LAvg_empty, config->lavg_empty);
+               max17042_override_por(map, MAX17042_TempNom, config->temp_nom);
+               max17042_override_por(map, MAX17042_TempLim, config->temp_lim);
+               max17042_override_por(map, MAX17042_FCTC, config->fctc);
+       }
+
+       if ((chip->chip_type == MAXIM_DEVICE_TYPE_MAX17047) ||
+           (chip->chip_type == MAXIM_DEVICE_TYPE_MAX17050) ||
+           (chip->chip_type == MAXIM_DEVICE_TYPE_MAX17055)) {
+               max17042_override_por(map, MAX17047_V_empty, config->vempty);
        }
 }
 
@@ -869,11 +865,14 @@ static irqreturn_t max17042_thread_handler(int id, void *dev)
 {
        struct max17042_chip *chip = dev;
        u32 val;
+       int ret;
 
-       regmap_read(chip->regmap, MAX17042_STATUS, &val);
-       if ((val & STATUS_INTR_SOCMIN_BIT) ||
-               (val & STATUS_INTR_SOCMAX_BIT)) {
-               dev_info(&chip->client->dev, "SOC threshold INTR\n");
+       ret = regmap_read(chip->regmap, MAX17042_STATUS, &val);
+       if (ret)
+               return IRQ_HANDLED;
+
+       if ((val & STATUS_SMN_BIT) || (val & STATUS_SMX_BIT)) {
+               dev_dbg(&chip->client->dev, "SOC threshold INTR\n");
                max17042_set_soc_threshold(chip, 1);
        }
 
@@ -1196,6 +1195,7 @@ static const struct of_device_id max17042_dt_match[] = {
        { .compatible = "maxim,max17047" },
        { .compatible = "maxim,max17050" },
        { .compatible = "maxim,max17055" },
+       { .compatible = "maxim,max77849-battery" },
        { },
 };
 MODULE_DEVICE_TABLE(of, max17042_dt_match);
@@ -1206,6 +1206,7 @@ static const struct i2c_device_id max17042_id[] = {
        { "max17047", MAXIM_DEVICE_TYPE_MAX17047 },
        { "max17050", MAXIM_DEVICE_TYPE_MAX17050 },
        { "max17055", MAXIM_DEVICE_TYPE_MAX17055 },
+       { "max77849-battery", MAXIM_DEVICE_TYPE_MAX17047 },
        { }
 };
 MODULE_DEVICE_TABLE(i2c, max17042_id);
diff --git a/drivers/power/supply/mt6360_charger.c b/drivers/power/supply/mt6360_charger.c
new file mode 100644 (file)
index 0000000..3abaa72
--- /dev/null
@@ -0,0 +1,867 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2021 MediaTek Inc.
+ */
+
+#include <linux/devm-helpers.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/linear_range.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+
+#define MT6360_PMU_CHG_CTRL1   0x311
+#define MT6360_PMU_CHG_CTRL2   0x312
+#define MT6360_PMU_CHG_CTRL3   0x313
+#define MT6360_PMU_CHG_CTRL4   0x314
+#define MT6360_PMU_CHG_CTRL5   0x315
+#define MT6360_PMU_CHG_CTRL6   0x316
+#define MT6360_PMU_CHG_CTRL7   0x317
+#define MT6360_PMU_CHG_CTRL8   0x318
+#define MT6360_PMU_CHG_CTRL9   0x319
+#define MT6360_PMU_CHG_CTRL10  0x31A
+#define MT6360_PMU_DEVICE_TYPE 0x322
+#define MT6360_PMU_USB_STATUS1 0x327
+#define MT6360_PMU_CHG_STAT    0x34A
+#define MT6360_PMU_CHG_CTRL19  0x361
+#define MT6360_PMU_FOD_STAT    0x3E7
+
+/* MT6360_PMU_CHG_CTRL1 */
+#define MT6360_FSLP_SHFT       (3)
+#define MT6360_FSLP_MASK       BIT(MT6360_FSLP_SHFT)
+#define MT6360_OPA_MODE_SHFT   (0)
+#define MT6360_OPA_MODE_MASK   BIT(MT6360_OPA_MODE_SHFT)
+/* MT6360_PMU_CHG_CTRL2 */
+#define MT6360_IINLMTSEL_SHFT  (2)
+#define MT6360_IINLMTSEL_MASK  GENMASK(3, 2)
+/* MT6360_PMU_CHG_CTRL3 */
+#define MT6360_IAICR_SHFT      (2)
+#define MT6360_IAICR_MASK      GENMASK(7, 2)
+#define MT6360_ILIM_EN_MASK    BIT(0)
+/* MT6360_PMU_CHG_CTRL4 */
+#define MT6360_VOREG_SHFT      (1)
+#define MT6360_VOREG_MASK      GENMASK(7, 1)
+/* MT6360_PMU_CHG_CTRL5 */
+#define MT6360_VOBST_MASK      GENMASK(7, 2)
+/* MT6360_PMU_CHG_CTRL6 */
+#define MT6360_VMIVR_SHFT      (1)
+#define MT6360_VMIVR_MASK      GENMASK(7, 1)
+/* MT6360_PMU_CHG_CTRL7 */
+#define MT6360_ICHG_SHFT       (2)
+#define MT6360_ICHG_MASK       GENMASK(7, 2)
+/* MT6360_PMU_CHG_CTRL8 */
+#define MT6360_IPREC_SHFT      (0)
+#define MT6360_IPREC_MASK      GENMASK(3, 0)
+/* MT6360_PMU_CHG_CTRL9 */
+#define MT6360_IEOC_SHFT       (4)
+#define MT6360_IEOC_MASK       GENMASK(7, 4)
+/* MT6360_PMU_CHG_CTRL10 */
+#define MT6360_OTG_OC_MASK     GENMASK(3, 0)
+/* MT6360_PMU_DEVICE_TYPE */
+#define MT6360_USBCHGEN_MASK   BIT(7)
+/* MT6360_PMU_USB_STATUS1 */
+#define MT6360_USB_STATUS_SHFT (4)
+#define MT6360_USB_STATUS_MASK GENMASK(6, 4)
+/* MT6360_PMU_CHG_STAT */
+#define MT6360_CHG_STAT_SHFT   (6)
+#define MT6360_CHG_STAT_MASK   GENMASK(7, 6)
+#define MT6360_VBAT_LVL_MASK   BIT(5)
+/* MT6360_PMU_CHG_CTRL19 */
+#define MT6360_VINOVP_SHFT     (5)
+#define MT6360_VINOVP_MASK     GENMASK(6, 5)
+/* MT6360_PMU_FOD_STAT */
+#define MT6360_CHRDET_EXT_MASK BIT(4)
+
+/* uV */
+#define MT6360_VMIVR_MIN       3900000
+#define MT6360_VMIVR_MAX       13400000
+#define MT6360_VMIVR_STEP      100000
+/* uA */
+#define MT6360_ICHG_MIN                100000
+#define MT6360_ICHG_MAX                5000000
+#define MT6360_ICHG_STEP       100000
+/* uV */
+#define MT6360_VOREG_MIN       3900000
+#define MT6360_VOREG_MAX       4710000
+#define MT6360_VOREG_STEP      10000
+/* uA */
+#define MT6360_AICR_MIN                100000
+#define MT6360_AICR_MAX                3250000
+#define MT6360_AICR_STEP       50000
+/* uA */
+#define MT6360_IPREC_MIN       100000
+#define MT6360_IPREC_MAX       850000
+#define MT6360_IPREC_STEP      50000
+/* uA */
+#define MT6360_IEOC_MIN                100000
+#define MT6360_IEOC_MAX                850000
+#define MT6360_IEOC_STEP       50000
+
+enum {
+       MT6360_RANGE_VMIVR,
+       MT6360_RANGE_ICHG,
+       MT6360_RANGE_VOREG,
+       MT6360_RANGE_AICR,
+       MT6360_RANGE_IPREC,
+       MT6360_RANGE_IEOC,
+       MT6360_RANGE_MAX,
+};
+
+#define MT6360_LINEAR_RANGE(idx, _min, _min_sel, _max_sel, _step) \
+       [idx] = REGULATOR_LINEAR_RANGE(_min, _min_sel, _max_sel, _step)
+
+static const struct linear_range mt6360_chg_range[MT6360_RANGE_MAX] = {
+       MT6360_LINEAR_RANGE(MT6360_RANGE_VMIVR, 3900000, 0, 0x5F, 100000),
+       MT6360_LINEAR_RANGE(MT6360_RANGE_ICHG, 100000, 0, 0x31, 100000),
+       MT6360_LINEAR_RANGE(MT6360_RANGE_VOREG, 3900000, 0, 0x51, 10000),
+       MT6360_LINEAR_RANGE(MT6360_RANGE_AICR, 100000, 0, 0x3F, 50000),
+       MT6360_LINEAR_RANGE(MT6360_RANGE_IPREC, 100000, 0, 0x0F, 50000),
+       MT6360_LINEAR_RANGE(MT6360_RANGE_IEOC, 100000, 0, 0x0F, 50000),
+};
+
+struct mt6360_chg_info {
+       struct device *dev;
+       struct regmap *regmap;
+       struct power_supply_desc psy_desc;
+       struct power_supply *psy;
+       struct regulator_dev *otg_rdev;
+       struct mutex chgdet_lock;
+       u32 vinovp;
+       bool pwr_rdy;
+       bool bc12_en;
+       int psy_usb_type;
+       struct work_struct chrdet_work;
+};
+
+enum mt6360_iinlmtsel {
+       MT6360_IINLMTSEL_AICR_3250 = 0,
+       MT6360_IINLMTSEL_CHG_TYPE,
+       MT6360_IINLMTSEL_AICR,
+       MT6360_IINLMTSEL_LOWER_LEVEL,
+};
+
+enum mt6360_pmu_chg_type {
+       MT6360_CHG_TYPE_NOVBUS = 0,
+       MT6360_CHG_TYPE_UNDER_GOING,
+       MT6360_CHG_TYPE_SDP,
+       MT6360_CHG_TYPE_SDPNSTD,
+       MT6360_CHG_TYPE_DCP,
+       MT6360_CHG_TYPE_CDP,
+       MT6360_CHG_TYPE_DISABLE_BC12,
+       MT6360_CHG_TYPE_MAX,
+};
+
+static enum power_supply_usb_type mt6360_charger_usb_types[] = {
+       POWER_SUPPLY_USB_TYPE_UNKNOWN,
+       POWER_SUPPLY_USB_TYPE_SDP,
+       POWER_SUPPLY_USB_TYPE_DCP,
+       POWER_SUPPLY_USB_TYPE_CDP,
+};
+
+static int mt6360_get_chrdet_ext_stat(struct mt6360_chg_info *mci,
+                                            bool *pwr_rdy)
+{
+       int ret;
+       unsigned int regval;
+
+       ret = regmap_read(mci->regmap, MT6360_PMU_FOD_STAT, &regval);
+       if (ret < 0)
+               return ret;
+       *pwr_rdy = (regval & MT6360_CHRDET_EXT_MASK) ? true : false;
+       return 0;
+}
+
+static int mt6360_charger_get_online(struct mt6360_chg_info *mci,
+                                    union power_supply_propval *val)
+{
+       int ret;
+       bool pwr_rdy;
+
+       ret = mt6360_get_chrdet_ext_stat(mci, &pwr_rdy);
+       if (ret < 0)
+               return ret;
+       val->intval = pwr_rdy ? true : false;
+       return 0;
+}
+
+static int mt6360_charger_get_status(struct mt6360_chg_info *mci,
+                                    union power_supply_propval *val)
+{
+       int status, ret;
+       unsigned int regval;
+       bool pwr_rdy;
+
+       ret = mt6360_get_chrdet_ext_stat(mci, &pwr_rdy);
+       if (ret < 0)
+               return ret;
+       if (!pwr_rdy) {
+               status = POWER_SUPPLY_STATUS_DISCHARGING;
+               goto out;
+       }
+
+       ret = regmap_read(mci->regmap, MT6360_PMU_CHG_STAT, &regval);
+       if (ret < 0)
+               return ret;
+       regval &= MT6360_CHG_STAT_MASK;
+       regval >>= MT6360_CHG_STAT_SHFT;
+       switch (regval) {
+       case 0x0:
+               status = POWER_SUPPLY_STATUS_NOT_CHARGING;
+               break;
+       case 0x1:
+               status = POWER_SUPPLY_STATUS_CHARGING;
+               break;
+       case 0x2:
+               status = POWER_SUPPLY_STATUS_FULL;
+               break;
+       default:
+               ret = -EIO;
+       }
+out:
+       if (!ret)
+               val->intval = status;
+       return ret;
+}
+
+static int mt6360_charger_get_charge_type(struct mt6360_chg_info *mci,
+                                         union power_supply_propval *val)
+{
+       int type, ret;
+       unsigned int regval;
+       u8 chg_stat;
+
+       ret = regmap_read(mci->regmap, MT6360_PMU_CHG_STAT, &regval);
+       if (ret < 0)
+               return ret;
+
+       chg_stat = (regval & MT6360_CHG_STAT_MASK) >> MT6360_CHG_STAT_SHFT;
+       switch (chg_stat) {
+       case 0x01: /* Charge in Progress */
+               if (regval & MT6360_VBAT_LVL_MASK)
+                       type = POWER_SUPPLY_CHARGE_TYPE_FAST;
+               else
+                       type = POWER_SUPPLY_CHARGE_TYPE_TRICKLE;
+               break;
+       case 0x00: /* Not Charging */
+       case 0x02: /* Charge Done */
+       case 0x03: /* Charge Fault */
+       default:
+               type = POWER_SUPPLY_CHARGE_TYPE_NONE;
+               break;
+       }
+
+       val->intval = type;
+       return 0;
+}
+
+static int mt6360_charger_get_ichg(struct mt6360_chg_info *mci,
+                                  union power_supply_propval *val)
+{
+       int ret;
+       u32 sel, value;
+
+       ret = regmap_read(mci->regmap, MT6360_PMU_CHG_CTRL7, &sel);
+       if (ret < 0)
+               return ret;
+       sel = (sel & MT6360_ICHG_MASK) >> MT6360_ICHG_SHFT;
+       ret = linear_range_get_value(&mt6360_chg_range[MT6360_RANGE_ICHG], sel, &value);
+       if (!ret)
+               val->intval = value;
+       return ret;
+}
+
+static int mt6360_charger_get_max_ichg(struct mt6360_chg_info *mci,
+                                      union power_supply_propval *val)
+{
+       val->intval = MT6360_ICHG_MAX;
+       return 0;
+}
+
+static int mt6360_charger_get_cv(struct mt6360_chg_info *mci,
+                                union power_supply_propval *val)
+{
+       int ret;
+       u32 sel, value;
+
+       ret = regmap_read(mci->regmap, MT6360_PMU_CHG_CTRL4, &sel);
+       if (ret < 0)
+               return ret;
+       sel = (sel & MT6360_VOREG_MASK) >> MT6360_VOREG_SHFT;
+       ret = linear_range_get_value(&mt6360_chg_range[MT6360_RANGE_VOREG], sel, &value);
+       if (!ret)
+               val->intval = value;
+       return ret;
+}
+
+static int mt6360_charger_get_max_cv(struct mt6360_chg_info *mci,
+                                    union power_supply_propval *val)
+{
+       val->intval = MT6360_VOREG_MAX;
+       return 0;
+}
+
+static int mt6360_charger_get_aicr(struct mt6360_chg_info *mci,
+                                  union power_supply_propval *val)
+{
+       int ret;
+       u32 sel, value;
+
+       ret = regmap_read(mci->regmap, MT6360_PMU_CHG_CTRL3, &sel);
+       if (ret < 0)
+               return ret;
+       sel = (sel & MT6360_IAICR_MASK) >> MT6360_IAICR_SHFT;
+       ret = linear_range_get_value(&mt6360_chg_range[MT6360_RANGE_AICR], sel, &value);
+       if (!ret)
+               val->intval = value;
+       return ret;
+}
+
+static int mt6360_charger_get_mivr(struct mt6360_chg_info *mci,
+                                  union power_supply_propval *val)
+{
+       int ret;
+       u32 sel, value;
+
+       ret = regmap_read(mci->regmap, MT6360_PMU_CHG_CTRL6, &sel);
+       if (ret < 0)
+               return ret;
+       sel = (sel & MT6360_VMIVR_MASK) >> MT6360_VMIVR_SHFT;
+       ret = linear_range_get_value(&mt6360_chg_range[MT6360_RANGE_VMIVR], sel, &value);
+       if (!ret)
+               val->intval = value;
+       return ret;
+}
+
+static int mt6360_charger_get_iprechg(struct mt6360_chg_info *mci,
+                                     union power_supply_propval *val)
+{
+       int ret;
+       u32 sel, value;
+
+       ret = regmap_read(mci->regmap, MT6360_PMU_CHG_CTRL8, &sel);
+       if (ret < 0)
+               return ret;
+       sel = (sel & MT6360_IPREC_MASK) >> MT6360_IPREC_SHFT;
+       ret = linear_range_get_value(&mt6360_chg_range[MT6360_RANGE_IPREC], sel, &value);
+       if (!ret)
+               val->intval = value;
+       return ret;
+}
+
+static int mt6360_charger_get_ieoc(struct mt6360_chg_info *mci,
+                                  union power_supply_propval *val)
+{
+       int ret;
+       u32 sel, value;
+
+       ret = regmap_read(mci->regmap, MT6360_PMU_CHG_CTRL9, &sel);
+       if (ret < 0)
+               return ret;
+       sel = (sel & MT6360_IEOC_MASK) >> MT6360_IEOC_SHFT;
+       ret = linear_range_get_value(&mt6360_chg_range[MT6360_RANGE_IEOC], sel, &value);
+       if (!ret)
+               val->intval = value;
+       return ret;
+}
+
+static int mt6360_charger_set_online(struct mt6360_chg_info *mci,
+                                    const union power_supply_propval *val)
+{
+       u8 force_sleep = val->intval ? 0 : 1;
+
+       return regmap_update_bits(mci->regmap,
+                                 MT6360_PMU_CHG_CTRL1,
+                                 MT6360_FSLP_MASK,
+                                 force_sleep << MT6360_FSLP_SHFT);
+}
+
+/*
+ * Program the constant charge current: map the requested value to the
+ * nearest selector within the ICHG linear range and write PMU_CHG_CTRL7.
+ */
+static int mt6360_charger_set_ichg(struct mt6360_chg_info *mci,
+                                  const union power_supply_propval *val)
+{
+       u32 sel;
+
+       linear_range_get_selector_within(&mt6360_chg_range[MT6360_RANGE_ICHG], val->intval, &sel);
+       return regmap_update_bits(mci->regmap,
+                                 MT6360_PMU_CHG_CTRL7,
+                                 MT6360_ICHG_MASK,
+                                 sel << MT6360_ICHG_SHFT);
+}
+
+/*
+ * Program the constant charge voltage (VOREG) selector in PMU_CHG_CTRL4
+ * from the requested value via the VOREG linear range.
+ */
+static int mt6360_charger_set_cv(struct mt6360_chg_info *mci,
+                                const union power_supply_propval *val)
+{
+       u32 sel;
+
+       linear_range_get_selector_within(&mt6360_chg_range[MT6360_RANGE_VOREG], val->intval, &sel);
+       return regmap_update_bits(mci->regmap,
+                                 MT6360_PMU_CHG_CTRL4,
+                                 MT6360_VOREG_MASK,
+                                 sel << MT6360_VOREG_SHFT);
+}
+
+/*
+ * Program the input current limit (AICR) selector in PMU_CHG_CTRL3 from
+ * the requested value via the AICR linear range.
+ */
+static int mt6360_charger_set_aicr(struct mt6360_chg_info *mci,
+                                  const union power_supply_propval *val)
+{
+       u32 sel;
+
+       linear_range_get_selector_within(&mt6360_chg_range[MT6360_RANGE_AICR], val->intval, &sel);
+       return regmap_update_bits(mci->regmap,
+                                 MT6360_PMU_CHG_CTRL3,
+                                 MT6360_IAICR_MASK,
+                                 sel << MT6360_IAICR_SHFT);
+}
+
+/*
+ * Program the minimum input voltage regulation (MIVR) selector in
+ * PMU_CHG_CTRL3 from the requested value via the VMIVR linear range.
+ */
+static int mt6360_charger_set_mivr(struct mt6360_chg_info *mci,
+                                  const union power_supply_propval *val)
+{
+       u32 sel;
+
+       linear_range_get_selector_within(&mt6360_chg_range[MT6360_RANGE_VMIVR], val->intval, &sel);
+       return regmap_update_bits(mci->regmap,
+                                 MT6360_PMU_CHG_CTRL3,
+                                 MT6360_VMIVR_MASK,
+                                 sel << MT6360_VMIVR_SHFT);
+}
+
+/*
+ * Program the pre-charge current (IPREC) selector in PMU_CHG_CTRL8 from
+ * the requested value via the IPREC linear range.
+ */
+static int mt6360_charger_set_iprechg(struct mt6360_chg_info *mci,
+                                     const union power_supply_propval *val)
+{
+       u32 sel;
+
+       linear_range_get_selector_within(&mt6360_chg_range[MT6360_RANGE_IPREC], val->intval, &sel);
+       return regmap_update_bits(mci->regmap,
+                                 MT6360_PMU_CHG_CTRL8,
+                                 MT6360_IPREC_MASK,
+                                 sel << MT6360_IPREC_SHFT);
+}
+
+/*
+ * Program the end-of-charge current (IEOC) selector in PMU_CHG_CTRL9
+ * from the requested value via the IEOC linear range.
+ */
+static int mt6360_charger_set_ieoc(struct mt6360_chg_info *mci,
+                                  const union power_supply_propval *val)
+{
+       u32 sel;
+
+       linear_range_get_selector_within(&mt6360_chg_range[MT6360_RANGE_IEOC], val->intval, &sel);
+       return regmap_update_bits(mci->regmap,
+                                 MT6360_PMU_CHG_CTRL9,
+                                 MT6360_IEOC_MASK,
+                                 sel << MT6360_IEOC_SHFT);
+}
+
+/*
+ * power_supply .get_property callback: dispatch each supported property
+ * to its helper.  USB_TYPE is served from the cached psy_usb_type set by
+ * the attach interrupt; anything else returns -ENODATA.
+ */
+static int mt6360_charger_get_property(struct power_supply *psy,
+                                      enum power_supply_property psp,
+                                      union power_supply_propval *val)
+{
+       struct mt6360_chg_info *mci = power_supply_get_drvdata(psy);
+       int ret = 0;
+
+       switch (psp) {
+       case POWER_SUPPLY_PROP_ONLINE:
+               ret = mt6360_charger_get_online(mci, val);
+               break;
+       case POWER_SUPPLY_PROP_STATUS:
+               ret = mt6360_charger_get_status(mci, val);
+               break;
+       case POWER_SUPPLY_PROP_CHARGE_TYPE:
+               ret = mt6360_charger_get_charge_type(mci, val);
+               break;
+       case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
+               ret = mt6360_charger_get_ichg(mci, val);
+               break;
+       case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+               ret = mt6360_charger_get_max_ichg(mci, val);
+               break;
+       case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+               ret = mt6360_charger_get_cv(mci, val);
+               break;
+       case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX:
+               ret = mt6360_charger_get_max_cv(mci, val);
+               break;
+       case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+               ret = mt6360_charger_get_aicr(mci, val);
+               break;
+       case POWER_SUPPLY_PROP_INPUT_VOLTAGE_LIMIT:
+               ret = mt6360_charger_get_mivr(mci, val);
+               break;
+       case POWER_SUPPLY_PROP_PRECHARGE_CURRENT:
+               ret = mt6360_charger_get_iprechg(mci, val);
+               break;
+       case POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT:
+               ret = mt6360_charger_get_ieoc(mci, val);
+               break;
+       case POWER_SUPPLY_PROP_USB_TYPE:
+               val->intval = mci->psy_usb_type;
+               break;
+       default:
+               ret = -ENODATA;
+       }
+       return ret;
+}
+
+/*
+ * power_supply .set_property callback: route each writable property to
+ * its register-programming helper; unsupported properties yield -EINVAL.
+ */
+static int mt6360_charger_set_property(struct power_supply *psy,
+                                      enum power_supply_property psp,
+                                      const union power_supply_propval *val)
+{
+       struct mt6360_chg_info *mci = power_supply_get_drvdata(psy);
+       int ret;
+
+       switch (psp) {
+       case POWER_SUPPLY_PROP_ONLINE:
+               ret = mt6360_charger_set_online(mci, val);
+               break;
+       case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
+               ret = mt6360_charger_set_ichg(mci, val);
+               break;
+       case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+               ret = mt6360_charger_set_cv(mci, val);
+               break;
+       case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+               ret = mt6360_charger_set_aicr(mci, val);
+               break;
+       case POWER_SUPPLY_PROP_INPUT_VOLTAGE_LIMIT:
+               ret = mt6360_charger_set_mivr(mci, val);
+               break;
+       case POWER_SUPPLY_PROP_PRECHARGE_CURRENT:
+               ret = mt6360_charger_set_iprechg(mci, val);
+               break;
+       case POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT:
+               ret = mt6360_charger_set_ieoc(mci, val);
+               break;
+       default:
+               ret = -EINVAL;
+       }
+       return ret;
+}
+
+/*
+ * Advertise which properties userspace may set; this list must mirror
+ * the cases handled in mt6360_charger_set_property().
+ */
+static int mt6360_charger_property_is_writeable(struct power_supply *psy,
+                                              enum power_supply_property psp)
+{
+       switch (psp) {
+       case POWER_SUPPLY_PROP_ONLINE:
+       case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
+       case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+       case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+       case POWER_SUPPLY_PROP_INPUT_VOLTAGE_LIMIT:
+       case POWER_SUPPLY_PROP_PRECHARGE_CURRENT:
+       case POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT:
+               return 1;
+       default:
+               return 0;
+       }
+}
+
+/* Full set of properties exposed by the charger power supply. */
+static enum power_supply_property mt6360_charger_properties[] = {
+       POWER_SUPPLY_PROP_ONLINE,
+       POWER_SUPPLY_PROP_STATUS,
+       POWER_SUPPLY_PROP_CHARGE_TYPE,
+       POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT,
+       POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+       POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE,
+       POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX,
+       POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT,
+       POWER_SUPPLY_PROP_INPUT_VOLTAGE_LIMIT,
+       POWER_SUPPLY_PROP_PRECHARGE_CURRENT,
+       POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT,
+       POWER_SUPPLY_PROP_USB_TYPE,
+};
+
+/*
+ * Template power_supply description; it is memcpy'd into per-device
+ * state at probe time, where .name is then filled in from the device.
+ */
+static const struct power_supply_desc mt6360_charger_desc = {
+       .type                   = POWER_SUPPLY_TYPE_USB,
+       .properties             = mt6360_charger_properties,
+       .num_properties         = ARRAY_SIZE(mt6360_charger_properties),
+       .get_property           = mt6360_charger_get_property,
+       .set_property           = mt6360_charger_set_property,
+       .property_is_writeable  = mt6360_charger_property_is_writeable,
+       .usb_types              = mt6360_charger_usb_types,
+       .num_usb_types          = ARRAY_SIZE(mt6360_charger_usb_types),
+};
+
+/* OTG VBUS boost regulator ops: stock regmap helpers + linear voltage map. */
+static const struct regulator_ops mt6360_chg_otg_ops = {
+       .list_voltage = regulator_list_voltage_linear,
+       .enable = regulator_enable_regmap,
+       .disable = regulator_disable_regmap,
+       .is_enabled = regulator_is_enabled_regmap,
+       .set_voltage_sel = regulator_set_voltage_sel_regmap,
+       .get_voltage_sel = regulator_get_voltage_sel_regmap,
+};
+
+/*
+ * OTG VBUS regulator: linear range starting at 4.425 V in 25 mV steps,
+ * 57 selectors (so up to 4.425 V + 56 * 25 mV = 5.825 V).  Voltage
+ * selection lives in CHG_CTRL5, enable in CHG_CTRL1 (OPA mode bit).
+ */
+static const struct regulator_desc mt6360_otg_rdesc = {
+       .of_match = "usb-otg-vbus",
+       .name = "usb-otg-vbus",
+       .ops = &mt6360_chg_otg_ops,
+       .owner = THIS_MODULE,
+       .type = REGULATOR_VOLTAGE,
+       .min_uV = 4425000,
+       .uV_step = 25000,
+       .n_voltages = 57,
+       .vsel_reg = MT6360_PMU_CHG_CTRL5,
+       .vsel_mask = MT6360_VOBST_MASK,
+       .enable_reg = MT6360_PMU_CHG_CTRL1,
+       .enable_mask = MT6360_OPA_MODE_MASK,
+};
+
+/*
+ * BC1.2 "attach" interrupt: read the detected port type from
+ * PMU_USB_STATUS1 and publish it through psy_usb_type.  Transient or
+ * invalid states (no VBUS, detection still running, BC1.2 disabled,
+ * reserved encodings) are ignored.  Serialized with the VBUS path via
+ * chgdet_lock; always returns IRQ_HANDLED.
+ */
+static irqreturn_t mt6360_pmu_attach_i_handler(int irq, void *data)
+{
+       struct mt6360_chg_info *mci = data;
+       int ret;
+       unsigned int usb_status;
+       int last_usb_type;
+
+       mutex_lock(&mci->chgdet_lock);
+       if (!mci->bc12_en) {
+               dev_warn(mci->dev, "Received attach interrupt, bc12 disabled, ignore irq\n");
+               goto out;
+       }
+       last_usb_type = mci->psy_usb_type;
+       /* Plug in */
+       ret = regmap_read(mci->regmap, MT6360_PMU_USB_STATUS1, &usb_status);
+       if (ret < 0)
+               goto out;
+       usb_status &= MT6360_USB_STATUS_MASK;
+       usb_status >>= MT6360_USB_STATUS_SHFT;
+       switch (usb_status) {
+       case MT6360_CHG_TYPE_NOVBUS:
+               dev_dbg(mci->dev, "Received attach interrupt, no vbus\n");
+               goto out;
+       case MT6360_CHG_TYPE_UNDER_GOING:
+               dev_dbg(mci->dev, "Received attach interrupt, under going...\n");
+               goto out;
+       case MT6360_CHG_TYPE_SDP:
+               mci->psy_usb_type = POWER_SUPPLY_USB_TYPE_SDP;
+               break;
+       /* Non-standard SDP is reported to userspace as plain SDP. */
+       case MT6360_CHG_TYPE_SDPNSTD:
+               mci->psy_usb_type = POWER_SUPPLY_USB_TYPE_SDP;
+               break;
+       case MT6360_CHG_TYPE_CDP:
+               mci->psy_usb_type = POWER_SUPPLY_USB_TYPE_CDP;
+               break;
+       case MT6360_CHG_TYPE_DCP:
+               mci->psy_usb_type = POWER_SUPPLY_USB_TYPE_DCP;
+               break;
+       case MT6360_CHG_TYPE_DISABLE_BC12:
+               dev_dbg(mci->dev, "Received attach interrupt, bc12 detect not enable\n");
+               goto out;
+       default:
+               mci->psy_usb_type = POWER_SUPPLY_USB_TYPE_UNKNOWN;
+               dev_dbg(mci->dev, "Received attach interrupt, reserved address\n");
+               goto out;
+       }
+
+       dev_dbg(mci->dev, "Received attach interrupt, chg_type = %d\n", mci->psy_usb_type);
+       /* Only notify the power supply class when the type actually changed. */
+       if (last_usb_type != mci->psy_usb_type)
+               power_supply_changed(mci->psy);
+out:
+       mutex_unlock(&mci->chgdet_lock);
+       return IRQ_HANDLED;
+}
+
+/*
+ * Handle a VBUS presence change (shared by the IRQ handler and the probe
+ * time work item): on a real state change, publish disconnect (type
+ * unknown) when power goes away, and toggle BC1.2 detection (USBCHGEN)
+ * to match the new power-ready state.  Serialized by chgdet_lock.
+ */
+static void mt6360_handle_chrdet_ext_evt(struct mt6360_chg_info *mci)
+{
+       int ret;
+       bool pwr_rdy;
+
+       mutex_lock(&mci->chgdet_lock);
+       ret = mt6360_get_chrdet_ext_stat(mci, &pwr_rdy);
+       if (ret < 0)
+               goto out;
+       /* Spurious event: state unchanged, nothing to do. */
+       if (mci->pwr_rdy == pwr_rdy) {
+               dev_dbg(mci->dev, "Received vbus interrupt, pwr_rdy is same(%d)\n", pwr_rdy);
+               goto out;
+       }
+       mci->pwr_rdy = pwr_rdy;
+       dev_dbg(mci->dev, "Received vbus interrupt, pwr_rdy = %d\n", pwr_rdy);
+       if (!pwr_rdy) {
+               mci->psy_usb_type = POWER_SUPPLY_USB_TYPE_UNKNOWN;
+               power_supply_changed(mci->psy);
+
+       }
+       ret = regmap_update_bits(mci->regmap,
+                                MT6360_PMU_DEVICE_TYPE,
+                                MT6360_USBCHGEN_MASK,
+                                pwr_rdy ? MT6360_USBCHGEN_MASK : 0);
+       if (ret < 0)
+               goto out;
+       /* Remember whether BC1.2 is armed so the attach IRQ can gate on it. */
+       mci->bc12_en = pwr_rdy;
+out:
+       mutex_unlock(&mci->chgdet_lock);
+}
+
+/*
+ * Deferred-work entry point: re-evaluate the charger-detect state.
+ * Scheduled once at probe to pick up a cable already present at boot.
+ */
+static void mt6360_chrdet_work(struct work_struct *work)
+{
+       /* container_of() already yields the typed pointer; no cast needed. */
+       struct mt6360_chg_info *mci =
+               container_of(work, struct mt6360_chg_info, chrdet_work);
+
+       mt6360_handle_chrdet_ext_evt(mci);
+}
+
+/* Threaded IRQ handler for the external charger-detect (VBUS) event. */
+static irqreturn_t mt6360_pmu_chrdet_ext_evt_handler(int irq, void *data)
+{
+       struct mt6360_chg_info *mci = data;
+
+       mt6360_handle_chrdet_ext_evt(mci);
+       return IRQ_HANDLED;
+}
+
+/*
+ * Look up each named charger interrupt on the platform device and
+ * request it as a devm-managed threaded handler (falling edge, oneshot).
+ * Returns 0 on success or the first failing errno.
+ */
+static int mt6360_chg_irq_register(struct platform_device *pdev)
+{
+       const struct {
+               const char *name;
+               irq_handler_t handler;
+       } irq_descs[] = {
+               { "attach_i", mt6360_pmu_attach_i_handler },
+               { "chrdet_ext_evt", mt6360_pmu_chrdet_ext_evt_handler }
+       };
+       int i, ret;
+
+       for (i = 0; i < ARRAY_SIZE(irq_descs); i++) {
+               /* platform_get_irq_byname() returns the virq number on success. */
+               ret = platform_get_irq_byname(pdev, irq_descs[i].name);
+               if (ret < 0)
+                       return ret;
+
+               ret = devm_request_threaded_irq(&pdev->dev, ret, NULL,
+                                               irq_descs[i].handler,
+                                               IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+                                               irq_descs[i].name,
+                                               platform_get_drvdata(pdev));
+               if (ret < 0)
+                       return dev_err_probe(&pdev->dev, ret, "Failed to request %s irq\n",
+                                            irq_descs[i].name);
+       }
+
+       return 0;
+}
+
+/*
+ * Map a requested VINOVP limit (in microvolts) to the register selector
+ * of the largest supported threshold not exceeding it; values below the
+ * first entry select index 0, values at or above the last select the
+ * final index.
+ */
+static u32 mt6360_vinovp_trans_to_sel(u32 val)
+{
+       /* const lookup table: no need to rebuild it on the stack per call */
+       static const u32 vinovp_tbl[] = { 5500000, 6500000, 11000000, 14500000 };
+       int i;
+
+       /* Select the smaller and equal supported value */
+       for (i = 0; i < ARRAY_SIZE(vinovp_tbl) - 1; i++) {
+               if (val < vinovp_tbl[i + 1])
+                       break;
+       }
+       return i;
+}
+
+/*
+ * One-time hardware configuration at probe: program the VINOVP
+ * threshold, keep BC1.2 off until VBUS shows up, select AICR as the
+ * input-current-limit source, disable the ILIM pin limiter and set the
+ * OTG over-current limit to 3 A.  Each step reports via dev_err_probe().
+ */
+static int mt6360_chg_init_setting(struct mt6360_chg_info *mci)
+{
+       int ret;
+       u32 sel;
+
+       sel = mt6360_vinovp_trans_to_sel(mci->vinovp);
+       ret = regmap_update_bits(mci->regmap, MT6360_PMU_CHG_CTRL19,
+                                 MT6360_VINOVP_MASK, sel << MT6360_VINOVP_SHFT);
+       if (ret)
+               return dev_err_probe(mci->dev, ret, "%s: Failed to apply vinovp\n", __func__);
+       ret = regmap_update_bits(mci->regmap, MT6360_PMU_DEVICE_TYPE,
+                                MT6360_USBCHGEN_MASK, 0);
+       if (ret)
+               return dev_err_probe(mci->dev, ret, "%s: Failed to disable bc12\n", __func__);
+       ret = regmap_update_bits(mci->regmap, MT6360_PMU_CHG_CTRL2,
+                                MT6360_IINLMTSEL_MASK,
+                                MT6360_IINLMTSEL_AICR <<
+                                       MT6360_IINLMTSEL_SHFT);
+       if (ret)
+               return dev_err_probe(mci->dev, ret,
+                                    "%s: Failed to switch iinlmtsel to aicr\n", __func__);
+       /* Let the iinlmtsel switch settle before touching ILIM. */
+       usleep_range(5000, 6000);
+       ret = regmap_update_bits(mci->regmap, MT6360_PMU_CHG_CTRL3,
+                                MT6360_ILIM_EN_MASK, 0);
+       if (ret)
+               return dev_err_probe(mci->dev, ret,
+                                    "%s: Failed to disable ilim\n", __func__);
+       ret = regmap_update_bits(mci->regmap, MT6360_PMU_CHG_CTRL10,
+                                MT6360_OTG_OC_MASK, MT6360_OTG_OC_MASK);
+       if (ret)
+               return dev_err_probe(mci->dev, ret,
+                                    "%s: Failed to config otg oc to 3A\n", __func__);
+       return 0;
+}
+
+/*
+ * Probe: allocate driver state, apply the initial charger configuration,
+ * register the power supply, the named IRQ handlers and the OTG VBUS
+ * regulator, then schedule one charger-detect pass for a cable that is
+ * already plugged in at boot.  All resources are devm-managed.
+ */
+static int mt6360_charger_probe(struct platform_device *pdev)
+{
+       struct mt6360_chg_info *mci;
+       struct power_supply_config charger_cfg = {};
+       struct regulator_config config = { };
+       int ret;
+
+       mci = devm_kzalloc(&pdev->dev, sizeof(*mci), GFP_KERNEL);
+       if (!mci)
+               return -ENOMEM;
+
+       mci->dev = &pdev->dev;
+       mci->vinovp = 6500000;
+       mutex_init(&mci->chgdet_lock);
+       platform_set_drvdata(pdev, mci);
+       /* devm_work_autocancel() can fail; don't ignore its return value. */
+       ret = devm_work_autocancel(&pdev->dev, &mci->chrdet_work, mt6360_chrdet_work);
+       if (ret)
+               return dev_err_probe(&pdev->dev, ret, "Failed to set delayed work\n");
+
+       ret = device_property_read_u32(&pdev->dev, "richtek,vinovp-microvolt", &mci->vinovp);
+       if (ret)
+               dev_warn(&pdev->dev, "Failed to parse vinovp in DT, keep default 6.5v\n");
+
+       mci->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+       if (!mci->regmap)
+               return dev_err_probe(&pdev->dev, -ENODEV, "Failed to get parent regmap\n");
+
+       ret = mt6360_chg_init_setting(mci);
+       if (ret)
+               return dev_err_probe(&pdev->dev, ret, "Failed to initial setting\n");
+
+       /* Copy the const template so .name can be set per device. */
+       memcpy(&mci->psy_desc, &mt6360_charger_desc, sizeof(mci->psy_desc));
+       mci->psy_desc.name = dev_name(&pdev->dev);
+       charger_cfg.drv_data = mci;
+       charger_cfg.of_node = pdev->dev.of_node;
+       mci->psy = devm_power_supply_register(&pdev->dev,
+                                             &mci->psy_desc, &charger_cfg);
+       if (IS_ERR(mci->psy))
+               return dev_err_probe(&pdev->dev, PTR_ERR(mci->psy),
+                                    "Failed to register power supply dev\n");
+
+       ret = mt6360_chg_irq_register(pdev);
+       if (ret)
+               return dev_err_probe(&pdev->dev, ret, "Failed to register irqs\n");
+
+       config.dev = &pdev->dev;
+       config.regmap = mci->regmap;
+       mci->otg_rdev = devm_regulator_register(&pdev->dev, &mt6360_otg_rdesc,
+                                               &config);
+       /* Use dev_err_probe() here too, consistent with the other paths. */
+       if (IS_ERR(mci->otg_rdev))
+               return dev_err_probe(&pdev->dev, PTR_ERR(mci->otg_rdev),
+                                    "Failed to register otg regulator\n");
+
+       schedule_work(&mci->chrdet_work);
+
+       return 0;
+}
+
+/* Device-tree match table (compatible = "mediatek,mt6360-chg"). */
+static const struct of_device_id __maybe_unused mt6360_charger_of_id[] = {
+       { .compatible = "mediatek,mt6360-chg", },
+       {},
+};
+MODULE_DEVICE_TABLE(of, mt6360_charger_of_id);
+
+/* Platform-bus ID table for non-DT instantiation. */
+static const struct platform_device_id mt6360_charger_id[] = {
+       { "mt6360-chg", 0 },
+       {},
+};
+MODULE_DEVICE_TABLE(platform, mt6360_charger_id);
+
+/* Driver registration: probe-only, teardown handled entirely by devm. */
+static struct platform_driver mt6360_charger_driver = {
+       .driver = {
+               .name = "mt6360-chg",
+               .of_match_table = of_match_ptr(mt6360_charger_of_id),
+       },
+       .probe = mt6360_charger_probe,
+       .id_table = mt6360_charger_id,
+};
+module_platform_driver(mt6360_charger_driver);
+
+MODULE_AUTHOR("Gene Chen <gene_chen@richtek.com>");
+MODULE_DESCRIPTION("MT6360 Charger Driver");
+MODULE_LICENSE("GPL");
index d99e2f1..0c2132c 100644 (file)
@@ -571,6 +571,7 @@ int power_supply_get_battery_info(struct power_supply *psy,
        int err, len, index;
        const __be32 *list;
 
+       info->technology                     = POWER_SUPPLY_TECHNOLOGY_UNKNOWN;
        info->energy_full_design_uwh         = -EINVAL;
        info->charge_full_design_uah         = -EINVAL;
        info->voltage_min_design_uv          = -EINVAL;
@@ -618,6 +619,24 @@ int power_supply_get_battery_info(struct power_supply *psy,
         * Documentation/power/power_supply_class.rst.
         */
 
+       if (!of_property_read_string(battery_np, "device-chemistry", &value)) {
+               if (!strcmp("nickel-cadmium", value))
+                       info->technology = POWER_SUPPLY_TECHNOLOGY_NiCd;
+               else if (!strcmp("nickel-metal-hydride", value))
+                       info->technology = POWER_SUPPLY_TECHNOLOGY_NiMH;
+               else if (!strcmp("lithium-ion", value))
+                       /* Imprecise lithium-ion type */
+                       info->technology = POWER_SUPPLY_TECHNOLOGY_LION;
+               else if (!strcmp("lithium-ion-polymer", value))
+                       info->technology = POWER_SUPPLY_TECHNOLOGY_LIPO;
+               else if (!strcmp("lithium-ion-iron-phosphate", value))
+                       info->technology = POWER_SUPPLY_TECHNOLOGY_LiFe;
+               else if (!strcmp("lithium-ion-manganese-oxide", value))
+                       info->technology = POWER_SUPPLY_TECHNOLOGY_LiMn;
+               else
+                       dev_warn(&psy->dev, "%s unknown battery type\n", value);
+       }
+
        of_property_read_u32(battery_np, "energy-full-design-microwatt-hours",
                             &info->energy_full_design_uwh);
        of_property_read_u32(battery_np, "charge-full-design-microamp-hours",
index c890e1c..84cc9fb 100644 (file)
@@ -929,11 +929,8 @@ static int smbb_charger_probe(struct platform_device *pdev)
                int irq;
 
                irq = platform_get_irq_byname(pdev, smbb_charger_irqs[i].name);
-               if (irq < 0) {
-                       dev_err(&pdev->dev, "failed to get irq '%s'\n",
-                               smbb_charger_irqs[i].name);
+               if (irq < 0)
                        return irq;
-               }
 
                smbb_charger_irqs[i].handler(irq, chg);
 
index 8190619..a5e09ac 100644 (file)
@@ -9,10 +9,12 @@
 #include <linux/device.h>
 #include <linux/bitops.h>
 #include <linux/errno.h>
+#include <linux/iio/consumer.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
 #include <linux/mfd/rn5t618.h>
+#include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/power_supply.h>
 #include <linux/regmap.h>
@@ -64,6 +66,8 @@ struct rn5t618_power_info {
        struct power_supply *battery;
        struct power_supply *usb;
        struct power_supply *adp;
+       struct iio_channel *channel_vusb;
+       struct iio_channel *channel_vadp;
        int irq;
 };
 
@@ -77,6 +81,7 @@ static enum power_supply_usb_type rn5t618_usb_types[] = {
 static enum power_supply_property rn5t618_usb_props[] = {
        /* input current limit is not very accurate */
        POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT,
+       POWER_SUPPLY_PROP_VOLTAGE_NOW,
        POWER_SUPPLY_PROP_STATUS,
        POWER_SUPPLY_PROP_USB_TYPE,
        POWER_SUPPLY_PROP_ONLINE,
@@ -85,6 +90,7 @@ static enum power_supply_property rn5t618_usb_props[] = {
 static enum power_supply_property rn5t618_adp_props[] = {
        /* input current limit is not very accurate */
        POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT,
+       POWER_SUPPLY_PROP_VOLTAGE_NOW,
        POWER_SUPPLY_PROP_STATUS,
        POWER_SUPPLY_PROP_ONLINE,
 };
@@ -463,6 +469,15 @@ static int rn5t618_adp_get_property(struct power_supply *psy,
                        return ret;
 
                val->intval = FROM_CUR_REG(regval);
+               break;
+       case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+               if (!info->channel_vadp)
+                       return -ENODATA;
+
+               ret = iio_read_channel_processed_scale(info->channel_vadp, &val->intval, 1000);
+               if (ret < 0)
+                       return ret;
+
                break;
        default:
                return -EINVAL;
@@ -588,6 +603,15 @@ static int rn5t618_usb_get_property(struct power_supply *psy,
 
                        val->intval = FROM_CUR_REG(regval);
                }
+               break;
+       case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+               if (!info->channel_vusb)
+                       return -ENODATA;
+
+               ret = iio_read_channel_processed_scale(info->channel_vusb, &val->intval, 1000);
+               if (ret < 0)
+                       return ret;
+
                break;
        default:
                return -EINVAL;
@@ -711,6 +735,20 @@ static int rn5t618_power_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, info);
 
+       info->channel_vusb = devm_iio_channel_get(&pdev->dev, "vusb");
+       if (IS_ERR(info->channel_vusb)) {
+               if (PTR_ERR(info->channel_vusb) == -ENODEV)
+                       return -EPROBE_DEFER;
+               return PTR_ERR(info->channel_vusb);
+       }
+
+       info->channel_vadp = devm_iio_channel_get(&pdev->dev, "vadp");
+       if (IS_ERR(info->channel_vadp)) {
+               if (PTR_ERR(info->channel_vadp) == -ENODEV)
+                       return -EPROBE_DEFER;
+               return PTR_ERR(info->channel_vadp);
+       }
+
        ret = regmap_read(info->rn5t618->regmap, RN5T618_CONTROL, &v);
        if (ret)
                return ret;
index f84dbaa..c4a95b0 100644 (file)
@@ -31,8 +31,9 @@ enum {
        REG_CURRENT_AVG,
        REG_MAX_ERR,
        REG_CAPACITY,
-       REG_TIME_TO_EMPTY,
-       REG_TIME_TO_FULL,
+       REG_TIME_TO_EMPTY_NOW,
+       REG_TIME_TO_EMPTY_AVG,
+       REG_TIME_TO_FULL_AVG,
        REG_STATUS,
        REG_CAPACITY_LEVEL,
        REG_CYCLE_COUNT,
@@ -102,7 +103,7 @@ static const struct chip_data {
        [REG_TEMPERATURE] =
                SBS_DATA(POWER_SUPPLY_PROP_TEMP, 0x08, 0, 65535),
        [REG_VOLTAGE] =
-               SBS_DATA(POWER_SUPPLY_PROP_VOLTAGE_NOW, 0x09, 0, 20000),
+               SBS_DATA(POWER_SUPPLY_PROP_VOLTAGE_NOW, 0x09, 0, 65535),
        [REG_CURRENT_NOW] =
                SBS_DATA(POWER_SUPPLY_PROP_CURRENT_NOW, 0x0A, -32768, 32767),
        [REG_CURRENT_AVG] =
@@ -119,9 +120,11 @@ static const struct chip_data {
                SBS_DATA(POWER_SUPPLY_PROP_ENERGY_FULL, 0x10, 0, 65535),
        [REG_FULL_CHARGE_CAPACITY_CHARGE] =
                SBS_DATA(POWER_SUPPLY_PROP_CHARGE_FULL, 0x10, 0, 65535),
-       [REG_TIME_TO_EMPTY] =
+       [REG_TIME_TO_EMPTY_NOW] =
+               SBS_DATA(POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW, 0x11, 0, 65535),
+       [REG_TIME_TO_EMPTY_AVG] =
                SBS_DATA(POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG, 0x12, 0, 65535),
-       [REG_TIME_TO_FULL] =
+       [REG_TIME_TO_FULL_AVG] =
                SBS_DATA(POWER_SUPPLY_PROP_TIME_TO_FULL_AVG, 0x13, 0, 65535),
        [REG_CHARGE_CURRENT] =
                SBS_DATA(POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, 0x14, 0, 65535),
@@ -165,6 +168,7 @@ static const enum power_supply_property sbs_properties[] = {
        POWER_SUPPLY_PROP_CAPACITY,
        POWER_SUPPLY_PROP_CAPACITY_ERROR_MARGIN,
        POWER_SUPPLY_PROP_TEMP,
+       POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW,
        POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG,
        POWER_SUPPLY_PROP_TIME_TO_FULL_AVG,
        POWER_SUPPLY_PROP_SERIAL_NUMBER,
@@ -748,6 +752,7 @@ static void  sbs_unit_adjustment(struct i2c_client *client,
                val->intval -= TEMP_KELVIN_TO_CELSIUS;
                break;
 
+       case POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW:
        case POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG:
        case POWER_SUPPLY_PROP_TIME_TO_FULL_AVG:
                /* sbs provides time to empty and time to full in minutes.
@@ -966,6 +971,7 @@ static int sbs_get_property(struct power_supply *psy,
        case POWER_SUPPLY_PROP_CURRENT_NOW:
        case POWER_SUPPLY_PROP_CURRENT_AVG:
        case POWER_SUPPLY_PROP_TEMP:
+       case POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW:
        case POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG:
        case POWER_SUPPLY_PROP_TIME_TO_FULL_AVG:
        case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
index 1ae8374..ae45069 100644 (file)
@@ -1229,10 +1229,8 @@ static int sc27xx_fgu_probe(struct platform_device *pdev)
        }
 
        irq = platform_get_irq(pdev, 0);
-       if (irq < 0) {
-               dev_err(dev, "no irq resource specified\n");
+       if (irq < 0)
                return irq;
-       }
 
        ret = devm_request_threaded_irq(data->dev, irq, NULL,
                                        sc27xx_fgu_interrupt,
index df24042..753944e 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/power_supply.h>
 #include <linux/property.h>
 #include <linux/regmap.h>
+#include <linux/regulator/driver.h>
 
 #include <dt-bindings/power/summit,smb347-charger.h>
 
@@ -55,6 +56,7 @@
 #define CFG_PIN_EN_CTRL_ACTIVE_LOW             0x60
 #define CFG_PIN_EN_APSD_IRQ                    BIT(1)
 #define CFG_PIN_EN_CHARGER_ERROR               BIT(2)
+#define CFG_PIN_EN_CTRL                                BIT(4)
 #define CFG_THERM                              0x07
 #define CFG_THERM_SOFT_HOT_COMPENSATION_MASK   0x03
 #define CFG_THERM_SOFT_HOT_COMPENSATION_SHIFT  0
 #define CFG_THERM_SOFT_COLD_COMPENSATION_SHIFT 2
 #define CFG_THERM_MONITOR_DISABLED             BIT(4)
 #define CFG_SYSOK                              0x08
+#define CFG_SYSOK_INOK_ACTIVE_HIGH             BIT(0)
 #define CFG_SYSOK_SUSPEND_HARD_LIMIT_DISABLED  BIT(2)
 #define CFG_OTHER                              0x09
 #define CFG_OTHER_RID_MASK                     0xc0
 #define CFG_OTHER_RID_ENABLED_AUTO_OTG         0xc0
 #define CFG_OTG                                        0x0a
 #define CFG_OTG_TEMP_THRESHOLD_MASK            0x30
+#define CFG_OTG_CURRENT_LIMIT_250mA            BIT(2)
+#define CFG_OTG_CURRENT_LIMIT_750mA            BIT(3)
 #define CFG_OTG_TEMP_THRESHOLD_SHIFT           4
 #define CFG_OTG_CC_COMPENSATION_MASK           0xc0
 #define CFG_OTG_CC_COMPENSATION_SHIFT          6
@@ -91,6 +96,7 @@
 #define CMD_A                                  0x30
 #define CMD_A_CHG_ENABLED                      BIT(1)
 #define CMD_A_SUSPEND_ENABLED                  BIT(2)
+#define CMD_A_OTG_ENABLED                      BIT(4)
 #define CMD_A_ALLOW_WRITE                      BIT(7)
 #define CMD_B                                  0x31
 #define CMD_C                                  0x33
  * @regmap: pointer to driver regmap
  * @mains: power_supply instance for AC/DC power
  * @usb: power_supply instance for USB power
+ * @usb_rdev: USB VBUS regulator device
  * @id: SMB charger ID
  * @mains_online: is AC/DC input connected
  * @usb_online: is USB input connected
- * @charging_enabled: is charging enabled
  * @irq_unsupported: is interrupt unsupported by SMB hardware
+ * @usb_vbus_enabled: is USB VBUS powered by SMB charger
  * @max_charge_current: maximum current (in uA) the battery can be charged
  * @max_charge_voltage: maximum voltage (in uV) the battery can be charged
  * @pre_charge_current: current (in uA) to use in pre-charging phase
  * @use_usb_otg: USB OTG output can be used (not implemented yet)
  * @enable_control: how charging enable/disable is controlled
  *                 (driver/pin controls)
+ * @inok_polarity: polarity of INOK signal which denotes presence of external
+ *                power supply
  *
  * @use_main, @use_usb, and @use_usb_otg are means to enable/disable
  * hardware support for these. This is useful when we want to have for
@@ -189,11 +198,12 @@ struct smb347_charger {
        struct regmap           *regmap;
        struct power_supply     *mains;
        struct power_supply     *usb;
+       struct regulator_dev    *usb_rdev;
        unsigned int            id;
        bool                    mains_online;
        bool                    usb_online;
-       bool                    charging_enabled;
        bool                    irq_unsupported;
+       bool                    usb_vbus_enabled;
 
        unsigned int            max_charge_current;
        unsigned int            max_charge_voltage;
@@ -214,6 +224,7 @@ struct smb347_charger {
        bool                    use_usb;
        bool                    use_usb_otg;
        unsigned int            enable_control;
+       unsigned int            inok_polarity;
 };
 
 enum smb_charger_chipid {
@@ -358,21 +369,18 @@ static int smb347_charging_status(struct smb347_charger *smb)
 
 static int smb347_charging_set(struct smb347_charger *smb, bool enable)
 {
-       int ret = 0;
-
        if (smb->enable_control != SMB3XX_CHG_ENABLE_SW) {
                dev_dbg(smb->dev, "charging enable/disable in SW disabled\n");
                return 0;
        }
 
-       if (smb->charging_enabled != enable) {
-               ret = regmap_update_bits(smb->regmap, CMD_A, CMD_A_CHG_ENABLED,
-                                        enable ? CMD_A_CHG_ENABLED : 0);
-               if (!ret)
-                       smb->charging_enabled = enable;
+       if (enable && smb->usb_vbus_enabled) {
+               dev_dbg(smb->dev, "charging not enabled because USB is in host mode\n");
+               return 0;
        }
 
-       return ret;
+       return regmap_update_bits(smb->regmap, CMD_A, CMD_A_CHG_ENABLED,
+                                 enable ? CMD_A_CHG_ENABLED : 0);
 }
 
 static inline int smb347_charging_enable(struct smb347_charger *smb)
@@ -671,10 +679,22 @@ static int smb347_set_temp_limits(struct smb347_charger *smb)
  *
  * Returns %0 on success and negative errno in case of failure.
  */
-static int smb347_set_writable(struct smb347_charger *smb, bool writable)
+static int smb347_set_writable(struct smb347_charger *smb, bool writable,
+                              bool irq_toggle)
 {
-       return regmap_update_bits(smb->regmap, CMD_A, CMD_A_ALLOW_WRITE,
-                                 writable ? CMD_A_ALLOW_WRITE : 0);
+       struct i2c_client *client = to_i2c_client(smb->dev);
+       int ret;
+
+       if (writable && irq_toggle && !smb->irq_unsupported)
+               disable_irq(client->irq);
+
+       ret = regmap_update_bits(smb->regmap, CMD_A, CMD_A_ALLOW_WRITE,
+                                writable ? CMD_A_ALLOW_WRITE : 0);
+
+       if ((!writable || ret) && irq_toggle && !smb->irq_unsupported)
+               enable_irq(client->irq);
+
+       return ret;
 }
 
 static int smb347_hw_init(struct smb347_charger *smb)
@@ -682,7 +702,7 @@ static int smb347_hw_init(struct smb347_charger *smb)
        unsigned int val;
        int ret;
 
-       ret = smb347_set_writable(smb, true);
+       ret = smb347_set_writable(smb, true, false);
        if (ret < 0)
                return ret;
 
@@ -724,6 +744,15 @@ static int smb347_hw_init(struct smb347_charger *smb)
        if (ret < 0)
                goto fail;
 
+       /* Activate pin control, making it writable. */
+       switch (smb->enable_control) {
+       case SMB3XX_CHG_ENABLE_PIN_ACTIVE_LOW:
+       case SMB3XX_CHG_ENABLE_PIN_ACTIVE_HIGH:
+               ret = regmap_set_bits(smb->regmap, CFG_PIN, CFG_PIN_EN_CTRL);
+               if (ret < 0)
+                       goto fail;
+       }
+
        /*
         * Make the charging functionality controllable by a write to the
         * command register unless pin control is specified in the platform
@@ -758,7 +787,7 @@ static int smb347_hw_init(struct smb347_charger *smb)
        ret = smb347_start_stop_charging(smb);
 
 fail:
-       smb347_set_writable(smb, false);
+       smb347_set_writable(smb, false, false);
        return ret;
 }
 
@@ -866,7 +895,7 @@ static int smb347_irq_set(struct smb347_charger *smb, bool enable)
        if (smb->irq_unsupported)
                return 0;
 
-       ret = smb347_set_writable(smb, true);
+       ret = smb347_set_writable(smb, true, true);
        if (ret < 0)
                return ret;
 
@@ -891,7 +920,7 @@ static int smb347_irq_set(struct smb347_charger *smb, bool enable)
        ret = regmap_update_bits(smb->regmap, CFG_PIN, CFG_PIN_EN_CHARGER_ERROR,
                                 enable ? CFG_PIN_EN_CHARGER_ERROR : 0);
 fail:
-       smb347_set_writable(smb, false);
+       smb347_set_writable(smb, false, true);
        return ret;
 }
 
@@ -919,7 +948,7 @@ static int smb347_irq_init(struct smb347_charger *smb,
        if (!client->irq)
                return 0;
 
-       ret = smb347_set_writable(smb, true);
+       ret = smb347_set_writable(smb, true, false);
        if (ret < 0)
                return ret;
 
@@ -931,7 +960,7 @@ static int smb347_irq_init(struct smb347_charger *smb,
                                 CFG_STAT_ACTIVE_HIGH | CFG_STAT_DISABLED,
                                 CFG_STAT_DISABLED);
 
-       smb347_set_writable(smb, false);
+       smb347_set_writable(smb, false, false);
 
        if (ret < 0) {
                dev_warn(smb->dev, "failed to initialize IRQ: %d\n", ret);
@@ -1241,6 +1270,13 @@ static void smb347_dt_parse_dev_info(struct smb347_charger *smb)
        /* Select charging control */
        device_property_read_u32(dev, "summit,enable-charge-control",
                                 &smb->enable_control);
+
+       /*
+        * Polarity of INOK signal indicating presence of external power
+        * supply connected to the charger.
+        */
+       device_property_read_u32(dev, "summit,inok-polarity",
+                                &smb->inok_polarity);
 }
 
 static int smb347_get_battery_info(struct smb347_charger *smb)
@@ -1292,12 +1328,176 @@ static int smb347_get_battery_info(struct smb347_charger *smb)
        return 0;
 }
 
+static int smb347_usb_vbus_get_current_limit(struct regulator_dev *rdev)
+{
+       struct smb347_charger *smb = rdev_get_drvdata(rdev);
+       unsigned int val;
+       int ret;
+
+       ret = regmap_read(smb->regmap, CFG_OTG, &val);
+       if (ret < 0)
+               return ret;
+
+       /*
+        * It's unknown what happens if this bit is unset due to lack of
+        * access to the datasheet, assume it's limit-enable.
+        */
+       if (!(val & CFG_OTG_CURRENT_LIMIT_250mA))
+               return 0;
+
+       return val & CFG_OTG_CURRENT_LIMIT_750mA ? 750000 : 250000;
+}
+
+static int smb347_usb_vbus_set_new_current_limit(struct smb347_charger *smb,
+                                                int max_uA)
+{
+       const unsigned int mask = CFG_OTG_CURRENT_LIMIT_750mA |
+                                 CFG_OTG_CURRENT_LIMIT_250mA;
+       unsigned int val = CFG_OTG_CURRENT_LIMIT_250mA;
+       int ret;
+
+       if (max_uA >= 750000)
+               val |= CFG_OTG_CURRENT_LIMIT_750mA;
+
+       ret = regmap_update_bits(smb->regmap, CFG_OTG, mask, val);
+       if (ret < 0)
+               dev_err(smb->dev, "failed to change USB current limit\n");
+
+       return ret;
+}
+
+static int smb347_usb_vbus_set_current_limit(struct regulator_dev *rdev,
+                                            int min_uA, int max_uA)
+{
+       struct smb347_charger *smb = rdev_get_drvdata(rdev);
+       int ret;
+
+       ret = smb347_set_writable(smb, true, true);
+       if (ret < 0)
+               return ret;
+
+       ret = smb347_usb_vbus_set_new_current_limit(smb, max_uA);
+       smb347_set_writable(smb, false, true);
+
+       return ret;
+}
+
+static int smb347_usb_vbus_regulator_enable(struct regulator_dev *rdev)
+{
+       struct smb347_charger *smb = rdev_get_drvdata(rdev);
+       int ret, max_uA;
+
+       ret = smb347_set_writable(smb, true, true);
+       if (ret < 0)
+               return ret;
+
+       smb347_charging_disable(smb);
+
+       if (device_property_read_bool(&rdev->dev, "summit,needs-inok-toggle")) {
+               unsigned int sysok = 0;
+
+               if (smb->inok_polarity == SMB3XX_SYSOK_INOK_ACTIVE_LOW)
+                       sysok = CFG_SYSOK_INOK_ACTIVE_HIGH;
+
+               /*
+                * VBUS won't be powered if INOK is active, so we need to
+                * manually disable INOK on some platforms.
+                */
+               ret = regmap_update_bits(smb->regmap, CFG_SYSOK,
+                                        CFG_SYSOK_INOK_ACTIVE_HIGH, sysok);
+               if (ret < 0) {
+                       dev_err(smb->dev, "failed to disable INOK\n");
+                       goto done;
+               }
+       }
+
+       ret = smb347_usb_vbus_get_current_limit(rdev);
+       if (ret < 0) {
+               dev_err(smb->dev, "failed to get USB VBUS current limit\n");
+               goto done;
+       }
+
+       max_uA = ret;
+
+       ret = smb347_usb_vbus_set_new_current_limit(smb, 250000);
+       if (ret < 0) {
+               dev_err(smb->dev, "failed to preset USB VBUS current limit\n");
+               goto done;
+       }
+
+       ret = regmap_set_bits(smb->regmap, CMD_A, CMD_A_OTG_ENABLED);
+       if (ret < 0) {
+               dev_err(smb->dev, "failed to enable USB VBUS\n");
+               goto done;
+       }
+
+       smb->usb_vbus_enabled = true;
+
+       ret = smb347_usb_vbus_set_new_current_limit(smb, max_uA);
+       if (ret < 0) {
+               dev_err(smb->dev, "failed to restore USB VBUS current limit\n");
+               goto done;
+       }
+done:
+       smb347_set_writable(smb, false, true);
+
+       return ret;
+}
+
+static int smb347_usb_vbus_regulator_disable(struct regulator_dev *rdev)
+{
+       struct smb347_charger *smb = rdev_get_drvdata(rdev);
+       int ret;
+
+       ret = smb347_set_writable(smb, true, true);
+       if (ret < 0)
+               return ret;
+
+       ret = regmap_clear_bits(smb->regmap, CMD_A, CMD_A_OTG_ENABLED);
+       if (ret < 0) {
+               dev_err(smb->dev, "failed to disable USB VBUS\n");
+               goto done;
+       }
+
+       smb->usb_vbus_enabled = false;
+
+       if (device_property_read_bool(&rdev->dev, "summit,needs-inok-toggle")) {
+               unsigned int sysok = 0;
+
+               if (smb->inok_polarity == SMB3XX_SYSOK_INOK_ACTIVE_HIGH)
+                       sysok = CFG_SYSOK_INOK_ACTIVE_HIGH;
+
+               ret = regmap_update_bits(smb->regmap, CFG_SYSOK,
+                                        CFG_SYSOK_INOK_ACTIVE_HIGH, sysok);
+               if (ret < 0) {
+                       dev_err(smb->dev, "failed to enable INOK\n");
+                       goto done;
+               }
+       }
+
+       smb347_start_stop_charging(smb);
+done:
+       smb347_set_writable(smb, false, true);
+
+       return ret;
+}
+
 static const struct regmap_config smb347_regmap = {
        .reg_bits       = 8,
        .val_bits       = 8,
        .max_register   = SMB347_MAX_REGISTER,
        .volatile_reg   = smb347_volatile_reg,
        .readable_reg   = smb347_readable_reg,
+       .cache_type     = REGCACHE_FLAT,
+       .num_reg_defaults_raw = SMB347_MAX_REGISTER,
+};
+
+static const struct regulator_ops smb347_usb_vbus_regulator_ops = {
+       .is_enabled     = regulator_is_enabled_regmap,
+       .enable         = smb347_usb_vbus_regulator_enable,
+       .disable        = smb347_usb_vbus_regulator_disable,
+       .get_current_limit = smb347_usb_vbus_get_current_limit,
+       .set_current_limit = smb347_usb_vbus_set_current_limit,
 };
 
 static const struct power_supply_desc smb347_mains_desc = {
@@ -1316,10 +1516,24 @@ static const struct power_supply_desc smb347_usb_desc = {
        .num_properties = ARRAY_SIZE(smb347_properties),
 };
 
+static const struct regulator_desc smb347_usb_vbus_regulator_desc = {
+       .name           = "smb347-usb-vbus",
+       .of_match       = of_match_ptr("usb-vbus"),
+       .ops            = &smb347_usb_vbus_regulator_ops,
+       .type           = REGULATOR_VOLTAGE,
+       .owner          = THIS_MODULE,
+       .enable_reg     = CMD_A,
+       .enable_mask    = CMD_A_OTG_ENABLED,
+       .enable_val     = CMD_A_OTG_ENABLED,
+       .fixed_uV       = 5000000,
+       .n_voltages     = 1,
+};
+
 static int smb347_probe(struct i2c_client *client,
                        const struct i2c_device_id *id)
 {
        struct power_supply_config mains_usb_cfg = {};
+       struct regulator_config usb_rdev_cfg = {};
        struct device *dev = &client->dev;
        struct smb347_charger *smb;
        int ret;
@@ -1367,6 +1581,18 @@ static int smb347_probe(struct i2c_client *client,
        if (ret)
                return ret;
 
+       usb_rdev_cfg.dev = dev;
+       usb_rdev_cfg.driver_data = smb;
+       usb_rdev_cfg.regmap = smb->regmap;
+
+       smb->usb_rdev = devm_regulator_register(dev,
+                                               &smb347_usb_vbus_regulator_desc,
+                                               &usb_rdev_cfg);
+       if (IS_ERR(smb->usb_rdev)) {
+               smb347_irq_disable(smb);
+               return PTR_ERR(smb->usb_rdev);
+       }
+
        return 0;
 }
 
@@ -1374,11 +1600,17 @@ static int smb347_remove(struct i2c_client *client)
 {
        struct smb347_charger *smb = i2c_get_clientdata(client);
 
+       smb347_usb_vbus_regulator_disable(smb->usb_rdev);
        smb347_irq_disable(smb);
 
        return 0;
 }
 
+static void smb347_shutdown(struct i2c_client *client)
+{
+       smb347_remove(client);
+}
+
 static const struct i2c_device_id smb347_id[] = {
        { "smb345", SMB345 },
        { "smb347", SMB347 },
@@ -1402,6 +1634,7 @@ static struct i2c_driver smb347_driver = {
        },
        .probe = smb347_probe,
        .remove = smb347_remove,
+       .shutdown = smb347_shutdown,
        .id_table = smb347_id,
 };
 module_i2c_driver(smb347_driver);
index 8c20e52..e085c25 100644 (file)
@@ -90,7 +90,8 @@ config PTP_1588_CLOCK_INES
 config PTP_1588_CLOCK_PCH
        tristate "Intel PCH EG20T as PTP clock"
        depends on X86_32 || COMPILE_TEST
-       depends on HAS_IOMEM && NET
+       depends on HAS_IOMEM && PCI
+       depends on NET
        imply PTP_1588_CLOCK
        help
          This driver adds support for using the PCH EG20T as a PTP
index b3d96b7..41b92dc 100644 (file)
@@ -154,7 +154,7 @@ static int unregister_vclock(struct device *dev, void *data)
        struct ptp_clock *ptp = dev_get_drvdata(dev);
        struct ptp_clock_info *info = ptp->info;
        struct ptp_vclock *vclock;
-       u8 *num = data;
+       u32 *num = data;
 
        vclock = info_to_vclock(info);
        dev_info(dev->parent, "delete virtual clock ptp%d\n",
index 24ce9a1..4fd13b0 100644 (file)
@@ -1044,7 +1044,7 @@ config REGULATOR_RT6160
        help
          This adds support for voltage regulator in Richtek RT6160.
          This device automatically change voltage output mode from
-         Buck or Boost. The mode transistion depend on the input source voltage.
+         Buck or Boost. The mode transition depend on the input source voltage.
          The wide output range is from 2025mV to 5200mV and can be used on most
          common application scenario.
 
@@ -1053,10 +1053,21 @@ config REGULATOR_RT6245
        depends on I2C
        select REGMAP_I2C
        help
-         This adds supprot for Richtek RT6245 voltage regulator.
+         This adds support for Richtek RT6245 voltage regulator.
          It can support up to 14A output current and adjustable output voltage
          from 0.4375V to 1.3875V, per step 12.5mV.
 
+config REGULATOR_RTQ2134
+       tristate "Richtek RTQ2134 SubPMIC Regulator"
+       depends on I2C
+       select REGMAP_I2C
+       help
+         This driver adds support for RTQ2134 SubPMIC regulators.
+         The RTQ2134 is a multi-phase, programmable power management IC that
+         integrates four highly efficient, synchronous step-down converter
+         cores. It features a wide output voltage range and the capability to
+         configure the corresponding power stages.
+
 config REGULATOR_RTMV20
        tristate "Richtek RTMV20 Laser Diode Regulator"
        depends on I2C
@@ -1066,6 +1077,15 @@ config REGULATOR_RTMV20
          the Richtek RTMV20. It can support the load current up to 6A and
          integrate strobe/vsync/fsin signal to synchronize the IR camera.
 
+config REGULATOR_RTQ6752
+       tristate "Richtek RTQ6752 TFT LCD voltage regulator"
+       depends on I2C
+       select REGMAP_I2C
+       help
+         This driver adds support for Richtek RTQ6752. RTQ6752 includes two
+         synchronous boost converters for PAVDD, and one synchronous NAVDD
+         buck-boost. This device is suitable for automotive TFT-LCD panels.
+
 config REGULATOR_S2MPA01
        tristate "Samsung S2MPA01 voltage regulator"
        depends on MFD_SEC_CORE || COMPILE_TEST
index 8c2f822..9e382b5 100644 (file)
@@ -128,6 +128,8 @@ obj-$(CONFIG_REGULATOR_RT5033)      += rt5033-regulator.o
 obj-$(CONFIG_REGULATOR_RT6160) += rt6160-regulator.o
 obj-$(CONFIG_REGULATOR_RT6245) += rt6245-regulator.o
 obj-$(CONFIG_REGULATOR_RTMV20) += rtmv20-regulator.o
+obj-$(CONFIG_REGULATOR_RTQ2134) += rtq2134-regulator.o
+obj-$(CONFIG_REGULATOR_RTQ6752)        += rtq6752-regulator.o
 obj-$(CONFIG_REGULATOR_S2MPA01) += s2mpa01.o
 obj-$(CONFIG_REGULATOR_S2MPS11) += s2mps11.o
 obj-$(CONFIG_REGULATOR_S5M8767) += s5m8767.o
index b1eb469..d60fcce 100644 (file)
@@ -55,7 +55,8 @@
 #define BD718XX_HWOPNAME(swopname) swopname##_hwcontrol
 
 #define BD718XX_OPS(name, _list_voltage, _map_voltage, _set_voltage_sel, \
-                  _get_voltage_sel, _set_voltage_time_sel, _set_ramp_delay) \
+                  _get_voltage_sel, _set_voltage_time_sel, _set_ramp_delay, \
+                  _set_uvp, _set_ovp)                          \
 static const struct regulator_ops name = {                     \
        .enable = regulator_enable_regmap,                      \
        .disable = regulator_disable_regmap,                    \
@@ -66,6 +67,8 @@ static const struct regulator_ops name = {                    \
        .get_voltage_sel = (_get_voltage_sel),                  \
        .set_voltage_time_sel = (_set_voltage_time_sel),        \
        .set_ramp_delay = (_set_ramp_delay),                    \
+       .set_under_voltage_protection = (_set_uvp),             \
+       .set_over_voltage_protection = (_set_ovp),              \
 };                                                             \
                                                                \
 static const struct regulator_ops BD718XX_HWOPNAME(name) = {   \
@@ -76,6 +79,8 @@ static const struct regulator_ops BD718XX_HWOPNAME(name) = {  \
        .get_voltage_sel = (_get_voltage_sel),                  \
        .set_voltage_time_sel = (_set_voltage_time_sel),        \
        .set_ramp_delay = (_set_ramp_delay),                    \
+       .set_under_voltage_protection = (_set_uvp),             \
+       .set_over_voltage_protection = (_set_ovp),              \
 }                                                              \
 
 /*
@@ -154,17 +159,9 @@ static void voltage_change_done(struct regulator_dev *rdev, unsigned int sel,
                 * exceed it due to the scheduling.
                 */
                msleep(1);
-               /*
-                * Note for next hacker. The PWRGOOD should not be masked on
-                * BD71847 so we will just unconditionally enable detection
-                * when voltage is set.
-                * If someone want's to disable PWRGOOD he must implement
-                * caching and restoring the old value here. I am not
-                * aware of such use-cases so for the sake of the simplicity
-                * we just always enable PWRGOOD here.
-                */
-               ret = regmap_update_bits(rdev->regmap, BD718XX_REG_MVRFLTMASK2,
-                                        *mask, 0);
+
+               ret = regmap_clear_bits(rdev->regmap, BD718XX_REG_MVRFLTMASK2,
+                                        *mask);
                if (ret)
                        dev_err(&rdev->dev,
                                "Failed to re-enable voltage monitoring (%d)\n",
@@ -208,12 +205,27 @@ static int voltage_change_prepare(struct regulator_dev *rdev, unsigned int sel,
                 * time configurable.
                 */
                if (new > now) {
+                       int tmp;
+                       int prot_bit;
                        int ldo_offset = rdev->desc->id - BD718XX_LDO1;
 
-                       *mask = BD718XX_LDO1_VRMON80 << ldo_offset;
-                       ret = regmap_update_bits(rdev->regmap,
-                                                BD718XX_REG_MVRFLTMASK2,
-                                                *mask, *mask);
+                       prot_bit = BD718XX_LDO1_VRMON80 << ldo_offset;
+                       ret = regmap_read(rdev->regmap, BD718XX_REG_MVRFLTMASK2,
+                                         &tmp);
+                       if (ret) {
+                               dev_err(&rdev->dev,
+                                       "Failed to read voltage monitoring state\n");
+                               return ret;
+                       }
+
+                       if (!(tmp & prot_bit)) {
+                               /* We disable protection if it was enabled... */
+                               ret = regmap_set_bits(rdev->regmap,
+                                                     BD718XX_REG_MVRFLTMASK2,
+                                                     prot_bit);
+                               /* ...and we also want to re-enable it */
+                               *mask = prot_bit;
+                       }
                        if (ret) {
                                dev_err(&rdev->dev,
                                        "Failed to stop voltage monitoring\n");
@@ -266,99 +278,6 @@ static int bd71837_set_voltage_sel_pickable_restricted(
        return regulator_set_voltage_sel_pickable_regmap(rdev, sel);
 }
 
-/*
- * OPS common for BD71847 and BD71850
- */
-BD718XX_OPS(bd718xx_pickable_range_ldo_ops,
-           regulator_list_voltage_pickable_linear_range, NULL,
-           bd718xx_set_voltage_sel_pickable_restricted,
-           regulator_get_voltage_sel_pickable_regmap, NULL, NULL);
-
-/* BD71847 and BD71850 LDO 5 is by default OFF at RUN state */
-static const struct regulator_ops bd718xx_ldo5_ops_hwstate = {
-       .is_enabled = never_enabled_by_hwstate,
-       .list_voltage = regulator_list_voltage_pickable_linear_range,
-       .set_voltage_sel = bd718xx_set_voltage_sel_pickable_restricted,
-       .get_voltage_sel = regulator_get_voltage_sel_pickable_regmap,
-};
-
-BD718XX_OPS(bd718xx_pickable_range_buck_ops,
-           regulator_list_voltage_pickable_linear_range, NULL,
-           regulator_set_voltage_sel_pickable_regmap,
-           regulator_get_voltage_sel_pickable_regmap,
-           regulator_set_voltage_time_sel, NULL);
-
-BD718XX_OPS(bd718xx_ldo_regulator_ops, regulator_list_voltage_linear_range,
-           NULL, bd718xx_set_voltage_sel_restricted,
-           regulator_get_voltage_sel_regmap, NULL, NULL);
-
-BD718XX_OPS(bd718xx_ldo_regulator_nolinear_ops, regulator_list_voltage_table,
-           NULL, bd718xx_set_voltage_sel_restricted,
-           regulator_get_voltage_sel_regmap, NULL, NULL);
-
-BD718XX_OPS(bd718xx_buck_regulator_ops, regulator_list_voltage_linear_range,
-           NULL, regulator_set_voltage_sel_regmap,
-           regulator_get_voltage_sel_regmap, regulator_set_voltage_time_sel,
-           NULL);
-
-BD718XX_OPS(bd718xx_buck_regulator_nolinear_ops, regulator_list_voltage_table,
-           regulator_map_voltage_ascend, regulator_set_voltage_sel_regmap,
-           regulator_get_voltage_sel_regmap, regulator_set_voltage_time_sel,
-           NULL);
-
-/*
- * OPS for BD71837
- */
-BD718XX_OPS(bd71837_pickable_range_ldo_ops,
-           regulator_list_voltage_pickable_linear_range, NULL,
-           bd71837_set_voltage_sel_pickable_restricted,
-           regulator_get_voltage_sel_pickable_regmap, NULL, NULL);
-
-BD718XX_OPS(bd71837_pickable_range_buck_ops,
-           regulator_list_voltage_pickable_linear_range, NULL,
-           bd71837_set_voltage_sel_pickable_restricted,
-           regulator_get_voltage_sel_pickable_regmap,
-           regulator_set_voltage_time_sel, NULL);
-
-BD718XX_OPS(bd71837_ldo_regulator_ops, regulator_list_voltage_linear_range,
-           NULL, bd71837_set_voltage_sel_restricted,
-           regulator_get_voltage_sel_regmap, NULL, NULL);
-
-BD718XX_OPS(bd71837_ldo_regulator_nolinear_ops, regulator_list_voltage_table,
-           NULL, bd71837_set_voltage_sel_restricted,
-           regulator_get_voltage_sel_regmap, NULL, NULL);
-
-BD718XX_OPS(bd71837_buck_regulator_ops, regulator_list_voltage_linear_range,
-           NULL, bd71837_set_voltage_sel_restricted,
-           regulator_get_voltage_sel_regmap, regulator_set_voltage_time_sel,
-           NULL);
-
-BD718XX_OPS(bd71837_buck_regulator_nolinear_ops, regulator_list_voltage_table,
-           regulator_map_voltage_ascend, bd71837_set_voltage_sel_restricted,
-           regulator_get_voltage_sel_regmap, regulator_set_voltage_time_sel,
-           NULL);
-/*
- * BD71837 bucks 3 and 4 support defining their enable/disable state also
- * when buck enable state is under HW state machine control. In that case the
- * bit [2] in CTRL register is used to indicate if regulator should be ON.
- */
-static const struct regulator_ops bd71837_buck34_ops_hwctrl = {
-       .is_enabled = bd71837_get_buck34_enable_hwctrl,
-       .list_voltage = regulator_list_voltage_linear_range,
-       .set_voltage_sel = regulator_set_voltage_sel_regmap,
-       .get_voltage_sel = regulator_get_voltage_sel_regmap,
-       .set_voltage_time_sel = regulator_set_voltage_time_sel,
-       .set_ramp_delay = regulator_set_ramp_delay_regmap,
-};
-
-/*
- * OPS for all of the ICs - BD718(37/47/50)
- */
-BD718XX_OPS(bd718xx_dvs_buck_regulator_ops, regulator_list_voltage_linear_range,
-           NULL, regulator_set_voltage_sel_regmap,
-           regulator_get_voltage_sel_regmap, regulator_set_voltage_time_sel,
-           /* bd718xx_buck1234_set_ramp_delay */ regulator_set_ramp_delay_regmap);
-
 /*
  * BD71837 BUCK1/2/3/4
  * BD71847 BUCK1/2
@@ -536,6 +455,238 @@ struct bd718xx_regulator_data {
        int additional_init_amnt;
 };
 
+static int bd718x7_xvp_sanity_check(struct regulator_dev *rdev, int lim_uV,
+                                   int severity)
+{
+       /*
+        * BD71837/47/50 ... (ICs supported by this driver) do not provide
+        * warnings, only protection
+        */
+       if (severity != REGULATOR_SEVERITY_PROT) {
+               dev_err(&rdev->dev,
+                       "Unsupported Under Voltage protection level\n");
+               return -EINVAL;
+       }
+
+       /*
+        * And protection limit is not changeable. It can only be enabled
+        * or disabled
+        */
+       if (lim_uV)
+               return -EINVAL;
+
+       return 0;
+}
+
+static int bd718x7_set_ldo_uvp(struct regulator_dev *rdev, int lim_uV,
+                              int severity, bool enable)
+{
+       int ldo_offset = rdev->desc->id - BD718XX_LDO1;
+       int prot_bit, ret;
+
+       ret = bd718x7_xvp_sanity_check(rdev, lim_uV, severity);
+       if (ret)
+               return ret;
+
+       prot_bit = BD718XX_LDO1_VRMON80 << ldo_offset;
+
+       if (enable)
+               return regmap_clear_bits(rdev->regmap, BD718XX_REG_MVRFLTMASK2,
+                                        prot_bit);
+
+       return regmap_set_bits(rdev->regmap, BD718XX_REG_MVRFLTMASK2,
+                              prot_bit);
+}
+
+static int bd718x7_get_buck_prot_reg(int id, int *reg)
+{
+
+       if (id > BD718XX_BUCK8) {
+               WARN_ON(id > BD718XX_BUCK8);
+               return -EINVAL;
+       }
+
+       if (id > BD718XX_BUCK4)
+               *reg = BD718XX_REG_MVRFLTMASK0;
+       else
+               *reg = BD718XX_REG_MVRFLTMASK1;
+
+       return 0;
+}
+
+static int bd718x7_get_buck_ovp_info(int id, int *reg, int *bit)
+{
+       int ret;
+
+       ret = bd718x7_get_buck_prot_reg(id, reg);
+       if (ret)
+               return ret;
+
+       *bit = BIT((id % 4) * 2 + 1);
+
+       return 0;
+}
+
+static int bd718x7_get_buck_uvp_info(int id, int *reg, int *bit)
+{
+       int ret;
+
+       ret = bd718x7_get_buck_prot_reg(id, reg);
+       if (ret)
+               return ret;
+
+       *bit = BIT((id % 4) * 2);
+
+       return 0;
+}
+
+static int bd718x7_set_buck_uvp(struct regulator_dev *rdev, int lim_uV,
+                               int severity, bool enable)
+{
+       int bit, reg, ret;
+
+       ret = bd718x7_xvp_sanity_check(rdev, lim_uV, severity);
+       if (ret)
+               return ret;
+
+       ret = bd718x7_get_buck_uvp_info(rdev->desc->id, &reg, &bit);
+       if (ret)
+               return ret;
+
+       if (enable)
+               return regmap_clear_bits(rdev->regmap, reg, bit);
+
+       return regmap_set_bits(rdev->regmap, reg, bit);
+
+}
+
+static int bd718x7_set_buck_ovp(struct regulator_dev *rdev, int lim_uV,
+                               int severity,
+                               bool enable)
+{
+       int bit, reg, ret;
+
+       ret = bd718x7_xvp_sanity_check(rdev, lim_uV, severity);
+       if (ret)
+               return ret;
+
+       ret = bd718x7_get_buck_ovp_info(rdev->desc->id, &reg, &bit);
+       if (ret)
+               return ret;
+
+       if (enable)
+               return regmap_clear_bits(rdev->regmap, reg, bit);
+
+       return regmap_set_bits(rdev->regmap, reg, bit);
+}
+
+/*
+ * OPS common for BD71847 and BD71850
+ */
+BD718XX_OPS(bd718xx_pickable_range_ldo_ops,
+           regulator_list_voltage_pickable_linear_range, NULL,
+           bd718xx_set_voltage_sel_pickable_restricted,
+           regulator_get_voltage_sel_pickable_regmap, NULL, NULL,
+           bd718x7_set_ldo_uvp, NULL);
+
+/* BD71847 and BD71850 LDO 5 is by default OFF at RUN state */
+static const struct regulator_ops bd718xx_ldo5_ops_hwstate = {
+       .is_enabled = never_enabled_by_hwstate,
+       .list_voltage = regulator_list_voltage_pickable_linear_range,
+       .set_voltage_sel = bd718xx_set_voltage_sel_pickable_restricted,
+       .get_voltage_sel = regulator_get_voltage_sel_pickable_regmap,
+       .set_under_voltage_protection = bd718x7_set_ldo_uvp,
+};
+
+BD718XX_OPS(bd718xx_pickable_range_buck_ops,
+           regulator_list_voltage_pickable_linear_range, NULL,
+           regulator_set_voltage_sel_pickable_regmap,
+           regulator_get_voltage_sel_pickable_regmap,
+           regulator_set_voltage_time_sel, NULL, bd718x7_set_buck_uvp,
+           bd718x7_set_buck_ovp);
+
+BD718XX_OPS(bd718xx_ldo_regulator_ops, regulator_list_voltage_linear_range,
+           NULL, bd718xx_set_voltage_sel_restricted,
+           regulator_get_voltage_sel_regmap, NULL, NULL, bd718x7_set_ldo_uvp,
+           NULL);
+
+BD718XX_OPS(bd718xx_ldo_regulator_nolinear_ops, regulator_list_voltage_table,
+           NULL, bd718xx_set_voltage_sel_restricted,
+           regulator_get_voltage_sel_regmap, NULL, NULL, bd718x7_set_ldo_uvp,
+           NULL);
+
+BD718XX_OPS(bd718xx_buck_regulator_ops, regulator_list_voltage_linear_range,
+           NULL, regulator_set_voltage_sel_regmap,
+           regulator_get_voltage_sel_regmap, regulator_set_voltage_time_sel,
+           NULL, bd718x7_set_buck_uvp, bd718x7_set_buck_ovp);
+
+BD718XX_OPS(bd718xx_buck_regulator_nolinear_ops, regulator_list_voltage_table,
+           regulator_map_voltage_ascend, regulator_set_voltage_sel_regmap,
+           regulator_get_voltage_sel_regmap, regulator_set_voltage_time_sel,
+           NULL, bd718x7_set_buck_uvp, bd718x7_set_buck_ovp);
+
+/*
+ * OPS for BD71837
+ */
+BD718XX_OPS(bd71837_pickable_range_ldo_ops,
+           regulator_list_voltage_pickable_linear_range, NULL,
+           bd71837_set_voltage_sel_pickable_restricted,
+           regulator_get_voltage_sel_pickable_regmap, NULL, NULL,
+           bd718x7_set_ldo_uvp, NULL);
+
+BD718XX_OPS(bd71837_pickable_range_buck_ops,
+           regulator_list_voltage_pickable_linear_range, NULL,
+           bd71837_set_voltage_sel_pickable_restricted,
+           regulator_get_voltage_sel_pickable_regmap,
+           regulator_set_voltage_time_sel, NULL, bd718x7_set_buck_uvp,
+           bd718x7_set_buck_ovp);
+
+BD718XX_OPS(bd71837_ldo_regulator_ops, regulator_list_voltage_linear_range,
+           NULL, bd71837_set_voltage_sel_restricted,
+           regulator_get_voltage_sel_regmap, NULL, NULL, bd718x7_set_ldo_uvp,
+           NULL);
+
+BD718XX_OPS(bd71837_ldo_regulator_nolinear_ops, regulator_list_voltage_table,
+           NULL, bd71837_set_voltage_sel_restricted,
+           regulator_get_voltage_sel_regmap, NULL, NULL, bd718x7_set_ldo_uvp,
+           NULL);
+
+BD718XX_OPS(bd71837_buck_regulator_ops, regulator_list_voltage_linear_range,
+           NULL, bd71837_set_voltage_sel_restricted,
+           regulator_get_voltage_sel_regmap, regulator_set_voltage_time_sel,
+           NULL, bd718x7_set_buck_uvp, bd718x7_set_buck_ovp);
+
+BD718XX_OPS(bd71837_buck_regulator_nolinear_ops, regulator_list_voltage_table,
+           regulator_map_voltage_ascend, bd71837_set_voltage_sel_restricted,
+           regulator_get_voltage_sel_regmap, regulator_set_voltage_time_sel,
+           NULL, bd718x7_set_buck_uvp, bd718x7_set_buck_ovp);
+/*
+ * BD71837 bucks 3 and 4 support defining their enable/disable state also
+ * when buck enable state is under HW state machine control. In that case the
+ * bit [2] in CTRL register is used to indicate if regulator should be ON.
+ */
+static const struct regulator_ops bd71837_buck34_ops_hwctrl = {
+       .is_enabled = bd71837_get_buck34_enable_hwctrl,
+       .list_voltage = regulator_list_voltage_linear_range,
+       .set_voltage_sel = regulator_set_voltage_sel_regmap,
+       .get_voltage_sel = regulator_get_voltage_sel_regmap,
+       .set_voltage_time_sel = regulator_set_voltage_time_sel,
+       .set_ramp_delay = regulator_set_ramp_delay_regmap,
+       .set_under_voltage_protection = bd718x7_set_buck_uvp,
+       .set_over_voltage_protection = bd718x7_set_buck_ovp,
+};
+
+/*
+ * OPS for all of the ICs - BD718(37/47/50)
+ */
+BD718XX_OPS(bd718xx_dvs_buck_regulator_ops, regulator_list_voltage_linear_range,
+           NULL, regulator_set_voltage_sel_regmap,
+           regulator_get_voltage_sel_regmap, regulator_set_voltage_time_sel,
+           regulator_set_ramp_delay_regmap, bd718x7_set_buck_uvp,
+           bd718x7_set_buck_ovp);
+
+
+
 /*
  * There is a HW quirk in BD71837. The shutdown sequence timings for
  * bucks/LDOs which are controlled via register interface are changed.
index cf7d534..82f52a2 100644 (file)
@@ -412,6 +412,134 @@ static int da9063_ldo_set_suspend_mode(struct regulator_dev *rdev,
        return regmap_field_write(regl->suspend_sleep, val);
 }
 
+static unsigned int da9063_get_overdrive_mask(const struct regulator_desc *desc)
+{
+       switch (desc->id) {
+       case DA9063_ID_BCORES_MERGED:
+       case DA9063_ID_BCORE1:
+               return DA9063_BCORE1_OD;
+       case DA9063_ID_BCORE2:
+               return DA9063_BCORE2_OD;
+       case DA9063_ID_BPRO:
+               return DA9063_BPRO_OD;
+       default:
+               return 0;
+       }
+}
+
+static int da9063_buck_set_limit_set_overdrive(struct regulator_dev *rdev,
+                                              int min_uA, int max_uA,
+                                              unsigned int overdrive_mask)
+{
+       /*
+        * When enabling overdrive, do it before changing the current limit to
+        * ensure sufficient supply throughout the switch.
+        */
+       struct da9063_regulator *regl = rdev_get_drvdata(rdev);
+       int ret;
+       unsigned int orig_overdrive;
+
+       ret = regmap_read(regl->hw->regmap, DA9063_REG_CONFIG_H,
+                         &orig_overdrive);
+       if (ret < 0)
+               return ret;
+       orig_overdrive &= overdrive_mask;
+
+       if (orig_overdrive == 0) {
+               ret = regmap_set_bits(regl->hw->regmap, DA9063_REG_CONFIG_H,
+                               overdrive_mask);
+               if (ret < 0)
+                       return ret;
+       }
+
+       ret = regulator_set_current_limit_regmap(rdev, min_uA / 2, max_uA / 2);
+       if (ret < 0 && orig_overdrive == 0)
+               /*
+                * regulator_set_current_limit_regmap may have rejected the
+                * change because of unusable min_uA and/or max_uA inputs.
+                * Attempt to restore original overdrive state, ignore failure-
+                * on-failure.
+                */
+               regmap_clear_bits(regl->hw->regmap, DA9063_REG_CONFIG_H,
+                                 overdrive_mask);
+
+       return ret;
+}
+
+static int da9063_buck_set_limit_clear_overdrive(struct regulator_dev *rdev,
+                                                int min_uA, int max_uA,
+                                                unsigned int overdrive_mask)
+{
+       /*
+        * When disabling overdrive, do it after changing the current limit to
+        * ensure sufficient supply throughout the switch.
+        */
+       struct da9063_regulator *regl = rdev_get_drvdata(rdev);
+       int ret, orig_limit;
+
+       ret = regmap_read(rdev->regmap, rdev->desc->csel_reg, &orig_limit);
+       if (ret < 0)
+               return ret;
+
+       ret = regulator_set_current_limit_regmap(rdev, min_uA, max_uA);
+       if (ret < 0)
+               return ret;
+
+       ret = regmap_clear_bits(regl->hw->regmap, DA9063_REG_CONFIG_H,
+                               overdrive_mask);
+       if (ret < 0)
+               /*
+                * Attempt to restore original current limit, ignore failure-
+                * on-failure.
+                */
+               regmap_write(rdev->regmap, rdev->desc->csel_reg, orig_limit);
+
+       return ret;
+}
+
+static int da9063_buck_set_current_limit(struct regulator_dev *rdev,
+                                        int min_uA, int max_uA)
+{
+       unsigned int overdrive_mask, n_currents;
+
+       overdrive_mask = da9063_get_overdrive_mask(rdev->desc);
+       if (overdrive_mask) {
+               n_currents = rdev->desc->n_current_limits;
+               if (n_currents == 0)
+                       return -EINVAL;
+
+               if (max_uA > rdev->desc->curr_table[n_currents - 1])
+                       return da9063_buck_set_limit_set_overdrive(rdev, min_uA,
+                                                                  max_uA,
+                                                                  overdrive_mask);
+
+               return da9063_buck_set_limit_clear_overdrive(rdev, min_uA,
+                                                            max_uA,
+                                                            overdrive_mask);
+       }
+       return regulator_set_current_limit_regmap(rdev, min_uA, max_uA);
+}
+
+static int da9063_buck_get_current_limit(struct regulator_dev *rdev)
+{
+       struct da9063_regulator *regl = rdev_get_drvdata(rdev);
+       int val, ret, limit;
+       unsigned int mask;
+
+       limit = regulator_get_current_limit_regmap(rdev);
+       if (limit < 0)
+               return limit;
+       mask = da9063_get_overdrive_mask(rdev->desc);
+       if (mask) {
+               ret = regmap_read(regl->hw->regmap, DA9063_REG_CONFIG_H, &val);
+               if (ret < 0)
+                       return ret;
+               if (val & mask)
+                       limit *= 2;
+       }
+       return limit;
+}
+
 static const struct regulator_ops da9063_buck_ops = {
        .enable                 = regulator_enable_regmap,
        .disable                = regulator_disable_regmap,
@@ -419,8 +547,8 @@ static const struct regulator_ops da9063_buck_ops = {
        .get_voltage_sel        = regulator_get_voltage_sel_regmap,
        .set_voltage_sel        = regulator_set_voltage_sel_regmap,
        .list_voltage           = regulator_list_voltage_linear,
-       .set_current_limit      = regulator_set_current_limit_regmap,
-       .get_current_limit      = regulator_get_current_limit_regmap,
+       .set_current_limit      = da9063_buck_set_current_limit,
+       .get_current_limit      = da9063_buck_get_current_limit,
        .set_mode               = da9063_buck_set_mode,
        .get_mode               = da9063_buck_get_mode,
        .get_status             = da9063_buck_get_status,
index 8b70bfe..a45c1e1 100644 (file)
@@ -117,11 +117,11 @@ ux500_regulator_debug_init(struct platform_device *pdev,
        rdebug.dir = debugfs_create_dir("ux500-regulator", NULL);
 
        /* create "status" file */
-       debugfs_create_file("status", S_IRUGO, rdebug.dir, &pdev->dev,
+       debugfs_create_file("status", 0444, rdebug.dir, &pdev->dev,
                            &ux500_regulator_status_fops);
 
        /* create "power-state-count" file */
-       debugfs_create_file("power-state-count", S_IRUGO, rdebug.dir,
+       debugfs_create_file("power-state-count", 0444, rdebug.dir,
                            &pdev->dev, &ux500_regulator_power_state_cnt_fops);
 
        rdebug.regulator_array = regulator_info;
index a8de0aa..9113233 100644 (file)
@@ -205,35 +205,6 @@ struct regulator_dev *devm_regulator_register(struct device *dev,
 }
 EXPORT_SYMBOL_GPL(devm_regulator_register);
 
-static int devm_rdev_match(struct device *dev, void *res, void *data)
-{
-       struct regulator_dev **r = res;
-       if (!r || !*r) {
-               WARN_ON(!r || !*r);
-               return 0;
-       }
-       return *r == data;
-}
-
-/**
- * devm_regulator_unregister - Resource managed regulator_unregister()
- * @dev:  device to supply
- * @rdev: regulator to free
- *
- * Unregister a regulator registered with devm_regulator_register().
- * Normally this function will not need to be called and the resource
- * management code will ensure that the resource is freed.
- */
-void devm_regulator_unregister(struct device *dev, struct regulator_dev *rdev)
-{
-       int rc;
-
-       rc = devres_release(dev, devm_rdev_release, devm_rdev_match, rdev);
-       if (rc != 0)
-               WARN_ON(rc);
-}
-EXPORT_SYMBOL_GPL(devm_regulator_unregister);
-
 struct regulator_supply_alias_match {
        struct device *dev;
        const char *id;
@@ -296,19 +267,8 @@ int devm_regulator_register_supply_alias(struct device *dev, const char *id,
 }
 EXPORT_SYMBOL_GPL(devm_regulator_register_supply_alias);
 
-/**
- * devm_regulator_unregister_supply_alias - Resource managed
- * regulator_unregister_supply_alias()
- *
- * @dev: device to supply
- * @id:  supply name or regulator ID
- *
- * Unregister an alias registered with
- * devm_regulator_register_supply_alias(). Normally this function
- * will not need to be called and the resource management code
- * will ensure that the resource is freed.
- */
-void devm_regulator_unregister_supply_alias(struct device *dev, const char *id)
+static void devm_regulator_unregister_supply_alias(struct device *dev,
+                                                  const char *id)
 {
        struct regulator_supply_alias_match match;
        int rc;
@@ -321,7 +281,6 @@ void devm_regulator_unregister_supply_alias(struct device *dev, const char *id)
        if (rc != 0)
                WARN_ON(rc);
 }
-EXPORT_SYMBOL_GPL(devm_regulator_unregister_supply_alias);
 
 /**
  * devm_regulator_bulk_register_supply_alias - Managed register
@@ -373,30 +332,6 @@ err:
 }
 EXPORT_SYMBOL_GPL(devm_regulator_bulk_register_supply_alias);
 
-/**
- * devm_regulator_bulk_unregister_supply_alias - Managed unregister
- * multiple aliases
- *
- * @dev:    device to supply
- * @id:     list of supply names or regulator IDs
- * @num_id: number of aliases to unregister
- *
- * Unregister aliases registered with
- * devm_regulator_bulk_register_supply_alias(). Normally this function
- * will not need to be called and the resource management code
- * will ensure that the resource is freed.
- */
-void devm_regulator_bulk_unregister_supply_alias(struct device *dev,
-                                                const char *const *id,
-                                                int num_id)
-{
-       int i;
-
-       for (i = 0; i < num_id; ++i)
-               devm_regulator_unregister_supply_alias(dev, id[i]);
-}
-EXPORT_SYMBOL_GPL(devm_regulator_bulk_unregister_supply_alias);
-
 struct regulator_notifier_match {
        struct regulator *regulator;
        struct notifier_block *nb;
index 3928461..599ad20 100644 (file)
@@ -287,8 +287,9 @@ static int reg_fixed_voltage_probe(struct platform_device *pdev)
        drvdata->dev = devm_regulator_register(&pdev->dev, &drvdata->desc,
                                               &cfg);
        if (IS_ERR(drvdata->dev)) {
-               ret = PTR_ERR(drvdata->dev);
-               dev_err(&pdev->dev, "Failed to register regulator: %d\n", ret);
+               ret = dev_err_probe(&pdev->dev, PTR_ERR(drvdata->dev),
+                                   "Failed to register regulator: %ld\n",
+                                   PTR_ERR(drvdata->dev));
                return ret;
        }
 
index 845bc3b..662d87a 100644 (file)
@@ -4,7 +4,7 @@
 //
 // Copyright (c) 2013 Linaro Ltd.
 // Copyright (c) 2011 HiSilicon Ltd.
-// Copyright (c) 2020-2021 Huawei Technologies Co., Ltd
+// Copyright (c) 2020-2021 Huawei Technologies Co., Ltd.
 //
 // Guodong Xu <guodong.xu@linaro.org>
 
@@ -27,34 +27,34 @@ struct hi6421_spmi_reg_info {
        u32                     eco_uA;
 };
 
-static const unsigned int ldo3_voltages[] = {
+static const unsigned int range_1v5_to_2v0[] = {
        1500000, 1550000, 1600000, 1650000,
        1700000, 1725000, 1750000, 1775000,
        1800000, 1825000, 1850000, 1875000,
        1900000, 1925000, 1950000, 2000000
 };
 
-static const unsigned int ldo4_voltages[] = {
+static const unsigned int range_1v725_to_1v9[] = {
        1725000, 1750000, 1775000, 1800000,
        1825000, 1850000, 1875000, 1900000
 };
 
-static const unsigned int ldo9_voltages[] = {
+static const unsigned int range_1v75_to_3v3[] = {
        1750000, 1800000, 1825000, 2800000,
        2850000, 2950000, 3000000, 3300000
 };
 
-static const unsigned int ldo15_voltages[] = {
+static const unsigned int range_1v8_to_3v0[] = {
        1800000, 1850000, 2400000, 2600000,
        2700000, 2850000, 2950000, 3000000
 };
 
-static const unsigned int ldo17_voltages[] = {
+static const unsigned int range_2v5_to_3v3[] = {
        2500000, 2600000, 2700000, 2800000,
        3000000, 3100000, 3200000, 3300000
 };
 
-static const unsigned int ldo34_voltages[] = {
+static const unsigned int range_2v6_to_3v3[] = {
        2600000, 2700000, 2800000, 2900000,
        3000000, 3100000, 3200000, 3300000
 };
@@ -73,14 +73,14 @@ static const unsigned int ldo34_voltages[] = {
  */
 #define HI6421V600_LDO(_id, vtable, ereg, emask, vreg,                        \
                       odelay, etime, ecomask, ecoamp)                         \
-       [HI6421V600_##_id] = {                                                 \
+       [hi6421v600_##_id] = {                                                 \
                .desc = {                                                      \
                        .name           = #_id,                                \
                        .of_match        = of_match_ptr(#_id),                 \
                        .regulators_node = of_match_ptr("regulators"),         \
                        .ops            = &hi6421_spmi_ldo_rops,               \
                        .type           = REGULATOR_VOLTAGE,                   \
-                       .id             = HI6421V600_##_id,                    \
+                       .id             = hi6421v600_##_id,                    \
                        .owner          = THIS_MODULE,                         \
                        .volt_table     = vtable,                              \
                        .n_voltages     = ARRAY_SIZE(vtable),                  \
@@ -185,46 +185,46 @@ static const struct regulator_ops hi6421_spmi_ldo_rops = {
 
 /* HI6421v600 regulators with known registers */
 enum hi6421_spmi_regulator_id {
-       HI6421V600_LDO3,
-       HI6421V600_LDO4,
-       HI6421V600_LDO9,
-       HI6421V600_LDO15,
-       HI6421V600_LDO16,
-       HI6421V600_LDO17,
-       HI6421V600_LDO33,
-       HI6421V600_LDO34,
+       hi6421v600_ldo3,
+       hi6421v600_ldo4,
+       hi6421v600_ldo9,
+       hi6421v600_ldo15,
+       hi6421v600_ldo16,
+       hi6421v600_ldo17,
+       hi6421v600_ldo33,
+       hi6421v600_ldo34,
 };
 
 static struct hi6421_spmi_reg_info regulator_info[] = {
-       HI6421V600_LDO(LDO3, ldo3_voltages,
+       HI6421V600_LDO(ldo3, range_1v5_to_2v0,
                       0x16, 0x01, 0x51,
                       20000, 120,
                       0, 0),
-       HI6421V600_LDO(LDO4, ldo4_voltages,
+       HI6421V600_LDO(ldo4, range_1v725_to_1v9,
                       0x17, 0x01, 0x52,
                       20000, 120,
                       0x10, 10000),
-       HI6421V600_LDO(LDO9, ldo9_voltages,
+       HI6421V600_LDO(ldo9, range_1v75_to_3v3,
                       0x1c, 0x01, 0x57,
                       20000, 360,
                       0x10, 10000),
-       HI6421V600_LDO(LDO15, ldo15_voltages,
+       HI6421V600_LDO(ldo15, range_1v8_to_3v0,
                       0x21, 0x01, 0x5c,
                       20000, 360,
                       0x10, 10000),
-       HI6421V600_LDO(LDO16, ldo15_voltages,
+       HI6421V600_LDO(ldo16, range_1v8_to_3v0,
                       0x22, 0x01, 0x5d,
                       20000, 360,
                       0x10, 10000),
-       HI6421V600_LDO(LDO17, ldo17_voltages,
+       HI6421V600_LDO(ldo17, range_2v5_to_3v3,
                       0x23, 0x01, 0x5e,
                       20000, 120,
                       0x10, 10000),
-       HI6421V600_LDO(LDO33, ldo17_voltages,
+       HI6421V600_LDO(ldo33, range_2v5_to_3v3,
                       0x32, 0x01, 0x6d,
                       20000, 120,
                       0, 0),
-       HI6421V600_LDO(LDO34, ldo34_voltages,
+       HI6421V600_LDO(ldo34, range_2v6_to_3v3,
                       0x33, 0x01, 0x6e,
                       20000, 120,
                       0, 0),
index fabe2e5..5227644 100644 (file)
@@ -184,7 +184,7 @@ static irqreturn_t regulator_notifier_isr(int irq, void *data)
         * If retry_count exceeds the given safety limit we call IC specific die
         * handler which can try disabling regulator(s).
         *
-        * If no die handler is given we will just bug() as a last resort.
+        * If no die handler is given we will just power-off as a last resort.
         *
         * We could try disabling all associated rdevs - but we might shoot
         * ourselves in the head and leave the problematic regulator enabled. So
index 0d35be4..eb80278 100644 (file)
@@ -28,18 +28,15 @@ struct mt6358_regulator_info {
        u32 qi;
        const u32 *index_table;
        unsigned int n_table;
-       u32 vsel_shift;
        u32 da_vsel_reg;
        u32 da_vsel_mask;
-       u32 da_vsel_shift;
        u32 modeset_reg;
        u32 modeset_mask;
-       u32 modeset_shift;
 };
 
 #define MT6358_BUCK(match, vreg, min, max, step,               \
        volt_ranges, vosel_mask, _da_vsel_reg, _da_vsel_mask,   \
-       _da_vsel_shift, _modeset_reg, _modeset_shift)           \
+       _modeset_reg, _modeset_shift)           \
 [MT6358_ID_##vreg] = { \
        .desc = {       \
                .name = #vreg,  \
@@ -61,15 +58,13 @@ struct mt6358_regulator_info {
        .qi = BIT(0),   \
        .da_vsel_reg = _da_vsel_reg,    \
        .da_vsel_mask = _da_vsel_mask,  \
-       .da_vsel_shift = _da_vsel_shift,        \
        .modeset_reg = _modeset_reg,    \
        .modeset_mask = BIT(_modeset_shift),    \
-       .modeset_shift = _modeset_shift \
 }
 
 #define MT6358_LDO(match, vreg, ldo_volt_table,        \
        ldo_index_table, enreg, enbit, vosel,   \
-       vosel_mask, vosel_shift)        \
+       vosel_mask)     \
 [MT6358_ID_##vreg] = { \
        .desc = {       \
                .name = #vreg,  \
@@ -89,12 +84,11 @@ struct mt6358_regulator_info {
        .qi = BIT(15),  \
        .index_table = ldo_index_table, \
        .n_table = ARRAY_SIZE(ldo_index_table), \
-       .vsel_shift = vosel_shift,      \
 }
 
 #define MT6358_LDO1(match, vreg, min, max, step,       \
        volt_ranges, _da_vsel_reg, _da_vsel_mask,       \
-       _da_vsel_shift, vosel, vosel_mask)      \
+       vosel, vosel_mask)      \
 [MT6358_ID_##vreg] = { \
        .desc = {       \
                .name = #vreg,  \
@@ -113,7 +107,6 @@ struct mt6358_regulator_info {
        },      \
        .da_vsel_reg = _da_vsel_reg,    \
        .da_vsel_mask = _da_vsel_mask,  \
-       .da_vsel_shift = _da_vsel_shift,        \
        .status_reg = MT6358_LDO_##vreg##_DBG1, \
        .qi = BIT(0),   \
 }
@@ -260,9 +253,9 @@ static int mt6358_set_voltage_sel(struct regulator_dev *rdev,
        pvol = info->index_table;
 
        idx = pvol[selector];
+       idx <<= ffs(info->desc.vsel_mask) - 1;
        ret = regmap_update_bits(rdev->regmap, info->desc.vsel_reg,
-                                info->desc.vsel_mask,
-                                idx << info->vsel_shift);
+                                info->desc.vsel_mask, idx);
 
        return ret;
 }
@@ -282,7 +275,8 @@ static int mt6358_get_voltage_sel(struct regulator_dev *rdev)
                return ret;
        }
 
-       selector = (selector & info->desc.vsel_mask) >> info->vsel_shift;
+       selector = (selector & info->desc.vsel_mask) >>
+                       (ffs(info->desc.vsel_mask) - 1);
        pvol = info->index_table;
        for (idx = 0; idx < info->desc.n_voltages; idx++) {
                if (pvol[idx] == selector)
@@ -305,7 +299,7 @@ static int mt6358_get_buck_voltage_sel(struct regulator_dev *rdev)
                return ret;
        }
 
-       ret = (regval >> info->da_vsel_shift) & info->da_vsel_mask;
+       ret = (regval & info->da_vsel_mask) >> (ffs(info->da_vsel_mask) - 1);
 
        return ret;
 }
@@ -342,11 +336,10 @@ static int mt6358_regulator_set_mode(struct regulator_dev *rdev,
                return -EINVAL;
        }
 
-       dev_dbg(&rdev->dev, "mt6358 buck set_mode %#x, %#x, %#x, %#x\n",
-               info->modeset_reg, info->modeset_mask,
-               info->modeset_shift, val);
+       dev_dbg(&rdev->dev, "mt6358 buck set_mode %#x, %#x, %#x\n",
+               info->modeset_reg, info->modeset_mask, val);
 
-       val <<= info->modeset_shift;
+       val <<= ffs(info->modeset_mask) - 1;
 
        return regmap_update_bits(rdev->regmap, info->modeset_reg,
                                  info->modeset_mask, val);
@@ -364,7 +357,7 @@ static unsigned int mt6358_regulator_get_mode(struct regulator_dev *rdev)
                return ret;
        }
 
-       switch ((regval & info->modeset_mask) >> info->modeset_shift) {
+       switch ((regval & info->modeset_mask) >> (ffs(info->modeset_mask) - 1)) {
        case MT6358_BUCK_MODE_AUTO:
                return REGULATOR_MODE_NORMAL;
        case MT6358_BUCK_MODE_FORCE_PWM:
@@ -412,30 +405,30 @@ static const struct regulator_ops mt6358_volt_fixed_ops = {
 static struct mt6358_regulator_info mt6358_regulators[] = {
        MT6358_BUCK("buck_vdram1", VDRAM1, 500000, 2087500, 12500,
                    buck_volt_range2, 0x7f, MT6358_BUCK_VDRAM1_DBG0, 0x7f,
-                   0, MT6358_VDRAM1_ANA_CON0, 8),
+                   MT6358_VDRAM1_ANA_CON0, 8),
        MT6358_BUCK("buck_vcore", VCORE, 500000, 1293750, 6250,
                    buck_volt_range1, 0x7f, MT6358_BUCK_VCORE_DBG0, 0x7f,
-                   0, MT6358_VCORE_VGPU_ANA_CON0, 1),
+                   MT6358_VCORE_VGPU_ANA_CON0, 1),
        MT6358_BUCK("buck_vpa", VPA, 500000, 3650000, 50000,
-                   buck_volt_range3, 0x3f, MT6358_BUCK_VPA_DBG0, 0x3f, 0,
+                   buck_volt_range3, 0x3f, MT6358_BUCK_VPA_DBG0, 0x3f,
                    MT6358_VPA_ANA_CON0, 3),
        MT6358_BUCK("buck_vproc11", VPROC11, 500000, 1293750, 6250,
                    buck_volt_range1, 0x7f, MT6358_BUCK_VPROC11_DBG0, 0x7f,
-                   0, MT6358_VPROC_ANA_CON0, 1),
+                   MT6358_VPROC_ANA_CON0, 1),
        MT6358_BUCK("buck_vproc12", VPROC12, 500000, 1293750, 6250,
                    buck_volt_range1, 0x7f, MT6358_BUCK_VPROC12_DBG0, 0x7f,
-                   0, MT6358_VPROC_ANA_CON0, 2),
+                   MT6358_VPROC_ANA_CON0, 2),
        MT6358_BUCK("buck_vgpu", VGPU, 500000, 1293750, 6250,
-                   buck_volt_range1, 0x7f, MT6358_BUCK_VGPU_ELR0, 0x7f, 0,
+                   buck_volt_range1, 0x7f, MT6358_BUCK_VGPU_ELR0, 0x7f,
                    MT6358_VCORE_VGPU_ANA_CON0, 2),
        MT6358_BUCK("buck_vs2", VS2, 500000, 2087500, 12500,
-                   buck_volt_range2, 0x7f, MT6358_BUCK_VS2_DBG0, 0x7f, 0,
+                   buck_volt_range2, 0x7f, MT6358_BUCK_VS2_DBG0, 0x7f,
                    MT6358_VS2_ANA_CON0, 8),
        MT6358_BUCK("buck_vmodem", VMODEM, 500000, 1293750, 6250,
                    buck_volt_range1, 0x7f, MT6358_BUCK_VMODEM_DBG0, 0x7f,
-                   0, MT6358_VMODEM_ANA_CON0, 8),
+                   MT6358_VMODEM_ANA_CON0, 8),
        MT6358_BUCK("buck_vs1", VS1, 1000000, 2587500, 12500,
-                   buck_volt_range4, 0x7f, MT6358_BUCK_VS1_DBG0, 0x7f, 0,
+                   buck_volt_range4, 0x7f, MT6358_BUCK_VS1_DBG0, 0x7f,
                    MT6358_VS1_ANA_CON0, 8),
        MT6358_REG_FIXED("ldo_vrf12", VRF12,
                         MT6358_LDO_VRF12_CON0, 0, 1200000),
@@ -457,49 +450,49 @@ static struct mt6358_regulator_info mt6358_regulators[] = {
        MT6358_REG_FIXED("ldo_vaud28", VAUD28,
                         MT6358_LDO_VAUD28_CON0, 0, 2800000),
        MT6358_LDO("ldo_vdram2", VDRAM2, vdram2_voltages, vdram2_idx,
-                  MT6358_LDO_VDRAM2_CON0, 0, MT6358_LDO_VDRAM2_ELR0, 0xf, 0),
+                  MT6358_LDO_VDRAM2_CON0, 0, MT6358_LDO_VDRAM2_ELR0, 0xf),
        MT6358_LDO("ldo_vsim1", VSIM1, vsim_voltages, vsim_idx,
-                  MT6358_LDO_VSIM1_CON0, 0, MT6358_VSIM1_ANA_CON0, 0xf00, 8),
+                  MT6358_LDO_VSIM1_CON0, 0, MT6358_VSIM1_ANA_CON0, 0xf00),
        MT6358_LDO("ldo_vibr", VIBR, vibr_voltages, vibr_idx,
-                  MT6358_LDO_VIBR_CON0, 0, MT6358_VIBR_ANA_CON0, 0xf00, 8),
+                  MT6358_LDO_VIBR_CON0, 0, MT6358_VIBR_ANA_CON0, 0xf00),
        MT6358_LDO("ldo_vusb", VUSB, vusb_voltages, vusb_idx,
-                  MT6358_LDO_VUSB_CON0_0, 0, MT6358_VUSB_ANA_CON0, 0x700, 8),
+                  MT6358_LDO_VUSB_CON0_0, 0, MT6358_VUSB_ANA_CON0, 0x700),
        MT6358_LDO("ldo_vcamd", VCAMD, vcamd_voltages, vcamd_idx,
-                  MT6358_LDO_VCAMD_CON0, 0, MT6358_VCAMD_ANA_CON0, 0xf00, 8),
+                  MT6358_LDO_VCAMD_CON0, 0, MT6358_VCAMD_ANA_CON0, 0xf00),
        MT6358_LDO("ldo_vefuse", VEFUSE, vefuse_voltages, vefuse_idx,
-                  MT6358_LDO_VEFUSE_CON0, 0, MT6358_VEFUSE_ANA_CON0, 0xf00, 8),
+                  MT6358_LDO_VEFUSE_CON0, 0, MT6358_VEFUSE_ANA_CON0, 0xf00),
        MT6358_LDO("ldo_vmch", VMCH, vmch_vemc_voltages, vmch_vemc_idx,
-                  MT6358_LDO_VMCH_CON0, 0, MT6358_VMCH_ANA_CON0, 0x700, 8),
+                  MT6358_LDO_VMCH_CON0, 0, MT6358_VMCH_ANA_CON0, 0x700),
        MT6358_LDO("ldo_vcama1", VCAMA1, vcama_voltages, vcama_idx,
-                  MT6358_LDO_VCAMA1_CON0, 0, MT6358_VCAMA1_ANA_CON0, 0xf00, 8),
+                  MT6358_LDO_VCAMA1_CON0, 0, MT6358_VCAMA1_ANA_CON0, 0xf00),
        MT6358_LDO("ldo_vemc", VEMC, vmch_vemc_voltages, vmch_vemc_idx,
-                  MT6358_LDO_VEMC_CON0, 0, MT6358_VEMC_ANA_CON0, 0x700, 8),
+                  MT6358_LDO_VEMC_CON0, 0, MT6358_VEMC_ANA_CON0, 0x700),
        MT6358_LDO("ldo_vcn33_bt", VCN33_BT, vcn33_bt_wifi_voltages,
                   vcn33_bt_wifi_idx, MT6358_LDO_VCN33_CON0_0,
-                  0, MT6358_VCN33_ANA_CON0, 0x300, 8),
+                  0, MT6358_VCN33_ANA_CON0, 0x300),
        MT6358_LDO("ldo_vcn33_wifi", VCN33_WIFI, vcn33_bt_wifi_voltages,
                   vcn33_bt_wifi_idx, MT6358_LDO_VCN33_CON0_1,
-                  0, MT6358_VCN33_ANA_CON0, 0x300, 8),
+                  0, MT6358_VCN33_ANA_CON0, 0x300),
        MT6358_LDO("ldo_vcama2", VCAMA2, vcama_voltages, vcama_idx,
-                  MT6358_LDO_VCAMA2_CON0, 0, MT6358_VCAMA2_ANA_CON0, 0xf00, 8),
+                  MT6358_LDO_VCAMA2_CON0, 0, MT6358_VCAMA2_ANA_CON0, 0xf00),
        MT6358_LDO("ldo_vmc", VMC, vmc_voltages, vmc_idx,
-                  MT6358_LDO_VMC_CON0, 0, MT6358_VMC_ANA_CON0, 0xf00, 8),
+                  MT6358_LDO_VMC_CON0, 0, MT6358_VMC_ANA_CON0, 0xf00),
        MT6358_LDO("ldo_vldo28", VLDO28, vldo28_voltages, vldo28_idx,
                   MT6358_LDO_VLDO28_CON0_0, 0,
-                  MT6358_VLDO28_ANA_CON0, 0x300, 8),
+                  MT6358_VLDO28_ANA_CON0, 0x300),
        MT6358_LDO("ldo_vsim2", VSIM2, vsim_voltages, vsim_idx,
-                  MT6358_LDO_VSIM2_CON0, 0, MT6358_VSIM2_ANA_CON0, 0xf00, 8),
+                  MT6358_LDO_VSIM2_CON0, 0, MT6358_VSIM2_ANA_CON0, 0xf00),
        MT6358_LDO1("ldo_vsram_proc11", VSRAM_PROC11, 500000, 1293750, 6250,
-                   buck_volt_range1, MT6358_LDO_VSRAM_PROC11_DBG0, 0x7f, 8,
+                   buck_volt_range1, MT6358_LDO_VSRAM_PROC11_DBG0, 0x7f00,
                    MT6358_LDO_VSRAM_CON0, 0x7f),
        MT6358_LDO1("ldo_vsram_others", VSRAM_OTHERS, 500000, 1293750, 6250,
-                   buck_volt_range1, MT6358_LDO_VSRAM_OTHERS_DBG0, 0x7f, 8,
+                   buck_volt_range1, MT6358_LDO_VSRAM_OTHERS_DBG0, 0x7f00,
                    MT6358_LDO_VSRAM_CON2, 0x7f),
        MT6358_LDO1("ldo_vsram_gpu", VSRAM_GPU, 500000, 1293750, 6250,
-                   buck_volt_range1, MT6358_LDO_VSRAM_GPU_DBG0, 0x7f, 8,
+                   buck_volt_range1, MT6358_LDO_VSRAM_GPU_DBG0, 0x7f00,
                    MT6358_LDO_VSRAM_CON3, 0x7f),
        MT6358_LDO1("ldo_vsram_proc12", VSRAM_PROC12, 500000, 1293750, 6250,
-                   buck_volt_range1, MT6358_LDO_VSRAM_PROC12_DBG0, 0x7f, 8,
+                   buck_volt_range1, MT6358_LDO_VSRAM_PROC12_DBG0, 0x7f00,
                    MT6358_LDO_VSRAM_CON1, 0x7f),
 };
 
index 7ce0bd3..de3b046 100644 (file)
@@ -27,7 +27,6 @@
  * @qi: Mask for query enable signal status of regulators.
  * @modeset_reg: for operating AUTO/PWM mode register.
  * @modeset_mask: MASK for operating modeset register.
- * @modeset_shift: SHIFT for operating modeset register.
  */
 struct mt6359_regulator_info {
        struct regulator_desc desc;
@@ -35,10 +34,8 @@ struct mt6359_regulator_info {
        u32 qi;
        u32 modeset_reg;
        u32 modeset_mask;
-       u32 modeset_shift;
        u32 lp_mode_reg;
        u32 lp_mode_mask;
-       u32 lp_mode_shift;
 };
 
 #define MT6359_BUCK(match, _name, min, max, step,              \
@@ -68,10 +65,8 @@ struct mt6359_regulator_info {
        .qi = BIT(0),                                           \
        .lp_mode_reg = _lp_mode_reg,                            \
        .lp_mode_mask = BIT(_lp_mode_shift),                    \
-       .lp_mode_shift = _lp_mode_shift,                        \
        .modeset_reg = _modeset_reg,                            \
        .modeset_mask = BIT(_modeset_shift),                    \
-       .modeset_shift = _modeset_shift                         \
 }
 
 #define MT6359_LDO_LINEAR(match, _name, min, max, step,                \
@@ -282,8 +277,10 @@ static unsigned int mt6359_regulator_get_mode(struct regulator_dev *rdev)
                return ret;
        }
 
-       if ((regval & info->modeset_mask) >> info->modeset_shift ==
-               MT6359_BUCK_MODE_FORCE_PWM)
+       regval &= info->modeset_mask;
+       regval >>= ffs(info->modeset_mask) - 1;
+
+       if (regval == MT6359_BUCK_MODE_FORCE_PWM)
                return REGULATOR_MODE_FAST;
 
        ret = regmap_read(rdev->regmap, info->lp_mode_reg, &regval);
@@ -310,7 +307,7 @@ static int mt6359_regulator_set_mode(struct regulator_dev *rdev,
        switch (mode) {
        case REGULATOR_MODE_FAST:
                val = MT6359_BUCK_MODE_FORCE_PWM;
-               val <<= info->modeset_shift;
+               val <<= ffs(info->modeset_mask) - 1;
                ret = regmap_update_bits(rdev->regmap,
                                         info->modeset_reg,
                                         info->modeset_mask,
@@ -319,14 +316,14 @@ static int mt6359_regulator_set_mode(struct regulator_dev *rdev,
        case REGULATOR_MODE_NORMAL:
                if (curr_mode == REGULATOR_MODE_FAST) {
                        val = MT6359_BUCK_MODE_AUTO;
-                       val <<= info->modeset_shift;
+                       val <<= ffs(info->modeset_mask) - 1;
                        ret = regmap_update_bits(rdev->regmap,
                                                 info->modeset_reg,
                                                 info->modeset_mask,
                                                 val);
                } else if (curr_mode == REGULATOR_MODE_IDLE) {
                        val = MT6359_BUCK_MODE_NORMAL;
-                       val <<= info->lp_mode_shift;
+                       val <<= ffs(info->lp_mode_mask) - 1;
                        ret = regmap_update_bits(rdev->regmap,
                                                 info->lp_mode_reg,
                                                 info->lp_mode_mask,
@@ -336,7 +333,7 @@ static int mt6359_regulator_set_mode(struct regulator_dev *rdev,
                break;
        case REGULATOR_MODE_IDLE:
                val = MT6359_BUCK_MODE_LP >> 1;
-               val <<= info->lp_mode_shift;
+               val <<= ffs(info->lp_mode_mask) - 1;
                ret = regmap_update_bits(rdev->regmap,
                                         info->lp_mode_reg,
                                         info->lp_mode_mask,
index 0a30df5..b9bf7ad 100644 (file)
@@ -32,7 +32,6 @@ struct mt6397_regulator_info {
        u32 vselctrl_mask;
        u32 modeset_reg;
        u32 modeset_mask;
-       u32 modeset_shift;
 };
 
 #define MT6397_BUCK(match, vreg, min, max, step, volt_ranges, enreg,   \
@@ -61,7 +60,6 @@ struct mt6397_regulator_info {
        .vselctrl_mask = BIT(1),                                        \
        .modeset_reg = _modeset_reg,                                    \
        .modeset_mask = BIT(_modeset_shift),                            \
-       .modeset_shift = _modeset_shift                                 \
 }
 
 #define MT6397_LDO(match, vreg, ldo_volt_table, enreg, enbit, vosel,   \
@@ -175,11 +173,11 @@ static int mt6397_regulator_set_mode(struct regulator_dev *rdev,
                goto err_mode;
        }
 
-       dev_dbg(&rdev->dev, "mt6397 buck set_mode %#x, %#x, %#x, %#x\n",
-               info->modeset_reg, info->modeset_mask,
-               info->modeset_shift, val);
+       dev_dbg(&rdev->dev, "mt6397 buck set_mode %#x, %#x, %#x\n",
+               info->modeset_reg, info->modeset_mask, val);
+
+       val <<= ffs(info->modeset_mask) - 1;
 
-       val <<= info->modeset_shift;
        ret = regmap_update_bits(rdev->regmap, info->modeset_reg,
                                 info->modeset_mask, val);
 err_mode:
@@ -204,7 +202,10 @@ static unsigned int mt6397_regulator_get_mode(struct regulator_dev *rdev)
                return ret;
        }
 
-       switch ((regval & info->modeset_mask) >> info->modeset_shift) {
+       regval &= info->modeset_mask;
+       regval >>= ffs(info->modeset_mask) - 1;
+
+       switch (regval) {
        case MT6397_BUCK_MODE_AUTO:
                return REGULATOR_MODE_NORMAL;
        case MT6397_BUCK_MODE_FORCE_PWM:
index 0e73116..da4cf5a 100644 (file)
 #include <linux/mfd/rt5033-private.h>
 #include <linux/regulator/of_regulator.h>
 
+static const struct linear_range rt5033_buck_ranges[] = {
+       REGULATOR_LINEAR_RANGE(1000000, 0, 20, 100000),
+       REGULATOR_LINEAR_RANGE(3000000, 21, 31, 0),
+};
+
+static const struct linear_range rt5033_ldo_ranges[] = {
+       REGULATOR_LINEAR_RANGE(1200000, 0, 18, 100000),
+       REGULATOR_LINEAR_RANGE(3000000, 19, 31, 0),
+};
+
 static const struct regulator_ops rt5033_safe_ldo_ops = {
        .is_enabled             = regulator_is_enabled_regmap,
        .enable                 = regulator_enable_regmap,
@@ -24,8 +34,7 @@ static const struct regulator_ops rt5033_buck_ops = {
        .is_enabled             = regulator_is_enabled_regmap,
        .enable                 = regulator_enable_regmap,
        .disable                = regulator_disable_regmap,
-       .list_voltage           = regulator_list_voltage_linear,
-       .map_voltage            = regulator_map_voltage_linear,
+       .list_voltage           = regulator_list_voltage_linear_range,
        .get_voltage_sel        = regulator_get_voltage_sel_regmap,
        .set_voltage_sel        = regulator_set_voltage_sel_regmap,
 };
@@ -40,8 +49,8 @@ static const struct regulator_desc rt5033_supported_regulators[] = {
                .type           = REGULATOR_VOLTAGE,
                .owner          = THIS_MODULE,
                .n_voltages     = RT5033_REGULATOR_BUCK_VOLTAGE_STEP_NUM,
-               .min_uV         = RT5033_REGULATOR_BUCK_VOLTAGE_MIN,
-               .uV_step        = RT5033_REGULATOR_BUCK_VOLTAGE_STEP,
+               .linear_ranges  = rt5033_buck_ranges,
+               .n_linear_ranges = ARRAY_SIZE(rt5033_buck_ranges),
                .enable_reg     = RT5033_REG_CTRL,
                .enable_mask    = RT5033_CTRL_EN_BUCK_MASK,
                .vsel_reg       = RT5033_REG_BUCK_CTRL,
@@ -56,8 +65,8 @@ static const struct regulator_desc rt5033_supported_regulators[] = {
                .type           = REGULATOR_VOLTAGE,
                .owner          = THIS_MODULE,
                .n_voltages     = RT5033_REGULATOR_LDO_VOLTAGE_STEP_NUM,
-               .min_uV         = RT5033_REGULATOR_LDO_VOLTAGE_MIN,
-               .uV_step        = RT5033_REGULATOR_LDO_VOLTAGE_STEP,
+               .linear_ranges  = rt5033_ldo_ranges,
+               .n_linear_ranges = ARRAY_SIZE(rt5033_ldo_ranges),
                .enable_reg     = RT5033_REG_CTRL,
                .enable_mask    = RT5033_CTRL_EN_LDO_MASK,
                .vsel_reg       = RT5033_REG_LDO_CTRL,
index d3299a7..cb22a20 100644 (file)
@@ -144,7 +144,7 @@ static int rt6245_init_device_properties(struct device *dev)
 static int rt6245_reg_write(void *context, unsigned int reg, unsigned int val)
 {
        struct i2c_client *i2c = context;
-       const u8 func_base[] = { 0x6F, 0x73, 0x78, 0x61, 0x7C, 0 };
+       static const u8 func_base[] = { 0x6F, 0x73, 0x78, 0x61, 0x7C, 0 };
        unsigned int code, bit_count;
 
        code = func_base[reg];
diff --git a/drivers/regulator/rtq2134-regulator.c b/drivers/regulator/rtq2134-regulator.c
new file mode 100644 (file)
index 0000000..f21e3f8
--- /dev/null
@@ -0,0 +1,373 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/bitops.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+
+enum {
+       RTQ2134_IDX_BUCK1 = 0,
+       RTQ2134_IDX_BUCK2,
+       RTQ2134_IDX_BUCK3,
+       RTQ2134_IDX_MAX
+};
+
+#define RTQ2134_AUTO_MODE              0
+#define RTQ2134_FCCM_MODE              1
+
+#define RTQ2134_BUCK_DVS0_CTRL         0
+#define RTQ2134_BUCK_VSEL_CTRL         2
+
+#define RTQ2134_REG_IO_CHIPNAME                0x01
+#define RTQ2134_REG_FLT_RECORDTEMP     0x13
+#define RTQ2134_REG_FLT_RECORDBUCK(_id)        (0x14 + (_id))
+#define RTQ2134_REG_FLT_BUCKCTRL(_id)  (0x37 + (_id))
+#define RTQ2134_REG_BUCK1_CFG0         0x42
+#define RTQ2134_REG_BUCK1_DVS0CFG1     0x48
+#define RTQ2134_REG_BUCK1_DVS0CFG0     0x49
+#define RTQ2134_REG_BUCK1_DVS1CFG1     0x4A
+#define RTQ2134_REG_BUCK1_DVS1CFG0     0x4B
+#define RTQ2134_REG_BUCK1_DVSCFG       0x52
+#define RTQ2134_REG_BUCK1_RSPCFG       0x54
+#define RTQ2134_REG_BUCK2_CFG0         0x5F
+#define RTQ2134_REG_BUCK2_DVS0CFG1     0x62
+#define RTQ2134_REG_BUCK2_DVS0CFG0     0x63
+#define RTQ2134_REG_BUCK2_DVS1CFG1     0x64
+#define RTQ2134_REG_BUCK2_DVS1CFG0     0x65
+#define RTQ2134_REG_BUCK2_DVSCFG       0x6C
+#define RTQ2134_REG_BUCK2_RSPCFG       0x6E
+#define RTQ2134_REG_BUCK3_CFG0         0x79
+#define RTQ2134_REG_BUCK3_DVS0CFG1     0x7C
+#define RTQ2134_REG_BUCK3_DVS0CFG0     0x7D
+#define RTQ2134_REG_BUCK3_DVS1CFG1     0x7E
+#define RTQ2134_REG_BUCK3_DVS1CFG0     0x7F
+#define RTQ2134_REG_BUCK3_DVSCFG       0x86
+#define RTQ2134_REG_BUCK3_RSPCFG       0x88
+#define RTQ2134_REG_BUCK3_SLEWCTRL     0x89
+
+#define RTQ2134_VOUT_MAXNUM            256
+#define RTQ2134_VOUT_MASK              0xFF
+#define RTQ2134_VOUTEN_MASK            BIT(0)
+#define RTQ2134_ACTDISCHG_MASK         BIT(0)
+#define RTQ2134_RSPUP_MASK             GENMASK(6, 4)
+#define RTQ2134_FCCM_MASK              BIT(5)
+#define RTQ2134_UVHICCUP_MASK          BIT(3)
+#define RTQ2134_BUCKDVS_CTRL_MASK      GENMASK(1, 0)
+#define RTQ2134_CHIPOT_MASK            BIT(2)
+#define RTQ2134_BUCKOV_MASK            BIT(5)
+#define RTQ2134_BUCKUV_MASK            BIT(4)
+
+struct rtq2134_regulator_desc {
+       struct regulator_desc desc;
+       /* Extension for proprietary register and mask */
+       unsigned int mode_reg;
+       unsigned int mode_mask;
+       unsigned int suspend_enable_reg;
+       unsigned int suspend_enable_mask;
+       unsigned int suspend_vsel_reg;
+       unsigned int suspend_vsel_mask;
+       unsigned int suspend_mode_reg;
+       unsigned int suspend_mode_mask;
+       unsigned int dvs_ctrl_reg;
+};
+
+static int rtq2134_buck_set_mode(struct regulator_dev *rdev, unsigned int mode)
+{
+       struct rtq2134_regulator_desc *desc =
+               (struct rtq2134_regulator_desc *)rdev->desc;
+       unsigned int val;
+
+       if (mode == REGULATOR_MODE_NORMAL)
+               val = RTQ2134_AUTO_MODE;
+       else if (mode == REGULATOR_MODE_FAST)
+               val = RTQ2134_FCCM_MODE;
+       else
+               return -EINVAL;
+
+       val <<= ffs(desc->mode_mask) - 1;
+       return regmap_update_bits(rdev->regmap, desc->mode_reg, desc->mode_mask,
+                                 val);
+}
+
+static unsigned int rtq2134_buck_get_mode(struct regulator_dev *rdev)
+{
+       struct rtq2134_regulator_desc *desc =
+               (struct rtq2134_regulator_desc *)rdev->desc;
+       unsigned int mode;
+       int ret;
+
+       ret = regmap_read(rdev->regmap, desc->mode_reg, &mode);
+       if (ret)
+               return ret;
+
+       if (mode & desc->mode_mask)
+               return REGULATOR_MODE_FAST;
+       return REGULATOR_MODE_NORMAL;
+}
+
+static int rtq2134_buck_set_suspend_voltage(struct regulator_dev *rdev, int uV)
+{
+       struct rtq2134_regulator_desc *desc =
+               (struct rtq2134_regulator_desc *)rdev->desc;
+       int sel;
+
+       sel = regulator_map_voltage_linear_range(rdev, uV, uV);
+       if (sel < 0)
+               return sel;
+
+       sel <<= ffs(desc->suspend_vsel_mask) - 1;
+
+       return regmap_update_bits(rdev->regmap, desc->suspend_vsel_reg,
+                                 desc->suspend_vsel_mask, sel);
+}
+
+static int rtq2134_buck_set_suspend_enable(struct regulator_dev *rdev)
+{
+       struct rtq2134_regulator_desc *desc =
+               (struct rtq2134_regulator_desc *)rdev->desc;
+       unsigned int val = desc->suspend_enable_mask;
+
+       return regmap_update_bits(rdev->regmap, desc->suspend_enable_reg,
+                                 desc->suspend_enable_mask, val);
+}
+
+static int rtq2134_buck_set_suspend_disable(struct regulator_dev *rdev)
+{
+       struct rtq2134_regulator_desc *desc =
+               (struct rtq2134_regulator_desc *)rdev->desc;
+
+       return regmap_update_bits(rdev->regmap, desc->suspend_enable_reg,
+                                 desc->suspend_enable_mask, 0);
+}
+
+static int rtq2134_buck_set_suspend_mode(struct regulator_dev *rdev,
+                                        unsigned int mode)
+{
+       struct rtq2134_regulator_desc *desc =
+               (struct rtq2134_regulator_desc *)rdev->desc;
+       unsigned int val;
+
+       if (mode == REGULATOR_MODE_NORMAL)
+               val = RTQ2134_AUTO_MODE;
+       else if (mode == REGULATOR_MODE_FAST)
+               val = RTQ2134_FCCM_MODE;
+       else
+               return -EINVAL;
+
+       val <<= ffs(desc->suspend_mode_mask) - 1;
+       return regmap_update_bits(rdev->regmap, desc->suspend_mode_reg,
+                                 desc->suspend_mode_mask, val);
+}
+
+static int rtq2134_buck_get_error_flags(struct regulator_dev *rdev,
+                                       unsigned int *flags)
+{
+       int rid = rdev_get_id(rdev);
+       unsigned int chip_error, buck_error, events = 0;
+       int ret;
+
+       ret = regmap_read(rdev->regmap, RTQ2134_REG_FLT_RECORDTEMP,
+                         &chip_error);
+       if (ret) {
+               dev_err(&rdev->dev, "Failed to get chip error flag\n");
+               return ret;
+       }
+
+       ret = regmap_read(rdev->regmap, RTQ2134_REG_FLT_RECORDBUCK(rid),
+                         &buck_error);
+       if (ret) {
+               dev_err(&rdev->dev, "Failed to get buck error flag\n");
+               return ret;
+       }
+
+       if (chip_error & RTQ2134_CHIPOT_MASK)
+               events |= REGULATOR_ERROR_OVER_TEMP;
+
+       if (buck_error & RTQ2134_BUCKUV_MASK)
+               events |= REGULATOR_ERROR_UNDER_VOLTAGE;
+
+       if (buck_error & RTQ2134_BUCKOV_MASK)
+               events |= REGULATOR_ERROR_REGULATION_OUT;
+
+       *flags = events;
+       return 0;
+}
+
+static const struct regulator_ops rtq2134_buck_ops = {
+       .list_voltage = regulator_list_voltage_linear_range,
+       .set_voltage_sel = regulator_set_voltage_sel_regmap,
+       .get_voltage_sel = regulator_get_voltage_sel_regmap,
+       .enable = regulator_enable_regmap,
+       .disable = regulator_disable_regmap,
+       .is_enabled = regulator_is_enabled_regmap,
+       .set_active_discharge = regulator_set_active_discharge_regmap,
+       .set_ramp_delay = regulator_set_ramp_delay_regmap,
+       .set_mode = rtq2134_buck_set_mode,
+       .get_mode = rtq2134_buck_get_mode,
+       .set_suspend_voltage = rtq2134_buck_set_suspend_voltage,
+       .set_suspend_enable = rtq2134_buck_set_suspend_enable,
+       .set_suspend_disable = rtq2134_buck_set_suspend_disable,
+       .set_suspend_mode = rtq2134_buck_set_suspend_mode,
+       .get_error_flags = rtq2134_buck_get_error_flags,
+};
+
+static const struct linear_range rtq2134_buck_vout_ranges[] = {
+       REGULATOR_LINEAR_RANGE(300000, 0, 200, 5000),
+       REGULATOR_LINEAR_RANGE(1310000, 201, 255, 10000)
+};
+
+static unsigned int rtq2134_buck_of_map_mode(unsigned int mode)
+{
+       switch (mode) {
+       case RTQ2134_AUTO_MODE:
+               return REGULATOR_MODE_NORMAL;
+       case RTQ2134_FCCM_MODE:
+               return REGULATOR_MODE_FAST;
+       }
+
+       return REGULATOR_MODE_INVALID;
+}
+
+static int rtq2134_buck_of_parse_cb(struct device_node *np,
+                                   const struct regulator_desc *desc,
+                                   struct regulator_config *cfg)
+{
+       struct rtq2134_regulator_desc *rdesc =
+               (struct rtq2134_regulator_desc *)desc;
+       int rid = desc->id;
+       bool uv_shutdown, vsel_dvs;
+       unsigned int val;
+       int ret;
+
+       vsel_dvs = of_property_read_bool(np, "richtek,use-vsel-dvs");
+       if (vsel_dvs)
+               val = RTQ2134_BUCK_VSEL_CTRL;
+       else
+               val = RTQ2134_BUCK_DVS0_CTRL;
+
+       ret = regmap_update_bits(cfg->regmap, rdesc->dvs_ctrl_reg,
+                                RTQ2134_BUCKDVS_CTRL_MASK, val);
+       if (ret)
+               return ret;
+
+       uv_shutdown = of_property_read_bool(np, "richtek,uv-shutdown");
+       if (uv_shutdown)
+               val = 0;
+       else
+               val = RTQ2134_UVHICCUP_MASK;
+
+       return regmap_update_bits(cfg->regmap, RTQ2134_REG_FLT_BUCKCTRL(rid),
+                                 RTQ2134_UVHICCUP_MASK, val);
+}
+
+static const unsigned int rtq2134_buck_ramp_delay_table[] = {
+       0, 16000, 0, 8000, 4000, 2000, 1000, 500
+};
+
+#define RTQ2134_BUCK_DESC(_id) { \
+       .desc = { \
+               .name = "rtq2134_buck" #_id, \
+               .of_match = of_match_ptr("buck" #_id), \
+               .regulators_node = of_match_ptr("regulators"), \
+               .id = RTQ2134_IDX_BUCK##_id, \
+               .type = REGULATOR_VOLTAGE, \
+               .owner = THIS_MODULE, \
+               .ops = &rtq2134_buck_ops, \
+               .n_voltages = RTQ2134_VOUT_MAXNUM, \
+               .linear_ranges = rtq2134_buck_vout_ranges, \
+               .n_linear_ranges = ARRAY_SIZE(rtq2134_buck_vout_ranges), \
+               .vsel_reg = RTQ2134_REG_BUCK##_id##_DVS0CFG1, \
+               .vsel_mask = RTQ2134_VOUT_MASK, \
+               .enable_reg = RTQ2134_REG_BUCK##_id##_DVS0CFG0, \
+               .enable_mask = RTQ2134_VOUTEN_MASK, \
+               .active_discharge_reg = RTQ2134_REG_BUCK##_id##_CFG0, \
+               .active_discharge_mask = RTQ2134_ACTDISCHG_MASK, \
+               .ramp_reg = RTQ2134_REG_BUCK##_id##_RSPCFG, \
+               .ramp_mask = RTQ2134_RSPUP_MASK, \
+               .ramp_delay_table = rtq2134_buck_ramp_delay_table, \
+               .n_ramp_values = ARRAY_SIZE(rtq2134_buck_ramp_delay_table), \
+               .of_map_mode = rtq2134_buck_of_map_mode, \
+               .of_parse_cb = rtq2134_buck_of_parse_cb, \
+       }, \
+       .mode_reg = RTQ2134_REG_BUCK##_id##_DVS0CFG0, \
+       .mode_mask = RTQ2134_FCCM_MASK, \
+       .suspend_mode_reg = RTQ2134_REG_BUCK##_id##_DVS1CFG0, \
+       .suspend_mode_mask = RTQ2134_FCCM_MASK, \
+       .suspend_enable_reg = RTQ2134_REG_BUCK##_id##_DVS1CFG0, \
+       .suspend_enable_mask = RTQ2134_VOUTEN_MASK, \
+       .suspend_vsel_reg = RTQ2134_REG_BUCK##_id##_DVS1CFG1, \
+       .suspend_vsel_mask = RTQ2134_VOUT_MASK, \
+       .dvs_ctrl_reg = RTQ2134_REG_BUCK##_id##_DVSCFG, \
+}
+
+static const struct rtq2134_regulator_desc rtq2134_regulator_descs[] = {
+       RTQ2134_BUCK_DESC(1),
+       RTQ2134_BUCK_DESC(2),
+       RTQ2134_BUCK_DESC(3)
+};
+
+static bool rtq2134_is_accessible_reg(struct device *dev, unsigned int reg)
+{
+       if (reg >= RTQ2134_REG_IO_CHIPNAME && reg <= RTQ2134_REG_BUCK3_SLEWCTRL)
+               return true;
+       return false;
+}
+
+static const struct regmap_config rtq2134_regmap_config = {
+       .reg_bits = 8,
+       .val_bits = 8,
+       .max_register = RTQ2134_REG_BUCK3_SLEWCTRL,
+
+       .readable_reg = rtq2134_is_accessible_reg,
+       .writeable_reg = rtq2134_is_accessible_reg,
+};
+
+static int rtq2134_probe(struct i2c_client *i2c)
+{
+       struct regmap *regmap;
+       struct regulator_dev *rdev;
+       struct regulator_config regulator_cfg = {};
+       int i;
+
+       regmap = devm_regmap_init_i2c(i2c, &rtq2134_regmap_config);
+       if (IS_ERR(regmap)) {
+               dev_err(&i2c->dev, "Failed to allocate regmap\n");
+               return PTR_ERR(regmap);
+       }
+
+       regulator_cfg.dev = &i2c->dev;
+       regulator_cfg.regmap = regmap;
+       for (i = 0; i < ARRAY_SIZE(rtq2134_regulator_descs); i++) {
+               rdev = devm_regulator_register(&i2c->dev,
+                                              &rtq2134_regulator_descs[i].desc,
+                                              &regulator_cfg);
+               if (IS_ERR(rdev)) {
+                       dev_err(&i2c->dev, "Failed to init %d regulator\n", i);
+                       return PTR_ERR(rdev);
+               }
+       }
+
+       return 0;
+}
+
+static const struct of_device_id __maybe_unused rtq2134_device_tables[] = {
+       { .compatible = "richtek,rtq2134", },
+       {}
+};
+MODULE_DEVICE_TABLE(of, rtq2134_device_tables);
+
+static struct i2c_driver rtq2134_driver = {
+       .driver = {
+               .name = "rtq2134",
+               .of_match_table = rtq2134_device_tables,
+       },
+       .probe_new = rtq2134_probe,
+};
+module_i2c_driver(rtq2134_driver);
+
+MODULE_AUTHOR("ChiYuan Huang <cy_huang@richtek.com>");
+MODULE_DESCRIPTION("Richtek RTQ2134 Regulator Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/regulator/rtq6752-regulator.c b/drivers/regulator/rtq6752-regulator.c
new file mode 100644 (file)
index 0000000..609d3fc
--- /dev/null
@@ -0,0 +1,289 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+
+enum {
+       RTQ6752_IDX_PAVDD = 0,
+       RTQ6752_IDX_NAVDD = 1,
+       RTQ6752_IDX_MAX
+};
+
+#define RTQ6752_REG_PAVDD      0x00
+#define RTQ6752_REG_NAVDD      0x01
+#define RTQ6752_REG_PAVDDONDLY 0x07
+#define RTQ6752_REG_PAVDDSSTIME        0x08
+#define RTQ6752_REG_NAVDDONDLY 0x0D
+#define RTQ6752_REG_NAVDDSSTIME        0x0E
+#define RTQ6752_REG_OPTION1    0x12
+#define RTQ6752_REG_CHSWITCH   0x16
+#define RTQ6752_REG_FAULT      0x1D
+
+#define RTQ6752_VOUT_MASK      GENMASK(5, 0)
+#define RTQ6752_NAVDDEN_MASK   BIT(3)
+#define RTQ6752_PAVDDEN_MASK   BIT(0)
+#define RTQ6752_PAVDDAD_MASK   BIT(4)
+#define RTQ6752_NAVDDAD_MASK   BIT(3)
+#define RTQ6752_PAVDDF_MASK    BIT(3)
+#define RTQ6752_NAVDDF_MASK    BIT(0)
+#define RTQ6752_ENABLE_MASK    (BIT(RTQ6752_IDX_MAX) - 1)
+
+#define RTQ6752_VOUT_MINUV     5000000
+#define RTQ6752_VOUT_STEPUV    50000
+#define RTQ6752_VOUT_NUM       47
+#define RTQ6752_I2CRDY_TIMEUS  1000
+#define RTQ6752_MINSS_TIMEUS   5000
+
+struct rtq6752_priv {
+       struct regmap *regmap;
+       struct gpio_desc *enable_gpio;
+       struct mutex lock;
+       unsigned char enable_flag;
+};
+
+static int rtq6752_set_vdd_enable(struct regulator_dev *rdev)
+{
+       struct rtq6752_priv *priv = rdev_get_drvdata(rdev);
+       int rid = rdev_get_id(rdev), ret;
+
+       mutex_lock(&priv->lock);
+       if (priv->enable_gpio) {
+               gpiod_set_value(priv->enable_gpio, 1);
+
+               usleep_range(RTQ6752_I2CRDY_TIMEUS,
+                            RTQ6752_I2CRDY_TIMEUS + 100);
+       }
+
+       if (!priv->enable_flag) {
+               regcache_cache_only(priv->regmap, false);
+               ret = regcache_sync(priv->regmap);
+               if (ret) {
+                       mutex_unlock(&priv->lock);
+                       return ret;
+               }
+       }
+
+       priv->enable_flag |= BIT(rid);
+       mutex_unlock(&priv->lock);
+
+       return regulator_enable_regmap(rdev);
+}
+
+static int rtq6752_set_vdd_disable(struct regulator_dev *rdev)
+{
+       struct rtq6752_priv *priv = rdev_get_drvdata(rdev);
+       int rid = rdev_get_id(rdev), ret;
+
+       ret = regulator_disable_regmap(rdev);
+       if (ret)
+               return ret;
+
+       mutex_lock(&priv->lock);
+       priv->enable_flag &= ~BIT(rid);
+
+       if (!priv->enable_flag) {
+               regcache_cache_only(priv->regmap, true);
+               regcache_mark_dirty(priv->regmap);
+       }
+
+       if (priv->enable_gpio)
+               gpiod_set_value(priv->enable_gpio, 0);
+
+       mutex_unlock(&priv->lock);
+
+       return 0;
+}
+
+static int rtq6752_get_error_flags(struct regulator_dev *rdev,
+                                  unsigned int *flags)
+{
+       unsigned int val, events = 0;
+       const unsigned int fault_mask[] = {
+               RTQ6752_PAVDDF_MASK, RTQ6752_NAVDDF_MASK };
+       int rid = rdev_get_id(rdev), ret;
+
+       ret = regmap_read(rdev->regmap, RTQ6752_REG_FAULT, &val);
+       if (ret)
+               return ret;
+
+       if (val & fault_mask[rid])
+               events = REGULATOR_ERROR_REGULATION_OUT;
+
+       *flags = events;
+       return 0;
+}
+
+static const struct regulator_ops rtq6752_regulator_ops = {
+       .list_voltage = regulator_list_voltage_linear,
+       .set_voltage_sel = regulator_set_voltage_sel_regmap,
+       .get_voltage_sel = regulator_get_voltage_sel_regmap,
+       .enable = rtq6752_set_vdd_enable,
+       .disable = rtq6752_set_vdd_disable,
+       .is_enabled = regulator_is_enabled_regmap,
+       .set_active_discharge = regulator_set_active_discharge_regmap,
+       .get_error_flags = rtq6752_get_error_flags,
+};
+
+static const struct regulator_desc rtq6752_regulator_descs[] = {
+       {
+               .name = "rtq6752-pavdd",
+               .of_match = of_match_ptr("pavdd"),
+               .regulators_node = of_match_ptr("regulators"),
+               .id = RTQ6752_IDX_PAVDD,
+               .n_voltages = RTQ6752_VOUT_NUM,
+               .ops = &rtq6752_regulator_ops,
+               .owner = THIS_MODULE,
+               .min_uV = RTQ6752_VOUT_MINUV,
+               .uV_step = RTQ6752_VOUT_STEPUV,
+               .enable_time = RTQ6752_MINSS_TIMEUS,
+               .vsel_reg = RTQ6752_REG_PAVDD,
+               .vsel_mask = RTQ6752_VOUT_MASK,
+               .enable_reg = RTQ6752_REG_CHSWITCH,
+               .enable_mask = RTQ6752_PAVDDEN_MASK,
+               .active_discharge_reg = RTQ6752_REG_OPTION1,
+               .active_discharge_mask = RTQ6752_PAVDDAD_MASK,
+               .active_discharge_off = RTQ6752_PAVDDAD_MASK,
+       },
+       {
+               .name = "rtq6752-navdd",
+               .of_match = of_match_ptr("navdd"),
+               .regulators_node = of_match_ptr("regulators"),
+               .id = RTQ6752_IDX_NAVDD,
+               .n_voltages = RTQ6752_VOUT_NUM,
+               .ops = &rtq6752_regulator_ops,
+               .owner = THIS_MODULE,
+               .min_uV = RTQ6752_VOUT_MINUV,
+               .uV_step = RTQ6752_VOUT_STEPUV,
+               .enable_time = RTQ6752_MINSS_TIMEUS,
+               .vsel_reg = RTQ6752_REG_NAVDD,
+               .vsel_mask = RTQ6752_VOUT_MASK,
+               .enable_reg = RTQ6752_REG_CHSWITCH,
+               .enable_mask = RTQ6752_NAVDDEN_MASK,
+               .active_discharge_reg = RTQ6752_REG_OPTION1,
+               .active_discharge_mask = RTQ6752_NAVDDAD_MASK,
+               .active_discharge_off = RTQ6752_NAVDDAD_MASK,
+       }
+};
+
+static int rtq6752_init_device_properties(struct rtq6752_priv *priv)
+{
+       u8 raw_vals[] = { 0, 0 };
+       int ret;
+
+       /* Configure PAVDD on and softstart delay time to the minimum */
+       ret = regmap_raw_write(priv->regmap, RTQ6752_REG_PAVDDONDLY, raw_vals,
+                              ARRAY_SIZE(raw_vals));
+       if (ret)
+               return ret;
+
+       /* Configure NAVDD on and softstart delay time to the minimum */
+       return regmap_raw_write(priv->regmap, RTQ6752_REG_NAVDDONDLY, raw_vals,
+                               ARRAY_SIZE(raw_vals));
+}
+
+static bool rtq6752_is_volatile_reg(struct device *dev, unsigned int reg)
+{
+       if (reg == RTQ6752_REG_FAULT)
+               return true;
+       return false;
+}
+
+static const struct reg_default rtq6752_reg_defaults[] = {
+       { RTQ6752_REG_PAVDD, 0x14 },
+       { RTQ6752_REG_NAVDD, 0x14 },
+       { RTQ6752_REG_PAVDDONDLY, 0x01 },
+       { RTQ6752_REG_PAVDDSSTIME, 0x01 },
+       { RTQ6752_REG_NAVDDONDLY, 0x01 },
+       { RTQ6752_REG_NAVDDSSTIME, 0x01 },
+       { RTQ6752_REG_OPTION1, 0x07 },
+       { RTQ6752_REG_CHSWITCH, 0x29 },
+};
+
+static const struct regmap_config rtq6752_regmap_config = {
+       .reg_bits = 8,
+       .val_bits = 8,
+       .cache_type = REGCACHE_RBTREE,
+       .max_register = RTQ6752_REG_FAULT,
+       .reg_defaults = rtq6752_reg_defaults,
+       .num_reg_defaults = ARRAY_SIZE(rtq6752_reg_defaults),
+       .volatile_reg = rtq6752_is_volatile_reg,
+};
+
+static int rtq6752_probe(struct i2c_client *i2c)
+{
+       struct rtq6752_priv *priv;
+       struct regulator_config reg_cfg = {};
+       struct regulator_dev *rdev;
+       int i, ret;
+
+       priv = devm_kzalloc(&i2c->dev, sizeof(*priv), GFP_KERNEL);
+       if (!priv)
+               return -ENOMEM;
+
+       mutex_init(&priv->lock);
+
+       priv->enable_gpio = devm_gpiod_get_optional(&i2c->dev, "enable",
+                                                   GPIOD_OUT_HIGH);
+       if (IS_ERR(priv->enable_gpio)) {
+               dev_err(&i2c->dev, "Failed to get 'enable' gpio\n");
+               return PTR_ERR(priv->enable_gpio);
+       }
+
+       usleep_range(RTQ6752_I2CRDY_TIMEUS, RTQ6752_I2CRDY_TIMEUS + 100);
+       /* Default EN pin to high, PAVDD and NAVDD will be on */
+       priv->enable_flag = RTQ6752_ENABLE_MASK;
+
+       priv->regmap = devm_regmap_init_i2c(i2c, &rtq6752_regmap_config);
+       if (IS_ERR(priv->regmap)) {
+               dev_err(&i2c->dev, "Failed to init regmap\n");
+               return PTR_ERR(priv->regmap);
+       }
+
+       ret = rtq6752_init_device_properties(priv);
+       if (ret) {
+               dev_err(&i2c->dev, "Failed to init device properties\n");
+               return ret;
+       }
+
+       reg_cfg.dev = &i2c->dev;
+       reg_cfg.regmap = priv->regmap;
+       reg_cfg.driver_data = priv;
+
+       for (i = 0; i < ARRAY_SIZE(rtq6752_regulator_descs); i++) {
+               rdev = devm_regulator_register(&i2c->dev,
+                                              rtq6752_regulator_descs + i,
+                                              &reg_cfg);
+               if (IS_ERR(rdev)) {
+                       dev_err(&i2c->dev, "Failed to init %d regulator\n", i);
+                       return PTR_ERR(rdev);
+               }
+       }
+
+       return 0;
+}
+
+static const struct of_device_id __maybe_unused rtq6752_device_table[] = {
+       { .compatible = "richtek,rtq6752", },
+       {}
+};
+MODULE_DEVICE_TABLE(of, rtq6752_device_table);
+
+static struct i2c_driver rtq6752_driver = {
+       .driver = {
+               .name = "rtq6752",
+               .of_match_table = rtq6752_device_table,
+       },
+       .probe_new = rtq6752_probe,
+};
+module_i2c_driver(rtq6752_driver);
+
+MODULE_AUTHOR("ChiYuan Huang <cy_huang@richtek.com>");
+MODULE_DESCRIPTION("Richtek RTQ6752 Regulator Driver");
+MODULE_LICENSE("GPL v2");
index e021ae0..8360b39 100644 (file)
 #include <linux/gpio/consumer.h>
 #include <linux/mfd/sy7636a.h>
 
-#define SY7636A_POLL_ENABLED_TIME 500
+struct sy7636a_data {
+       struct regmap *regmap;
+       struct gpio_desc *pgood_gpio;
+};
 
 static int sy7636a_get_vcom_voltage_op(struct regulator_dev *rdev)
 {
@@ -35,10 +38,10 @@ static int sy7636a_get_vcom_voltage_op(struct regulator_dev *rdev)
 
 static int sy7636a_get_status(struct regulator_dev *rdev)
 {
-       struct sy7636a *sy7636a = rdev_get_drvdata(rdev);
+       struct sy7636a_data *data = dev_get_drvdata(rdev->dev.parent);
        int ret = 0;
 
-       ret = gpiod_get_value_cansleep(sy7636a->pgood_gpio);
+       ret = gpiod_get_value_cansleep(data->pgood_gpio);
        if (ret < 0)
                dev_err(&rdev->dev, "Failed to read pgood gpio: %d\n", ret);
 
@@ -61,46 +64,50 @@ static const struct regulator_desc desc = {
        .owner = THIS_MODULE,
        .enable_reg = SY7636A_REG_OPERATION_MODE_CRL,
        .enable_mask = SY7636A_OPERATION_MODE_CRL_ONOFF,
-       .poll_enabled_time = SY7636A_POLL_ENABLED_TIME,
        .regulators_node = of_match_ptr("regulators"),
        .of_match = of_match_ptr("vcom"),
 };
 
 static int sy7636a_regulator_probe(struct platform_device *pdev)
 {
-       struct sy7636a *sy7636a = dev_get_drvdata(pdev->dev.parent);
+       struct regmap *regmap = dev_get_drvdata(pdev->dev.parent);
        struct regulator_config config = { };
        struct regulator_dev *rdev;
        struct gpio_desc *gdp;
+       struct sy7636a_data *data;
        int ret;
 
-       if (!sy7636a)
+       if (!regmap)
                return -EPROBE_DEFER;
 
-       platform_set_drvdata(pdev, sy7636a);
-
-       gdp = devm_gpiod_get(sy7636a->dev, "epd-pwr-good", GPIOD_IN);
+       gdp = devm_gpiod_get(pdev->dev.parent, "epd-pwr-good", GPIOD_IN);
        if (IS_ERR(gdp)) {
-               dev_err(sy7636a->dev, "Power good GPIO fault %ld\n", PTR_ERR(gdp));
+               dev_err(pdev->dev.parent, "Power good GPIO fault %ld\n", PTR_ERR(gdp));
                return PTR_ERR(gdp);
        }
 
-       sy7636a->pgood_gpio = gdp;
+       data = devm_kzalloc(&pdev->dev, sizeof(struct sy7636a_data), GFP_KERNEL);
+       if (!data)
+               return -ENOMEM;
+
+       data->regmap = regmap;
+       data->pgood_gpio = gdp;
+
+       platform_set_drvdata(pdev, data);
 
-       ret = regmap_write(sy7636a->regmap, SY7636A_REG_POWER_ON_DELAY_TIME, 0x0);
+       ret = regmap_write(regmap, SY7636A_REG_POWER_ON_DELAY_TIME, 0x0);
        if (ret) {
-               dev_err(sy7636a->dev, "Failed to initialize regulator: %d\n", ret);
+               dev_err(pdev->dev.parent, "Failed to initialize regulator: %d\n", ret);
                return ret;
        }
 
        config.dev = &pdev->dev;
-       config.dev->of_node = sy7636a->dev->of_node;
-       config.driver_data = sy7636a;
-       config.regmap = sy7636a->regmap;
+       config.dev->of_node = pdev->dev.parent->of_node;
+       config.regmap = regmap;
 
        rdev = devm_regulator_register(&pdev->dev, &desc, &config);
        if (IS_ERR(rdev)) {
-               dev_err(sy7636a->dev, "Failed to register %s regulator\n",
+               dev_err(pdev->dev.parent, "Failed to register %s regulator\n",
                        pdev->name);
                return PTR_ERR(rdev);
        }
index 62d243f..5e915cf 100644 (file)
@@ -25,6 +25,7 @@ struct sy8824_config {
        unsigned int vsel_min;
        unsigned int vsel_step;
        unsigned int vsel_count;
+       const struct regmap_config *config;
 };
 
 struct sy8824_device_info {
@@ -110,6 +111,15 @@ static int sy8824_regulator_register(struct sy8824_device_info *di,
 static const struct regmap_config sy8824_regmap_config = {
        .reg_bits = 8,
        .val_bits = 8,
+       .num_reg_defaults_raw = 1,
+       .cache_type = REGCACHE_FLAT,
+};
+
+static const struct regmap_config sy20276_regmap_config = {
+       .reg_bits = 8,
+       .val_bits = 8,
+       .num_reg_defaults_raw = 2,
+       .cache_type = REGCACHE_FLAT,
 };
 
 static int sy8824_i2c_probe(struct i2c_client *client)
@@ -134,7 +144,7 @@ static int sy8824_i2c_probe(struct i2c_client *client)
        di->dev = dev;
        di->cfg = of_device_get_match_data(dev);
 
-       regmap = devm_regmap_init_i2c(client, &sy8824_regmap_config);
+       regmap = devm_regmap_init_i2c(client, di->cfg->config);
        if (IS_ERR(regmap)) {
                dev_err(dev, "Failed to allocate regmap!\n");
                return PTR_ERR(regmap);
@@ -160,6 +170,7 @@ static const struct sy8824_config sy8824c_cfg = {
        .vsel_min = 762500,
        .vsel_step = 12500,
        .vsel_count = 64,
+       .config = &sy8824_regmap_config,
 };
 
 static const struct sy8824_config sy8824e_cfg = {
@@ -169,6 +180,7 @@ static const struct sy8824_config sy8824e_cfg = {
        .vsel_min = 700000,
        .vsel_step = 12500,
        .vsel_count = 64,
+       .config = &sy8824_regmap_config,
 };
 
 static const struct sy8824_config sy20276_cfg = {
@@ -178,6 +190,7 @@ static const struct sy8824_config sy20276_cfg = {
        .vsel_min = 600000,
        .vsel_step = 10000,
        .vsel_count = 128,
+       .config = &sy20276_regmap_config,
 };
 
 static const struct sy8824_config sy20278_cfg = {
@@ -187,6 +200,7 @@ static const struct sy8824_config sy20278_cfg = {
        .vsel_min = 762500,
        .vsel_step = 12500,
        .vsel_count = 64,
+       .config = &sy20276_regmap_config,
 };
 
 static const struct of_device_id sy8824_dt_ids[] = {
index 52e8c17..7d5d9f8 100644 (file)
 #define   SY8827N_MODE         (1 << 6)
 #define SY8827N_VSEL1          1
 #define SY8827N_CTRL           2
+#define SY8827N_ID1            3
+#define SY8827N_ID2            4
+#define SY8827N_PGOOD          5
+#define SY8827N_MAX            (SY8827N_PGOOD + 1)
 
 #define SY8827N_NVOLTAGES      64
 #define SY8827N_VSELMIN                600000
@@ -102,9 +106,19 @@ static int sy8827n_regulator_register(struct sy8827n_device_info *di,
        return PTR_ERR_OR_ZERO(rdev);
 }
 
+static bool sy8827n_volatile_reg(struct device *dev, unsigned int reg)
+{
+       if (reg == SY8827N_PGOOD)
+               return true;
+       return false;
+}
+
 static const struct regmap_config sy8827n_regmap_config = {
        .reg_bits = 8,
        .val_bits = 8,
+       .volatile_reg = sy8827n_volatile_reg,
+       .num_reg_defaults_raw = SY8827N_MAX,
+       .cache_type = REGCACHE_FLAT,
 };
 
 static int sy8827n_i2c_probe(struct i2c_client *client)
index 1d5b0a1..06cbe60 100644 (file)
@@ -1211,12 +1211,10 @@ static int tps65910_probe(struct platform_device *pdev)
 
                rdev = devm_regulator_register(&pdev->dev, &pmic->desc[i],
                                               &config);
-               if (IS_ERR(rdev)) {
-                       dev_err(tps65910->dev,
-                               "failed to register %s regulator\n",
-                               pdev->name);
-                       return PTR_ERR(rdev);
-               }
+               if (IS_ERR(rdev))
+                       return dev_err_probe(tps65910->dev, PTR_ERR(rdev),
+                                            "failed to register %s regulator\n",
+                                            pdev->name);
 
                /* Save regulator for cleanup */
                pmic->rdev[i] = rdev;
index cbadb1c..d2a3797 100644 (file)
@@ -37,7 +37,6 @@ struct vctrl_voltage_table {
 struct vctrl_data {
        struct regulator_dev *rdev;
        struct regulator_desc desc;
-       struct regulator *ctrl_reg;
        bool enabled;
        unsigned int min_slew_down_rate;
        unsigned int ovp_threshold;
@@ -82,7 +81,12 @@ static int vctrl_calc_output_voltage(struct vctrl_data *vctrl, int ctrl_uV)
 static int vctrl_get_voltage(struct regulator_dev *rdev)
 {
        struct vctrl_data *vctrl = rdev_get_drvdata(rdev);
-       int ctrl_uV = regulator_get_voltage_rdev(vctrl->ctrl_reg->rdev);
+       int ctrl_uV;
+
+       if (!rdev->supply)
+               return -EPROBE_DEFER;
+
+       ctrl_uV = regulator_get_voltage_rdev(rdev->supply->rdev);
 
        return vctrl_calc_output_voltage(vctrl, ctrl_uV);
 }
@@ -92,14 +96,19 @@ static int vctrl_set_voltage(struct regulator_dev *rdev,
                             unsigned int *selector)
 {
        struct vctrl_data *vctrl = rdev_get_drvdata(rdev);
-       struct regulator *ctrl_reg = vctrl->ctrl_reg;
-       int orig_ctrl_uV = regulator_get_voltage_rdev(ctrl_reg->rdev);
-       int uV = vctrl_calc_output_voltage(vctrl, orig_ctrl_uV);
+       int orig_ctrl_uV;
+       int uV;
        int ret;
 
+       if (!rdev->supply)
+               return -EPROBE_DEFER;
+
+       orig_ctrl_uV = regulator_get_voltage_rdev(rdev->supply->rdev);
+       uV = vctrl_calc_output_voltage(vctrl, orig_ctrl_uV);
+
        if (req_min_uV >= uV || !vctrl->ovp_threshold)
                /* voltage rising or no OVP */
-               return regulator_set_voltage_rdev(ctrl_reg->rdev,
+               return regulator_set_voltage_rdev(rdev->supply->rdev,
                        vctrl_calc_ctrl_voltage(vctrl, req_min_uV),
                        vctrl_calc_ctrl_voltage(vctrl, req_max_uV),
                        PM_SUSPEND_ON);
@@ -117,7 +126,7 @@ static int vctrl_set_voltage(struct regulator_dev *rdev,
                next_uV = max_t(int, req_min_uV, uV - max_drop_uV);
                next_ctrl_uV = vctrl_calc_ctrl_voltage(vctrl, next_uV);
 
-               ret = regulator_set_voltage_rdev(ctrl_reg->rdev,
+               ret = regulator_set_voltage_rdev(rdev->supply->rdev,
                                            next_ctrl_uV,
                                            next_ctrl_uV,
                                            PM_SUSPEND_ON);
@@ -134,7 +143,7 @@ static int vctrl_set_voltage(struct regulator_dev *rdev,
 
 err:
        /* Try to go back to original voltage */
-       regulator_set_voltage_rdev(ctrl_reg->rdev, orig_ctrl_uV, orig_ctrl_uV,
+       regulator_set_voltage_rdev(rdev->supply->rdev, orig_ctrl_uV, orig_ctrl_uV,
                                   PM_SUSPEND_ON);
 
        return ret;
@@ -151,16 +160,18 @@ static int vctrl_set_voltage_sel(struct regulator_dev *rdev,
                                 unsigned int selector)
 {
        struct vctrl_data *vctrl = rdev_get_drvdata(rdev);
-       struct regulator *ctrl_reg = vctrl->ctrl_reg;
        unsigned int orig_sel = vctrl->sel;
        int ret;
 
+       if (!rdev->supply)
+               return -EPROBE_DEFER;
+
        if (selector >= rdev->desc->n_voltages)
                return -EINVAL;
 
        if (selector >= vctrl->sel || !vctrl->ovp_threshold) {
                /* voltage rising or no OVP */
-               ret = regulator_set_voltage_rdev(ctrl_reg->rdev,
+               ret = regulator_set_voltage_rdev(rdev->supply->rdev,
                                            vctrl->vtable[selector].ctrl,
                                            vctrl->vtable[selector].ctrl,
                                            PM_SUSPEND_ON);
@@ -179,7 +190,7 @@ static int vctrl_set_voltage_sel(struct regulator_dev *rdev,
                else
                        next_sel = vctrl->vtable[vctrl->sel].ovp_min_sel;
 
-               ret = regulator_set_voltage_rdev(ctrl_reg->rdev,
+               ret = regulator_set_voltage_rdev(rdev->supply->rdev,
                                            vctrl->vtable[next_sel].ctrl,
                                            vctrl->vtable[next_sel].ctrl,
                                            PM_SUSPEND_ON);
@@ -202,7 +213,7 @@ static int vctrl_set_voltage_sel(struct regulator_dev *rdev,
 err:
        if (vctrl->sel != orig_sel) {
                /* Try to go back to original voltage */
-               if (!regulator_set_voltage_rdev(ctrl_reg->rdev,
+               if (!regulator_set_voltage_rdev(rdev->supply->rdev,
                                           vctrl->vtable[orig_sel].ctrl,
                                           vctrl->vtable[orig_sel].ctrl,
                                           PM_SUSPEND_ON))
@@ -234,10 +245,6 @@ static int vctrl_parse_dt(struct platform_device *pdev,
        u32 pval;
        u32 vrange_ctrl[2];
 
-       vctrl->ctrl_reg = devm_regulator_get(&pdev->dev, "ctrl");
-       if (IS_ERR(vctrl->ctrl_reg))
-               return PTR_ERR(vctrl->ctrl_reg);
-
        ret = of_property_read_u32(np, "ovp-threshold-percent", &pval);
        if (!ret) {
                vctrl->ovp_threshold = pval;
@@ -315,11 +322,11 @@ static int vctrl_cmp_ctrl_uV(const void *a, const void *b)
        return at->ctrl - bt->ctrl;
 }
 
-static int vctrl_init_vtable(struct platform_device *pdev)
+static int vctrl_init_vtable(struct platform_device *pdev,
+                            struct regulator *ctrl_reg)
 {
        struct vctrl_data *vctrl = platform_get_drvdata(pdev);
        struct regulator_desc *rdesc = &vctrl->desc;
-       struct regulator *ctrl_reg = vctrl->ctrl_reg;
        struct vctrl_voltage_range *vrange_ctrl = &vctrl->vrange.ctrl;
        int n_voltages;
        int ctrl_uV;
@@ -395,23 +402,19 @@ static int vctrl_init_vtable(struct platform_device *pdev)
 static int vctrl_enable(struct regulator_dev *rdev)
 {
        struct vctrl_data *vctrl = rdev_get_drvdata(rdev);
-       int ret = regulator_enable(vctrl->ctrl_reg);
 
-       if (!ret)
-               vctrl->enabled = true;
+       vctrl->enabled = true;
 
-       return ret;
+       return 0;
 }
 
 static int vctrl_disable(struct regulator_dev *rdev)
 {
        struct vctrl_data *vctrl = rdev_get_drvdata(rdev);
-       int ret = regulator_disable(vctrl->ctrl_reg);
 
-       if (!ret)
-               vctrl->enabled = false;
+       vctrl->enabled = false;
 
-       return ret;
+       return 0;
 }
 
 static int vctrl_is_enabled(struct regulator_dev *rdev)
@@ -447,6 +450,7 @@ static int vctrl_probe(struct platform_device *pdev)
        struct regulator_desc *rdesc;
        struct regulator_config cfg = { };
        struct vctrl_voltage_range *vrange_ctrl;
+       struct regulator *ctrl_reg;
        int ctrl_uV;
        int ret;
 
@@ -461,15 +465,20 @@ static int vctrl_probe(struct platform_device *pdev)
        if (ret)
                return ret;
 
+       ctrl_reg = devm_regulator_get(&pdev->dev, "ctrl");
+       if (IS_ERR(ctrl_reg))
+               return PTR_ERR(ctrl_reg);
+
        vrange_ctrl = &vctrl->vrange.ctrl;
 
        rdesc = &vctrl->desc;
        rdesc->name = "vctrl";
        rdesc->type = REGULATOR_VOLTAGE;
        rdesc->owner = THIS_MODULE;
+       rdesc->supply_name = "ctrl";
 
-       if ((regulator_get_linear_step(vctrl->ctrl_reg) == 1) ||
-           (regulator_count_voltages(vctrl->ctrl_reg) == -EINVAL)) {
+       if ((regulator_get_linear_step(ctrl_reg) == 1) ||
+           (regulator_count_voltages(ctrl_reg) == -EINVAL)) {
                rdesc->continuous_voltage_range = true;
                rdesc->ops = &vctrl_ops_cont;
        } else {
@@ -486,11 +495,12 @@ static int vctrl_probe(struct platform_device *pdev)
        cfg.init_data = init_data;
 
        if (!rdesc->continuous_voltage_range) {
-               ret = vctrl_init_vtable(pdev);
+               ret = vctrl_init_vtable(pdev, ctrl_reg);
                if (ret)
                        return ret;
 
-               ctrl_uV = regulator_get_voltage_rdev(vctrl->ctrl_reg->rdev);
+               /* Use locked consumer API when not in regulator framework */
+               ctrl_uV = regulator_get_voltage(ctrl_reg);
                if (ctrl_uV < 0) {
                        dev_err(&pdev->dev, "failed to get control voltage\n");
                        return ctrl_uV;
@@ -513,6 +523,9 @@ static int vctrl_probe(struct platform_device *pdev)
                }
        }
 
+       /* Drop ctrl-supply here in favor of regulator core managed supply */
+       devm_regulator_put(ctrl_reg);
+
        vctrl->rdev = devm_regulator_register(&pdev->dev, rdesc, &cfg);
        if (IS_ERR(vctrl->rdev)) {
                ret = PTR_ERR(vctrl->rdev);
index 328f70f..5656cac 100644 (file)
@@ -116,7 +116,7 @@ config RESET_LPC18XX
 
 config RESET_MCHP_SPARX5
        bool "Microchip Sparx5 reset driver"
-       depends on HAS_IOMEM || COMPILE_TEST
+       depends on ARCH_SPARX5 || COMPILE_TEST
        default y if SPARX5_SWITCH
        select MFD_SYSCON
        help
index daa425e..59dc0ff 100644 (file)
@@ -53,7 +53,8 @@ static int zynqmp_reset_status(struct reset_controller_dev *rcdev,
                               unsigned long id)
 {
        struct zynqmp_reset_data *priv = to_zynqmp_reset_data(rcdev);
-       int val, err;
+       int err;
+       u32 val;
 
        err = zynqmp_pm_reset_get_status(priv->data->reset_id + id, &val);
        if (err)
index 6bb7752..db59872 100644 (file)
@@ -552,7 +552,7 @@ static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev,
        dbio = dreq->bio;
        recid = first_rec;
        rq_for_each_segment(bv, req, iter) {
-               dst = page_address(bv.bv_page) + bv.bv_offset;
+               dst = bvec_virt(&bv);
                for (off = 0; off < bv.bv_len; off += blksize) {
                        memset(dbio, 0, sizeof (struct dasd_diag_bio));
                        dbio->type = rw_cmd;
index 0de1a46..460e0f1 100644 (file)
@@ -1004,15 +1004,23 @@ static unsigned char dasd_eckd_path_access(void *conf_data, int conf_len)
 static void dasd_eckd_store_conf_data(struct dasd_device *device,
                                      struct dasd_conf_data *conf_data, int chp)
 {
+       struct dasd_eckd_private *private = device->private;
        struct channel_path_desc_fmt0 *chp_desc;
        struct subchannel_id sch_id;
+       void *cdp;
 
-       ccw_device_get_schid(device->cdev, &sch_id);
        /*
         * path handling and read_conf allocate data
         * free it before replacing the pointer
+        * also replace the old private->conf_data pointer
+        * with the new one if this points to the same data
         */
-       kfree(device->path[chp].conf_data);
+       cdp = device->path[chp].conf_data;
+       if (private->conf_data == cdp) {
+               private->conf_data = (void *)conf_data;
+               dasd_eckd_identify_conf_parts(private);
+       }
+       ccw_device_get_schid(device->cdev, &sch_id);
        device->path[chp].conf_data = conf_data;
        device->path[chp].cssid = sch_id.cssid;
        device->path[chp].ssid = sch_id.ssid;
@@ -1020,6 +1028,7 @@ static void dasd_eckd_store_conf_data(struct dasd_device *device,
        if (chp_desc)
                device->path[chp].chpid = chp_desc->chpid;
        kfree(chp_desc);
+       kfree(cdp);
 }
 
 static void dasd_eckd_clear_conf_data(struct dasd_device *device)
@@ -3267,7 +3276,7 @@ static int dasd_eckd_ese_read(struct dasd_ccw_req *cqr, struct irb *irb)
        end_blk = (curr_trk + 1) * recs_per_trk;
 
        rq_for_each_segment(bv, req, iter) {
-               dst = page_address(bv.bv_page) + bv.bv_offset;
+               dst = bvec_virt(&bv);
                for (off = 0; off < bv.bv_len; off += blksize) {
                        if (first_blk + blk_count >= end_blk) {
                                cqr->proc_bytes = blk_count * blksize;
@@ -3999,7 +4008,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
                              last_rec - recid + 1, cmd, basedev, blksize);
        }
        rq_for_each_segment(bv, req, iter) {
-               dst = page_address(bv.bv_page) + bv.bv_offset;
+               dst = bvec_virt(&bv);
                if (dasd_page_cache) {
                        char *copy = kmem_cache_alloc(dasd_page_cache,
                                                      GFP_DMA | __GFP_NOWARN);
@@ -4166,7 +4175,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
        idaw_dst = NULL;
        idaw_len = 0;
        rq_for_each_segment(bv, req, iter) {
-               dst = page_address(bv.bv_page) + bv.bv_offset;
+               dst = bvec_virt(&bv);
                seg_len = bv.bv_len;
                while (seg_len) {
                        if (new_track) {
@@ -4509,7 +4518,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
                new_track = 1;
                recid = first_rec;
                rq_for_each_segment(bv, req, iter) {
-                       dst = page_address(bv.bv_page) + bv.bv_offset;
+                       dst = bvec_virt(&bv);
                        seg_len = bv.bv_len;
                        while (seg_len) {
                                if (new_track) {
@@ -4542,7 +4551,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
                }
        } else {
                rq_for_each_segment(bv, req, iter) {
-                       dst = page_address(bv.bv_page) + bv.bv_offset;
+                       dst = bvec_virt(&bv);
                        last_tidaw = itcw_add_tidaw(itcw, 0x00,
                                                    dst, bv.bv_len);
                        if (IS_ERR(last_tidaw)) {
@@ -4778,7 +4787,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_raw(struct dasd_device *startdev,
                        idaws = idal_create_words(idaws, rawpadpage, PAGE_SIZE);
        }
        rq_for_each_segment(bv, req, iter) {
-               dst = page_address(bv.bv_page) + bv.bv_offset;
+               dst = bvec_virt(&bv);
                seg_len = bv.bv_len;
                if (cmd == DASD_ECKD_CCW_READ_TRACK)
                        memset(dst, 0, seg_len);
@@ -4839,7 +4848,7 @@ dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
        if (private->uses_cdl == 0 || recid > 2*blk_per_trk)
                ccw++;
        rq_for_each_segment(bv, req, iter) {
-               dst = page_address(bv.bv_page) + bv.bv_offset;
+               dst = bvec_virt(&bv);
                for (off = 0; off < bv.bv_len; off += blksize) {
                        /* Skip locate record. */
                        if (private->uses_cdl && recid <= 2*blk_per_trk)
index 3ad319a..e084f4d 100644 (file)
@@ -501,7 +501,7 @@ static struct dasd_ccw_req *dasd_fba_build_cp_regular(
        }
        recid = first_rec;
        rq_for_each_segment(bv, req, iter) {
-               dst = page_address(bv.bv_page) + bv.bv_offset;
+               dst = bvec_virt(&bv);
                if (dasd_page_cache) {
                        char *copy = kmem_cache_alloc(dasd_page_cache,
                                                      GFP_DMA | __GFP_NOWARN);
@@ -583,7 +583,7 @@ dasd_fba_free_cp(struct dasd_ccw_req *cqr, struct request *req)
        if (private->rdc_data.mode.bits.data_chain != 0)
                ccw++;
        rq_for_each_segment(bv, req, iter) {
-               dst = page_address(bv.bv_page) + bv.bv_offset;
+               dst = bvec_virt(&bv);
                for (off = 0; off < bv.bv_len; off += blksize) {
                        /* Skip locate record. */
                        if (private->rdc_data.mode.bits.data_chain == 0)
index 493e846..fa966e0 100644 (file)
@@ -24,6 +24,8 @@
 
 #include "dasd_int.h"
 
+static struct lock_class_key dasd_bio_compl_lkclass;
+
 /*
  * Allocate and register gendisk structure for device.
  */
@@ -38,13 +40,15 @@ int dasd_gendisk_alloc(struct dasd_block *block)
        if (base->devindex >= DASD_PER_MAJOR)
                return -EBUSY;
 
-       gdp = alloc_disk(1 << DASD_PARTN_BITS);
+       gdp = __alloc_disk_node(block->request_queue, NUMA_NO_NODE,
+                               &dasd_bio_compl_lkclass);
        if (!gdp)
                return -ENOMEM;
 
        /* Initialize gendisk structure. */
        gdp->major = DASD_MAJOR;
        gdp->first_minor = base->devindex << DASD_PARTN_BITS;
+       gdp->minors = 1 << DASD_PARTN_BITS;
        gdp->fops = &dasd_device_operations;
 
        /*
@@ -73,7 +77,6 @@ int dasd_gendisk_alloc(struct dasd_block *block)
            test_bit(DASD_FLAG_DEVICE_RO, &base->flags))
                set_disk_ro(gdp, 1);
        dasd_add_link_to_gendisk(gdp, base);
-       gdp->queue = block->request_queue;
        block->gdp = gdp;
        set_capacity(block->gdp, 0);
        device_add_disk(&base->cdev->dev, block->gdp, NULL);
index 9f64244..468cbeb 100644 (file)
@@ -575,10 +575,8 @@ int dasd_ioctl(struct block_device *bdev, fmode_t mode,
        else
                argp = (void __user *)arg;
 
-       if ((_IOC_DIR(cmd) != _IOC_NONE) && !arg) {
-               PRINT_DEBUG("empty data ptr");
+       if ((_IOC_DIR(cmd) != _IOC_NONE) && !arg)
                return -EINVAL;
-       }
 
        base = dasd_device_from_gendisk(bdev->bd_disk);
        if (!base)
index 29180bd..5be3d1c 100644 (file)
@@ -892,8 +892,7 @@ dcssblk_submit_bio(struct bio *bio)
 
        index = (bio->bi_iter.bi_sector >> 3);
        bio_for_each_segment(bvec, bio, iter) {
-               page_addr = (unsigned long)
-                       page_address(bvec.bv_page) + bvec.bv_offset;
+               page_addr = (unsigned long)bvec_virt(&bvec);
                source_addr = dev_info->start + (index<<12) + bytes_done;
                if (unlikely((page_addr & 4095) != 0) || (bvec.bv_len & 4095) != 0)
                        // More paranoia.
index 792b4bf..b4b84e3 100644 (file)
 #include <linux/platform_device.h>
 #include <asm/types.h>
 #include <asm/irq.h>
+#include <asm/debug.h>
 
 #include "sclp.h"
 
 #define SCLP_HEADER            "sclp: "
 
+struct sclp_trace_entry {
+       char id[4];
+       u32 a;
+       u64 b;
+};
+
+#define SCLP_TRACE_ENTRY_SIZE          sizeof(struct sclp_trace_entry)
+#define SCLP_TRACE_MAX_SIZE            128
+#define SCLP_TRACE_EVENT_MAX_SIZE      64
+
+/* Debug trace area intended for all entries in abbreviated form. */
+DEFINE_STATIC_DEBUG_INFO(sclp_debug, "sclp", 8, 1, SCLP_TRACE_ENTRY_SIZE,
+                        &debug_hex_ascii_view);
+
+/* Error trace area intended for full entries relating to failed requests. */
+DEFINE_STATIC_DEBUG_INFO(sclp_debug_err, "sclp_err", 4, 1,
+                        SCLP_TRACE_ENTRY_SIZE, &debug_hex_ascii_view);
+
 /* Lock to protect internal data consistency. */
 static DEFINE_SPINLOCK(sclp_lock);
 
@@ -54,6 +73,114 @@ int sclp_console_drop = 1;
 /* Number of times the console dropped buffer pages */
 unsigned long sclp_console_full;
 
+/* The currently active SCLP command word. */
+static sclp_cmdw_t active_cmd;
+
+static inline void sclp_trace(int prio, char *id, u32 a, u64 b, bool err)
+{
+       struct sclp_trace_entry e;
+
+       memset(&e, 0, sizeof(e));
+       strncpy(e.id, id, sizeof(e.id));
+       e.a = a;
+       e.b = b;
+       debug_event(&sclp_debug, prio, &e, sizeof(e));
+       if (err)
+               debug_event(&sclp_debug_err, 0, &e, sizeof(e));
+}
+
+static inline int no_zeroes_len(void *data, int len)
+{
+       char *d = data;
+
+       /* Minimize trace area usage by not tracing trailing zeroes. */
+       while (len > SCLP_TRACE_ENTRY_SIZE && d[len - 1] == 0)
+               len--;
+
+       return len;
+}
+
+static inline void sclp_trace_bin(int prio, void *d, int len, int errlen)
+{
+       debug_event(&sclp_debug, prio, d, no_zeroes_len(d, len));
+       if (errlen)
+               debug_event(&sclp_debug_err, 0, d, no_zeroes_len(d, errlen));
+}
+
+static inline int abbrev_len(sclp_cmdw_t cmd, struct sccb_header *sccb)
+{
+       struct evbuf_header *evbuf = (struct evbuf_header *)(sccb + 1);
+       int len = sccb->length, limit = SCLP_TRACE_MAX_SIZE;
+
+       /* Full SCCB tracing if debug level is set to max. */
+       if (sclp_debug.level == DEBUG_MAX_LEVEL)
+               return len;
+
+       /* Minimal tracing for console writes. */
+       if (cmd == SCLP_CMDW_WRITE_EVENT_DATA &&
+           (evbuf->type == EVTYP_MSG  || evbuf->type == EVTYP_VT220MSG))
+               limit = SCLP_TRACE_ENTRY_SIZE;
+
+       return min(len, limit);
+}
+
+static inline void sclp_trace_sccb(int prio, char *id, u32 a, u64 b,
+                                  sclp_cmdw_t cmd, struct sccb_header *sccb,
+                                  bool err)
+{
+       sclp_trace(prio, id, a, b, err);
+       if (sccb) {
+               sclp_trace_bin(prio + 1, sccb, abbrev_len(cmd, sccb),
+                              err ? sccb->length : 0);
+       }
+}
+
+static inline void sclp_trace_evbuf(int prio, char *id, u32 a, u64 b,
+                                   struct evbuf_header *evbuf, bool err)
+{
+       sclp_trace(prio, id, a, b, err);
+       sclp_trace_bin(prio + 1, evbuf,
+                      min((int)evbuf->length, (int)SCLP_TRACE_EVENT_MAX_SIZE),
+                      err ? evbuf->length : 0);
+}
+
+static inline void sclp_trace_req(int prio, char *id, struct sclp_req *req,
+                                 bool err)
+{
+       struct sccb_header *sccb = req->sccb;
+       union {
+               struct {
+                       u16 status;
+                       u16 response;
+                       u16 timeout;
+                       u16 start_count;
+               };
+               u64 b;
+       } summary;
+
+       summary.status = req->status;
+       summary.response = sccb ? sccb->response_code : 0;
+       summary.timeout = (u16)req->queue_timeout;
+       summary.start_count = (u16)req->start_count;
+
+       sclp_trace(prio, id, (u32)(addr_t)sccb, summary.b, err);
+}
+
+static inline void sclp_trace_register(int prio, char *id, u32 a, u64 b,
+                                      struct sclp_register *reg)
+{
+       struct {
+               u64 receive;
+               u64 send;
+       } d;
+
+       d.receive = reg->receive_mask;
+       d.send = reg->send_mask;
+
+       sclp_trace(prio, id, a, b, false);
+       sclp_trace_bin(prio, &d, sizeof(d), 0);
+}
+
 static int __init sclp_setup_console_pages(char *str)
 {
        int pages, rc;
@@ -162,6 +289,9 @@ static void sclp_request_timeout(bool force_restart)
 {
        unsigned long flags;
 
+       /* TMO: A timeout occurred (a=force_restart) */
+       sclp_trace(2, "TMO", force_restart, 0, true);
+
        spin_lock_irqsave(&sclp_lock, flags);
        if (force_restart) {
                if (sclp_running_state == sclp_running_state_running) {
@@ -237,6 +367,12 @@ static void sclp_req_queue_timeout(struct timer_list *unused)
 
        do {
                req = __sclp_req_queue_remove_expired_req();
+
+               if (req) {
+                       /* RQTM: Request timed out (a=sccb, b=summary) */
+                       sclp_trace_req(2, "RQTM", req, true);
+               }
+
                if (req && req->callback)
                        req->callback(req, req->callback_data);
        } while (req);
@@ -248,6 +384,25 @@ static void sclp_req_queue_timeout(struct timer_list *unused)
        spin_unlock_irqrestore(&sclp_lock, flags);
 }
 
+static int sclp_service_call_trace(sclp_cmdw_t command, void *sccb)
+{
+       static u64 srvc_count;
+       int rc;
+
+       /* SRV1: Service call about to be issued (a=command, b=sccb address) */
+       sclp_trace_sccb(0, "SRV1", command, (u64)sccb, command, sccb, false);
+
+       rc = sclp_service_call(command, sccb);
+
+       /* SRV2: Service call was issued (a=rc, b=SRVC sequence number) */
+       sclp_trace(0, "SRV2", -rc, ++srvc_count, rc != 0);
+
+       if (rc == 0)
+               active_cmd = command;
+
+       return rc;
+}
+
 /* Try to start a request. Return zero if the request was successfully
  * started or if it will be started at a later time. Return non-zero otherwise.
  * Called while sclp_lock is locked. */
@@ -259,7 +414,7 @@ __sclp_start_request(struct sclp_req *req)
        if (sclp_running_state != sclp_running_state_idle)
                return 0;
        del_timer(&sclp_request_timer);
-       rc = sclp_service_call(req->command, req->sccb);
+       rc = sclp_service_call_trace(req->command, req->sccb);
        req->start_count++;
 
        if (rc == 0) {
@@ -309,6 +464,10 @@ sclp_process_queue(void)
                }
                /* Post-processing for aborted request */
                list_del(&req->list);
+
+               /* RQAB: Request aborted (a=sccb, b=summary) */
+               sclp_trace_req(2, "RQAB", req, true);
+
                if (req->callback) {
                        spin_unlock_irqrestore(&sclp_lock, flags);
                        req->callback(req, req->callback_data);
@@ -341,6 +500,10 @@ sclp_add_request(struct sclp_req *req)
                spin_unlock_irqrestore(&sclp_lock, flags);
                return -EIO;
        }
+
+       /* RQAD: Request was added (a=sccb, b=caller) */
+       sclp_trace(2, "RQAD", (u32)(addr_t)req->sccb, _RET_IP_, false);
+
        req->status = SCLP_REQ_QUEUED;
        req->start_count = 0;
        list_add_tail(&req->list, &sclp_req_queue);
@@ -394,6 +557,11 @@ sclp_dispatch_evbufs(struct sccb_header *sccb)
                        else
                                reg = NULL;
                }
+
+               /* EVNT: Event callback (b=receiver) */
+               sclp_trace_evbuf(2, "EVNT", 0, reg ? (u64)reg->receiver_fn : 0,
+                                evbuf, !reg);
+
                if (reg && reg->receiver_fn) {
                        spin_unlock_irqrestore(&sclp_lock, flags);
                        reg->receiver_fn(evbuf);
@@ -455,6 +623,30 @@ __sclp_find_req(u32 sccb)
        return NULL;
 }
 
+static bool ok_response(u32 sccb_int, sclp_cmdw_t cmd)
+{
+       struct sccb_header *sccb = (struct sccb_header *)(addr_t)sccb_int;
+       struct evbuf_header *evbuf;
+       u16 response;
+
+       if (!sccb)
+               return true;
+
+       /* Check SCCB response. */
+       response = sccb->response_code & 0xff;
+       if (response != 0x10 && response != 0x20)
+               return false;
+
+       /* Check event-processed flag on outgoing events. */
+       if (cmd == SCLP_CMDW_WRITE_EVENT_DATA) {
+               evbuf = (struct evbuf_header *)(sccb + 1);
+               if (!(evbuf->flags & 0x80))
+                       return false;
+       }
+
+       return true;
+}
+
 /* Handler for external interruption. Perform request post-processing.
  * Prepare read event data request if necessary. Start processing of next
  * request on queue. */
@@ -469,6 +661,12 @@ static void sclp_interrupt_handler(struct ext_code ext_code,
        spin_lock(&sclp_lock);
        finished_sccb = param32 & 0xfffffff8;
        evbuf_pending = param32 & 0x3;
+
+       /* INT: Interrupt received (a=intparm, b=cmd) */
+       sclp_trace_sccb(0, "INT", param32, active_cmd, active_cmd,
+                       (struct sccb_header *)(addr_t)finished_sccb,
+                       !ok_response(finished_sccb, active_cmd));
+
        if (finished_sccb) {
                del_timer(&sclp_request_timer);
                sclp_running_state = sclp_running_state_reset_pending;
@@ -477,13 +675,21 @@ static void sclp_interrupt_handler(struct ext_code ext_code,
                        /* Request post-processing */
                        list_del(&req->list);
                        req->status = SCLP_REQ_DONE;
+
+                       /* RQOK: Request success (a=sccb, b=summary) */
+                       sclp_trace_req(2, "RQOK", req, false);
+
                        if (req->callback) {
                                spin_unlock(&sclp_lock);
                                req->callback(req, req->callback_data);
                                spin_lock(&sclp_lock);
                        }
+               } else {
+                       /* UNEX: Unexpected SCCB completion (a=sccb address) */
+                       sclp_trace(0, "UNEX", finished_sccb, 0, true);
                }
                sclp_running_state = sclp_running_state_idle;
+               active_cmd = 0;
        }
        if (evbuf_pending &&
            sclp_activation_state == sclp_activation_state_active)
@@ -507,9 +713,13 @@ sclp_sync_wait(void)
        unsigned long long old_tick;
        unsigned long flags;
        unsigned long cr0, cr0_sync;
+       static u64 sync_count;
        u64 timeout;
        int irq_context;
 
+       /* SYN1: Synchronous wait start (a=runstate, b=sync count) */
+       sclp_trace(4, "SYN1", sclp_running_state, ++sync_count, false);
+
        /* We'll be disabling timer interrupts, so we need a custom timeout
         * mechanism */
        timeout = 0;
@@ -547,6 +757,9 @@ sclp_sync_wait(void)
                _local_bh_enable();
        local_tick_enable(old_tick);
        local_irq_restore(flags);
+
+       /* SYN2: Synchronous wait end (a=runstate, b=sync_count) */
+       sclp_trace(4, "SYN2", sclp_running_state, sync_count, false);
 }
 EXPORT_SYMBOL(sclp_sync_wait);
 
@@ -576,8 +789,13 @@ sclp_dispatch_state_change(void)
                                reg = NULL;
                }
                spin_unlock_irqrestore(&sclp_lock, flags);
-               if (reg && reg->state_change_fn)
+               if (reg && reg->state_change_fn) {
+                       /* STCG: State-change callback (b=callback) */
+                       sclp_trace(2, "STCG", 0, (u64)reg->state_change_fn,
+                                  false);
+
                        reg->state_change_fn(reg);
+               }
        } while (reg);
 }
 
@@ -651,6 +869,9 @@ sclp_register(struct sclp_register *reg)
        sccb_mask_t send_mask;
        int rc;
 
+       /* REG: Event listener registered (b=caller) */
+       sclp_trace_register(2, "REG", 0, _RET_IP_, reg);
+
        rc = sclp_init();
        if (rc)
                return rc;
@@ -683,6 +904,9 @@ sclp_unregister(struct sclp_register *reg)
 {
        unsigned long flags;
 
+       /* UREG: Event listener unregistered (b=caller) */
+       sclp_trace_register(2, "UREG", 0, _RET_IP_, reg);
+
        spin_lock_irqsave(&sclp_lock, flags);
        list_del(&reg->list);
        spin_unlock_irqrestore(&sclp_lock, flags);
@@ -932,7 +1156,7 @@ sclp_check_interface(void)
        for (retry = 0; retry <= SCLP_INIT_RETRY; retry++) {
                __sclp_make_init_req(0, 0);
                sccb = (struct init_sccb *) sclp_init_req.sccb;
-               rc = sclp_service_call(sclp_init_req.command, sccb);
+               rc = sclp_service_call_trace(sclp_init_req.command, sccb);
                if (rc == -EIO)
                        break;
                sclp_init_req.status = SCLP_REQ_RUNNING;
index 8dd8ad8..5e43410 100644 (file)
@@ -310,8 +310,6 @@ extern int sclp_console_drop;
 extern unsigned long sclp_console_full;
 extern bool sclp_mask_compat_mode;
 
-extern char *sclp_early_sccb;
-
 void sclp_early_wait_irq(void);
 int sclp_early_cmd(sclp_cmdw_t cmd, void *sccb);
 unsigned int sclp_early_con_check_linemode(struct init_sccb *sccb);
index ab0518c..998933e 100644 (file)
@@ -457,7 +457,7 @@ static int __init sclp_detect_standby_memory(void)
        struct read_storage_sccb *sccb;
        int i, id, assigned, rc;
 
-       if (OLDMEM_BASE) /* No standby memory in kdump mode */
+       if (oldmem_data.start) /* No standby memory in kdump mode */
                return 0;
        if ((sclp.facilities & 0xe00000000000ULL) != 0xe00000000000ULL)
                return 0;
index 039b207..c365110 100644 (file)
@@ -50,12 +50,12 @@ static void sclp_cpu_capability_notify(struct work_struct *work)
 
        s390_update_cpu_mhz();
        pr_info("CPU capability may have changed\n");
-       get_online_cpus();
+       cpus_read_lock();
        for_each_online_cpu(cpu) {
                dev = get_cpu_device(cpu);
                kobject_uevent(&dev->kobj, KOBJ_CHANGE);
        }
-       put_online_cpus();
+       cpus_read_unlock();
 }
 
 static void __ref sclp_cpu_change_notify(struct work_struct *work)
index b7329af..676634d 100644 (file)
@@ -17,7 +17,7 @@
 
 static struct read_info_sccb __bootdata(sclp_info_sccb);
 static int __bootdata(sclp_info_sccb_valid);
-char *sclp_early_sccb = (char *) EARLY_SCCB_OFFSET;
+char *__bootdata(sclp_early_sccb);
 int sclp_init_state = sclp_init_state_uninitialized;
 /*
  * Used to keep track of the size of the event masks. Qemu until version 2.11
@@ -211,6 +211,11 @@ static int sclp_early_setup(int disable, int *have_linemode, int *have_vt220)
        return rc;
 }
 
+void sclp_early_set_buffer(void *sccb)
+{
+       sclp_early_sccb = sccb;
+}
+
 /*
  * Output one or more lines of text on the SCLP console (VT220 and /
  * or line-mode).
@@ -235,11 +240,20 @@ void sclp_early_printk(const char *str)
        __sclp_early_printk(str, strlen(str));
 }
 
+/*
+ * We can't pass sclp_info_sccb to sclp_early_cmd() here directly,
+ * because it might not fulfil the requirements for a SCLP communication buffer:
+ *   - lie below 2G in memory
+ *   - be page-aligned
+ * Therefore, we use the buffer sclp_early_sccb (which fulfils all those
+ * requirements) temporarily for communication and copy a received response
+ * back into the buffer sclp_info_sccb upon successful completion.
+ */
 int __init sclp_early_read_info(void)
 {
        int i;
        int length = test_facility(140) ? EXT_SCCB_READ_SCP : PAGE_SIZE;
-       struct read_info_sccb *sccb = &sclp_info_sccb;
+       struct read_info_sccb *sccb = (struct read_info_sccb *)sclp_early_sccb;
        sclp_cmdw_t commands[] = {SCLP_CMDW_READ_SCP_INFO_FORCED,
                                  SCLP_CMDW_READ_SCP_INFO};
 
@@ -251,6 +265,7 @@ int __init sclp_early_read_info(void)
                if (sclp_early_cmd(commands[i], sccb))
                        break;
                if (sccb->header.response_code == 0x10) {
+                       memcpy(&sclp_info_sccb, sccb, length);
                        sclp_info_sccb_valid = 1;
                        return 0;
                }
index b5b0848..3ba2d93 100644 (file)
@@ -269,7 +269,7 @@ static int __init zcore_init(void)
 
        if (!is_ipl_type_dump())
                return -ENODATA;
-       if (OLDMEM_BASE)
+       if (oldmem_data.start)
                return -ENODATA;
 
        zcore_dbf = debug_register("zcore", 4, 1, 4 * sizeof(long));
index a974943..0ce48a3 100644 (file)
@@ -430,9 +430,26 @@ static ssize_t pimpampom_show(struct device *dev,
 }
 static DEVICE_ATTR_RO(pimpampom);
 
+static ssize_t dev_busid_show(struct device *dev,
+                             struct device_attribute *attr,
+                             char *buf)
+{
+       struct subchannel *sch = to_subchannel(dev);
+       struct pmcw *pmcw = &sch->schib.pmcw;
+
+       if ((pmcw->st == SUBCHANNEL_TYPE_IO ||
+            pmcw->st == SUBCHANNEL_TYPE_MSG) && pmcw->dnv)
+               return sysfs_emit(buf, "0.%x.%04x\n", sch->schid.ssid,
+                                 pmcw->dev);
+       else
+               return sysfs_emit(buf, "none\n");
+}
+static DEVICE_ATTR_RO(dev_busid);
+
 static struct attribute *io_subchannel_type_attrs[] = {
        &dev_attr_chpids.attr,
        &dev_attr_pimpampom.attr,
+       &dev_attr_dev_busid.attr,
        NULL,
 };
 ATTRIBUTE_GROUPS(io_subchannel_type);
@@ -886,6 +903,18 @@ static ssize_t real_cssid_show(struct device *dev, struct device_attribute *a,
 }
 static DEVICE_ATTR_RO(real_cssid);
 
+static ssize_t rescan_store(struct device *dev, struct device_attribute *a,
+                           const char *buf, size_t count)
+{
+       CIO_TRACE_EVENT(4, "usr-rescan");
+
+       css_schedule_eval_all();
+       css_complete_work();
+
+       return count;
+}
+static DEVICE_ATTR_WO(rescan);
+
 static ssize_t cm_enable_show(struct device *dev, struct device_attribute *a,
                              char *buf)
 {
@@ -932,6 +961,7 @@ static umode_t cm_enable_mode(struct kobject *kobj, struct attribute *attr,
 
 static struct attribute *cssdev_attrs[] = {
        &dev_attr_real_cssid.attr,
+       &dev_attr_rescan.attr,
        NULL,
 };
 
index f69ffbb..99c2212 100644 (file)
@@ -126,21 +126,9 @@ static inline int do_eqbs(u64 token, unsigned char *state, int queue,
 
 struct qdio_irq;
 
-struct siga_flag {
-       u8 input:1;
-       u8 output:1;
-       u8 sync:1;
-       u8 sync_after_ai:1;
-       u8 sync_out_after_pci:1;
-       u8:3;
-} __attribute__ ((packed));
-
 struct qdio_dev_perf_stat {
        unsigned int adapter_int;
        unsigned int qdio_int;
-       unsigned int pci_request_int;
-
-       unsigned int tasklet_outbound;
 
        unsigned int siga_read;
        unsigned int siga_write;
@@ -150,7 +138,6 @@ struct qdio_dev_perf_stat {
        unsigned int stop_polling;
        unsigned int inbound_queue_full;
        unsigned int outbound_call;
-       unsigned int outbound_handler;
        unsigned int outbound_queue_full;
        unsigned int fast_requeue;
        unsigned int target_full;
@@ -180,12 +167,6 @@ struct qdio_input_q {
 };
 
 struct qdio_output_q {
-       /* PCIs are enabled for the queue */
-       int pci_out_enabled;
-       /* timer to check for more outbound work */
-       struct timer_list timer;
-       /* tasklet to check for completions */
-       struct tasklet_struct tasklet;
 };
 
 /*
@@ -250,8 +231,7 @@ struct qdio_irq {
        unsigned long sch_token;        /* QEBSM facility */
 
        enum qdio_irq_states state;
-
-       struct siga_flag siga_flag;     /* siga sync information from qdioac */
+       u8 qdioac1;
 
        int nr_input_qs;
        int nr_output_qs;
@@ -263,7 +243,6 @@ struct qdio_irq {
        struct qdio_ssqd_desc ssqd_desc;
        void (*orig_handler) (struct ccw_device *, unsigned long, struct irb *);
 
-       unsigned int scan_threshold;    /* used SBALs before tasklet schedule */
        int perf_stat_enabled;
 
        struct qdr *qdr;
@@ -325,13 +304,9 @@ static inline void qdio_deliver_irq(struct qdio_irq *irq)
 #define pci_out_supported(irq) ((irq)->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED)
 #define is_qebsm(q)                    (q->irq_ptr->sch_token != 0)
 
-#define need_siga_in(q)                        (q->irq_ptr->siga_flag.input)
-#define need_siga_out(q)               (q->irq_ptr->siga_flag.output)
-#define need_siga_sync(q)              (unlikely(q->irq_ptr->siga_flag.sync))
-#define need_siga_sync_after_ai(q)     \
-       (unlikely(q->irq_ptr->siga_flag.sync_after_ai))
-#define need_siga_sync_out_after_pci(q)        \
-       (unlikely(q->irq_ptr->siga_flag.sync_out_after_pci))
+#define qdio_need_siga_in(irq)         ((irq)->qdioac1 & AC1_SIGA_INPUT_NEEDED)
+#define qdio_need_siga_out(irq)                ((irq)->qdioac1 & AC1_SIGA_OUTPUT_NEEDED)
+#define qdio_need_siga_sync(irq)       (unlikely((irq)->qdioac1 & AC1_SIGA_SYNC_NEEDED))
 
 #define for_each_input_queue(irq_ptr, q, i)            \
        for (i = 0; i < irq_ptr->nr_input_qs &&         \
@@ -345,11 +320,6 @@ static inline void qdio_deliver_irq(struct qdio_irq *irq)
 #define sub_buf(bufnr, dec)    QDIO_BUFNR((bufnr) - (dec))
 #define prev_buf(bufnr)                sub_buf(bufnr, 1)
 
-#define queue_irqs_enabled(q)                  \
-       (test_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state) == 0)
-#define queue_irqs_disabled(q)                 \
-       (test_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state) != 0)
-
 extern u64 last_ai_time;
 
 /* prototypes for thin interrupt */
@@ -360,8 +330,6 @@ void qdio_thinint_exit(void);
 int test_nonshared_ind(struct qdio_irq *);
 
 /* prototypes for setup */
-void qdio_outbound_tasklet(struct tasklet_struct *t);
-void qdio_outbound_timer(struct timer_list *t);
 void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
                      struct irb *irb);
 int qdio_allocate_qs(struct qdio_irq *irq_ptr, int nr_input_qs,
index 00384f5..4bb7965 100644 (file)
@@ -197,8 +197,6 @@ DEFINE_SHOW_ATTRIBUTE(ssqd);
 static char *qperf_names[] = {
        "Assumed adapter interrupts",
        "QDIO interrupts",
-       "Requested PCIs",
-       "Outbound tasklet runs",
        "SIGA read",
        "SIGA write",
        "SIGA sync",
@@ -206,7 +204,6 @@ static char *qperf_names[] = {
        "Inbound stop_polling",
        "Inbound queue full",
        "Outbound calls",
-       "Outbound handler",
        "Outbound queue full",
        "Outbound fast_requeue",
        "Outbound target_full",
index 3052fab..45e810c 100644 (file)
@@ -10,7 +10,6 @@
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
-#include <linux/timer.h>
 #include <linux/delay.h>
 #include <linux/gfp.h>
 #include <linux/io.h>
@@ -304,12 +303,22 @@ static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output,
        return (cc) ? -EIO : 0;
 }
 
+static inline int qdio_sync_input_queue(struct qdio_q *q)
+{
+       return qdio_siga_sync(q, 0, q->mask);
+}
+
+static inline int qdio_sync_output_queue(struct qdio_q *q)
+{
+       return qdio_siga_sync(q, q->mask, 0);
+}
+
 static inline int qdio_siga_sync_q(struct qdio_q *q)
 {
        if (q->is_input_q)
-               return qdio_siga_sync(q, 0, q->mask);
+               return qdio_sync_input_queue(q);
        else
-               return qdio_siga_sync(q, q->mask, 0);
+               return qdio_sync_output_queue(q);
 }
 
 static int qdio_siga_output(struct qdio_q *q, unsigned int count,
@@ -373,22 +382,10 @@ static inline int qdio_siga_input(struct qdio_q *q)
        return (cc) ? -EIO : 0;
 }
 
-#define qdio_siga_sync_out(q) qdio_siga_sync(q, ~0U, 0)
-#define qdio_siga_sync_all(q) qdio_siga_sync(q, ~0U, ~0U)
-
-static inline void qdio_sync_queues(struct qdio_q *q)
-{
-       /* PCI capable outbound queues will also be scanned so sync them too */
-       if (pci_out_supported(q->irq_ptr))
-               qdio_siga_sync_all(q);
-       else
-               qdio_siga_sync_q(q);
-}
-
 int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
                        unsigned char *state)
 {
-       if (need_siga_sync(q))
+       if (qdio_need_siga_sync(q->irq_ptr))
                qdio_siga_sync_q(q);
        return get_buf_state(q, bufnr, state, 0);
 }
@@ -455,10 +452,9 @@ static int get_inbound_buffer_frontier(struct qdio_q *q, unsigned int start,
        if (!count)
                return 0;
 
-       /*
-        * No siga sync here, as a PCI or we after a thin interrupt
-        * already sync'ed the queues.
-        */
+       if (qdio_need_siga_sync(q->irq_ptr))
+               qdio_sync_input_queue(q);
+
        count = get_buf_states(q, start, &state, count, 1);
        if (!count)
                return 0;
@@ -510,8 +506,8 @@ static inline int qdio_inbound_q_done(struct qdio_q *q, unsigned int start)
        if (!atomic_read(&q->nr_buf_used))
                return 1;
 
-       if (need_siga_sync(q))
-               qdio_siga_sync_q(q);
+       if (qdio_need_siga_sync(q->irq_ptr))
+               qdio_sync_input_queue(q);
        get_buf_state(q, start, &state, 0);
 
        if (state == SLSB_P_INPUT_PRIMED || state == SLSB_P_INPUT_ERROR)
@@ -521,15 +517,6 @@ static inline int qdio_inbound_q_done(struct qdio_q *q, unsigned int start)
        return 1;
 }
 
-static inline int qdio_tasklet_schedule(struct qdio_q *q)
-{
-       if (likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE)) {
-               tasklet_schedule(&q->u.out.tasklet);
-               return 0;
-       }
-       return -EPERM;
-}
-
 static int get_outbound_buffer_frontier(struct qdio_q *q, unsigned int start,
                                        unsigned int *error)
 {
@@ -538,17 +525,13 @@ static int get_outbound_buffer_frontier(struct qdio_q *q, unsigned int start,
 
        q->timestamp = get_tod_clock_fast();
 
-       if (need_siga_sync(q))
-               if (((queue_type(q) != QDIO_IQDIO_QFMT) &&
-                   !pci_out_supported(q->irq_ptr)) ||
-                   (queue_type(q) == QDIO_IQDIO_QFMT &&
-                   multicast_outbound(q)))
-                       qdio_siga_sync_q(q);
-
        count = atomic_read(&q->nr_buf_used);
        if (!count)
                return 0;
 
+       if (qdio_need_siga_sync(q->irq_ptr))
+               qdio_sync_output_queue(q);
+
        count = get_buf_states(q, start, &state, count, 0);
        if (!count)
                return 0;
@@ -595,19 +578,13 @@ static int get_outbound_buffer_frontier(struct qdio_q *q, unsigned int start,
        }
 }
 
-/* all buffers processed? */
-static inline int qdio_outbound_q_done(struct qdio_q *q)
-{
-       return atomic_read(&q->nr_buf_used) == 0;
-}
-
 static int qdio_kick_outbound_q(struct qdio_q *q, unsigned int count,
                                unsigned long aob)
 {
        int retries = 0, cc;
        unsigned int busy_bit;
 
-       if (!need_siga_out(q))
+       if (!qdio_need_siga_out(q->irq_ptr))
                return 0;
 
        DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr);
@@ -644,75 +621,6 @@ retry:
        return cc;
 }
 
-void qdio_outbound_tasklet(struct tasklet_struct *t)
-{
-       struct qdio_output_q *out_q = from_tasklet(out_q, t, tasklet);
-       struct qdio_q *q = container_of(out_q, struct qdio_q, u.out);
-       unsigned int start = q->first_to_check;
-       unsigned int error = 0;
-       int count;
-
-       qperf_inc(q, tasklet_outbound);
-       WARN_ON_ONCE(atomic_read(&q->nr_buf_used) < 0);
-
-       count = get_outbound_buffer_frontier(q, start, &error);
-       if (count) {
-               q->first_to_check = add_buf(start, count);
-
-               if (q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE) {
-                       qperf_inc(q, outbound_handler);
-                       DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: s:%02x c:%02x",
-                                     start, count);
-
-                       q->handler(q->irq_ptr->cdev, error, q->nr, start,
-                                  count, q->irq_ptr->int_parm);
-               }
-       }
-
-       if (queue_type(q) == QDIO_ZFCP_QFMT && !pci_out_supported(q->irq_ptr) &&
-           !qdio_outbound_q_done(q))
-               goto sched;
-
-       if (q->u.out.pci_out_enabled)
-               return;
-
-       /*
-        * Now we know that queue type is either qeth without pci enabled
-        * or HiperSockets. Make sure buffer switch from PRIMED to EMPTY
-        * is noticed and outbound_handler is called after some time.
-        */
-       if (qdio_outbound_q_done(q))
-               del_timer_sync(&q->u.out.timer);
-       else
-               if (!timer_pending(&q->u.out.timer) &&
-                   likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE))
-                       mod_timer(&q->u.out.timer, jiffies + 10 * HZ);
-       return;
-
-sched:
-       qdio_tasklet_schedule(q);
-}
-
-void qdio_outbound_timer(struct timer_list *t)
-{
-       struct qdio_q *q = from_timer(q, t, u.out.timer);
-
-       qdio_tasklet_schedule(q);
-}
-
-static inline void qdio_check_outbound_pci_queues(struct qdio_irq *irq)
-{
-       struct qdio_q *out;
-       int i;
-
-       if (!pci_out_supported(irq) || !irq->scan_threshold)
-               return;
-
-       for_each_output_queue(irq, out, i)
-               if (!qdio_outbound_q_done(out))
-                       qdio_tasklet_schedule(out);
-}
-
 static inline void qdio_set_state(struct qdio_irq *irq_ptr,
                                  enum qdio_irq_states state)
 {
@@ -734,25 +642,11 @@ static void qdio_irq_check_sense(struct qdio_irq *irq_ptr, struct irb *irb)
 /* PCI interrupt handler */
 static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
 {
-       int i;
-       struct qdio_q *q;
-
        if (unlikely(irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
                return;
 
        qdio_deliver_irq(irq_ptr);
        irq_ptr->last_data_irq_time = S390_lowcore.int_clock;
-
-       if (!pci_out_supported(irq_ptr) || !irq_ptr->scan_threshold)
-               return;
-
-       for_each_output_queue(irq_ptr, q, i) {
-               if (qdio_outbound_q_done(q))
-                       continue;
-               if (need_siga_sync(q) && need_siga_sync_out_after_pci(q))
-                       qdio_siga_sync_q(q);
-               qdio_tasklet_schedule(q);
-       }
 }
 
 static void qdio_handle_activate_check(struct qdio_irq *irq_ptr,
@@ -879,15 +773,34 @@ int qdio_get_ssqd_desc(struct ccw_device *cdev,
 }
 EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc);
 
-static void qdio_shutdown_queues(struct qdio_irq *irq_ptr)
+static int qdio_cancel_ccw(struct qdio_irq *irq, int how)
 {
-       struct qdio_q *q;
-       int i;
+       struct ccw_device *cdev = irq->cdev;
+       long timeout;
+       int rc;
 
-       for_each_output_queue(irq_ptr, q, i) {
-               del_timer_sync(&q->u.out.timer);
-               tasklet_kill(&q->u.out.tasklet);
+       spin_lock_irq(get_ccwdev_lock(cdev));
+       qdio_set_state(irq, QDIO_IRQ_STATE_CLEANUP);
+       if (how & QDIO_FLAG_CLEANUP_USING_CLEAR)
+               rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
+       else
+               /* default behaviour is halt */
+               rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
+       spin_unlock_irq(get_ccwdev_lock(cdev));
+       if (rc) {
+               DBF_ERROR("%4x SHUTD ERR", irq->schid.sch_no);
+               DBF_ERROR("rc:%4d", rc);
+               return rc;
        }
+
+       timeout = wait_event_interruptible_timeout(cdev->private->wait_q,
+                                                  irq->state == QDIO_IRQ_STATE_INACTIVE ||
+                                                  irq->state == QDIO_IRQ_STATE_ERR,
+                                                  10 * HZ);
+       if (timeout <= 0)
+               rc = (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;
+
+       return rc;
 }
 
 /**
@@ -919,35 +832,13 @@ int qdio_shutdown(struct ccw_device *cdev, int how)
        }
 
        /*
-        * Indicate that the device is going down. Scheduling the queue
-        * tasklets is forbidden from here on.
+        * Indicate that the device is going down.
         */
        qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
 
-       qdio_shutdown_queues(irq_ptr);
        qdio_shutdown_debug_entries(irq_ptr);
 
-       /* cleanup subchannel */
-       spin_lock_irq(get_ccwdev_lock(cdev));
-       qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP);
-       if (how & QDIO_FLAG_CLEANUP_USING_CLEAR)
-               rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
-       else
-               /* default behaviour is halt */
-               rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
-       spin_unlock_irq(get_ccwdev_lock(cdev));
-       if (rc) {
-               DBF_ERROR("%4x SHUTD ERR", irq_ptr->schid.sch_no);
-               DBF_ERROR("rc:%4d", rc);
-               goto no_cleanup;
-       }
-
-       wait_event_interruptible_timeout(cdev->private->wait_q,
-               irq_ptr->state == QDIO_IRQ_STATE_INACTIVE ||
-               irq_ptr->state == QDIO_IRQ_STATE_ERR,
-               10 * HZ);
-
-no_cleanup:
+       rc = qdio_cancel_ccw(irq_ptr, how);
        qdio_shutdown_thinint(irq_ptr);
        qdio_shutdown_irq(irq_ptr);
 
@@ -1061,8 +952,6 @@ static void qdio_trace_init_data(struct qdio_irq *irq,
        DBF_DEV_EVENT(DBF_ERR, irq, "qfmt:%1u", data->q_format);
        DBF_DEV_EVENT(DBF_ERR, irq, "qpff%4x", data->qib_param_field_format);
        DBF_DEV_HEX(irq, &data->qib_param_field, sizeof(void *), DBF_ERR);
-       DBF_DEV_HEX(irq, &data->input_slib_elements, sizeof(void *), DBF_ERR);
-       DBF_DEV_HEX(irq, &data->output_slib_elements, sizeof(void *), DBF_ERR);
        DBF_DEV_EVENT(DBF_ERR, irq, "niq:%1u noq:%1u", data->no_input_qs,
                      data->no_output_qs);
        DBF_DEV_HEX(irq, &data->input_handler, sizeof(void *), DBF_ERR);
@@ -1083,6 +972,7 @@ int qdio_establish(struct ccw_device *cdev,
 {
        struct qdio_irq *irq_ptr = cdev->private->qdio_data;
        struct subchannel_id schid;
+       long timeout;
        int rc;
 
        ccw_device_get_schid(cdev, &schid);
@@ -1111,17 +1001,14 @@ int qdio_establish(struct ccw_device *cdev,
        qdio_setup_irq(irq_ptr, init_data);
 
        rc = qdio_establish_thinint(irq_ptr);
-       if (rc) {
-               qdio_shutdown_irq(irq_ptr);
-               mutex_unlock(&irq_ptr->setup_mutex);
-               return rc;
-       }
+       if (rc)
+               goto err_thinint;
 
        /* establish q */
        irq_ptr->ccw.cmd_code = irq_ptr->equeue.cmd;
        irq_ptr->ccw.flags = CCW_FLAG_SLI;
        irq_ptr->ccw.count = irq_ptr->equeue.count;
-       irq_ptr->ccw.cda = (u32)((addr_t)irq_ptr->qdr);
+       irq_ptr->ccw.cda = (u32) virt_to_phys(irq_ptr->qdr);
 
        spin_lock_irq(get_ccwdev_lock(cdev));
        ccw_device_set_options_mask(cdev, 0);
@@ -1131,20 +1018,20 @@ int qdio_establish(struct ccw_device *cdev,
        if (rc) {
                DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no);
                DBF_ERROR("rc:%4x", rc);
-               qdio_shutdown_thinint(irq_ptr);
-               qdio_shutdown_irq(irq_ptr);
-               mutex_unlock(&irq_ptr->setup_mutex);
-               return rc;
+               goto err_ccw_start;
        }
 
-       wait_event_interruptible_timeout(cdev->private->wait_q,
-               irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
-               irq_ptr->state == QDIO_IRQ_STATE_ERR, HZ);
+       timeout = wait_event_interruptible_timeout(cdev->private->wait_q,
+                                                  irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
+                                                  irq_ptr->state == QDIO_IRQ_STATE_ERR, HZ);
+       if (timeout <= 0) {
+               rc = (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;
+               goto err_ccw_timeout;
+       }
 
        if (irq_ptr->state != QDIO_IRQ_STATE_ESTABLISHED) {
-               mutex_unlock(&irq_ptr->setup_mutex);
-               qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
-               return -EIO;
+               rc = -EIO;
+               goto err_ccw_error;
        }
 
        qdio_setup_ssqd_info(irq_ptr);
@@ -1156,6 +1043,17 @@ int qdio_establish(struct ccw_device *cdev,
        qdio_print_subchannel_info(irq_ptr);
        qdio_setup_debug_entries(irq_ptr);
        return 0;
+
+err_ccw_timeout:
+       qdio_cancel_ccw(irq_ptr, QDIO_FLAG_CLEANUP_USING_CLEAR);
+err_ccw_error:
+err_ccw_start:
+       qdio_shutdown_thinint(irq_ptr);
+err_thinint:
+       qdio_shutdown_irq(irq_ptr);
+       qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
+       mutex_unlock(&irq_ptr->setup_mutex);
+       return rc;
 }
 EXPORT_SYMBOL_GPL(qdio_establish);
 
@@ -1219,12 +1117,10 @@ EXPORT_SYMBOL_GPL(qdio_activate);
 /**
  * handle_inbound - reset processed input buffers
  * @q: queue containing the buffers
- * @callflags: flags
  * @bufnr: first buffer to process
  * @count: how many buffers are emptied
  */
-static int handle_inbound(struct qdio_q *q, unsigned int callflags,
-                         int bufnr, int count)
+static int handle_inbound(struct qdio_q *q, int bufnr, int count)
 {
        int overlap;
 
@@ -1241,7 +1137,7 @@ static int handle_inbound(struct qdio_q *q, unsigned int callflags,
        count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count);
        atomic_add(count, &q->nr_buf_used);
 
-       if (need_siga_in(q))
+       if (qdio_need_siga_in(q->irq_ptr))
                return qdio_siga_input(q);
 
        return 0;
@@ -1250,16 +1146,13 @@ static int handle_inbound(struct qdio_q *q, unsigned int callflags,
 /**
  * handle_outbound - process filled outbound buffers
  * @q: queue containing the buffers
- * @callflags: flags
  * @bufnr: first buffer to process
  * @count: how many buffers are filled
  * @aob: asynchronous operation block
  */
-static int handle_outbound(struct qdio_q *q, unsigned int callflags,
-                          unsigned int bufnr, unsigned int count,
+static int handle_outbound(struct qdio_q *q, unsigned int bufnr, unsigned int count,
                           struct qaob *aob)
 {
-       const unsigned int scan_threshold = q->irq_ptr->scan_threshold;
        unsigned char state = 0;
        int used, rc = 0;
 
@@ -1271,19 +1164,13 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags,
        if (used == QDIO_MAX_BUFFERS_PER_Q)
                qperf_inc(q, outbound_queue_full);
 
-       if (callflags & QDIO_FLAG_PCI_OUT) {
-               q->u.out.pci_out_enabled = 1;
-               qperf_inc(q, pci_request_int);
-       } else
-               q->u.out.pci_out_enabled = 0;
-
        if (queue_type(q) == QDIO_IQDIO_QFMT) {
                unsigned long phys_aob = aob ? virt_to_phys(aob) : 0;
 
                WARN_ON_ONCE(!IS_ALIGNED(phys_aob, 256));
                rc = qdio_kick_outbound_q(q, count, phys_aob);
-       } else if (need_siga_sync(q)) {
-               rc = qdio_siga_sync_q(q);
+       } else if (qdio_need_siga_sync(q->irq_ptr)) {
+               rc = qdio_sync_output_queue(q);
        } else if (count < QDIO_MAX_BUFFERS_PER_Q &&
                   get_buf_state(q, prev_buf(bufnr), &state, 0) > 0 &&
                   state == SLSB_CU_OUTPUT_PRIMED) {
@@ -1293,18 +1180,6 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags,
                rc = qdio_kick_outbound_q(q, count, 0);
        }
 
-       /* Let drivers implement their own completion scanning: */
-       if (!scan_threshold)
-               return rc;
-
-       /* in case of SIGA errors we must process the error immediately */
-       if (used >= scan_threshold || rc)
-               qdio_tasklet_schedule(q);
-       else
-               /* free the SBALs in case of no further traffic */
-               if (!timer_pending(&q->u.out.timer) &&
-                   likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE))
-                       mod_timer(&q->u.out.timer, jiffies + HZ);
        return rc;
 }
 
@@ -1336,11 +1211,9 @@ int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
        if (!count)
                return 0;
        if (callflags & QDIO_FLAG_SYNC_INPUT)
-               return handle_inbound(irq_ptr->input_qs[q_nr],
-                                     callflags, bufnr, count);
+               return handle_inbound(irq_ptr->input_qs[q_nr], bufnr, count);
        else if (callflags & QDIO_FLAG_SYNC_OUTPUT)
-               return handle_outbound(irq_ptr->output_qs[q_nr],
-                                      callflags, bufnr, count, aob);
+               return handle_outbound(irq_ptr->output_qs[q_nr], bufnr, count, aob);
        return -EINVAL;
 }
 EXPORT_SYMBOL_GPL(do_QDIO);
@@ -1420,52 +1293,10 @@ int qdio_inspect_queue(struct ccw_device *cdev, unsigned int nr, bool is_input,
                return -ENODEV;
        q = is_input ? irq_ptr->input_qs[nr] : irq_ptr->output_qs[nr];
 
-       if (need_siga_sync(q))
-               qdio_siga_sync_q(q);
-
        return __qdio_inspect_queue(q, bufnr, error);
 }
 EXPORT_SYMBOL_GPL(qdio_inspect_queue);
 
-/**
- * qdio_get_next_buffers - process input buffers
- * @cdev: associated ccw_device for the qdio subchannel
- * @nr: input queue number
- * @bufnr: first filled buffer number
- * @error: buffers are in error state
- *
- * Return codes
- *   < 0 - error
- *   = 0 - no new buffers found
- *   > 0 - number of processed buffers
- */
-int qdio_get_next_buffers(struct ccw_device *cdev, int nr, int *bufnr,
-                         int *error)
-{
-       struct qdio_q *q;
-       struct qdio_irq *irq_ptr = cdev->private->qdio_data;
-
-       if (!irq_ptr)
-               return -ENODEV;
-       q = irq_ptr->input_qs[nr];
-
-       /*
-        * Cannot rely on automatic sync after interrupt since queues may
-        * also be examined without interrupt.
-        */
-       if (need_siga_sync(q))
-               qdio_sync_queues(q);
-
-       qdio_check_outbound_pci_queues(irq_ptr);
-
-       /* Note: upper-layer MUST stop processing immediately here ... */
-       if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
-               return -EIO;
-
-       return __qdio_inspect_queue(q, bufnr, error);
-}
-EXPORT_SYMBOL(qdio_get_next_buffers);
-
 /**
  * qdio_stop_irq - disable interrupt processing for the device
  * @cdev: associated ccw_device for the qdio subchannel
index da67e49..20efafe 100644 (file)
@@ -89,55 +89,6 @@ void qdio_reset_buffers(struct qdio_buffer **buf, unsigned int count)
 }
 EXPORT_SYMBOL_GPL(qdio_reset_buffers);
 
-/*
- * qebsm is only available under 64bit but the adapter sets the feature
- * flag anyway, so we manually override it.
- */
-static inline int qebsm_possible(void)
-{
-       return css_general_characteristics.qebsm;
-}
-
-/*
- * qib_param_field: pointer to 128 bytes or NULL, if no param field
- * nr_input_qs: pointer to nr_queues*128 words of data or NULL
- */
-static void set_impl_params(struct qdio_irq *irq_ptr,
-                           unsigned int qib_param_field_format,
-                           unsigned char *qib_param_field,
-                           unsigned long *input_slib_elements,
-                           unsigned long *output_slib_elements)
-{
-       struct qdio_q *q;
-       int i, j;
-
-       if (!irq_ptr)
-               return;
-
-       irq_ptr->qib.pfmt = qib_param_field_format;
-       if (qib_param_field)
-               memcpy(irq_ptr->qib.parm, qib_param_field,
-                      sizeof(irq_ptr->qib.parm));
-
-       if (!input_slib_elements)
-               goto output;
-
-       for_each_input_queue(irq_ptr, q, i) {
-               for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
-                       q->slib->slibe[j].parms =
-                               input_slib_elements[i * QDIO_MAX_BUFFERS_PER_Q + j];
-       }
-output:
-       if (!output_slib_elements)
-               return;
-
-       for_each_output_queue(irq_ptr, q, i) {
-               for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
-                       q->slib->slibe[j].parms =
-                               output_slib_elements[i * QDIO_MAX_BUFFERS_PER_Q + j];
-       }
-}
-
 static void __qdio_free_queues(struct qdio_q **queues, unsigned int count)
 {
        struct qdio_q *q;
@@ -267,26 +218,9 @@ static void setup_queues(struct qdio_irq *irq_ptr,
                q->is_input_q = 0;
                setup_storage_lists(q, irq_ptr,
                                    qdio_init->output_sbal_addr_array[i], i);
-
-               tasklet_setup(&q->u.out.tasklet, qdio_outbound_tasklet);
-               timer_setup(&q->u.out.timer, qdio_outbound_timer, 0);
        }
 }
 
-static void process_ac_flags(struct qdio_irq *irq_ptr, unsigned char qdioac)
-{
-       if (qdioac & AC1_SIGA_INPUT_NEEDED)
-               irq_ptr->siga_flag.input = 1;
-       if (qdioac & AC1_SIGA_OUTPUT_NEEDED)
-               irq_ptr->siga_flag.output = 1;
-       if (qdioac & AC1_SIGA_SYNC_NEEDED)
-               irq_ptr->siga_flag.sync = 1;
-       if (!(qdioac & AC1_AUTOMATIC_SYNC_ON_THININT))
-               irq_ptr->siga_flag.sync_after_ai = 1;
-       if (!(qdioac & AC1_AUTOMATIC_SYNC_ON_OUT_PCI))
-               irq_ptr->siga_flag.sync_out_after_pci = 1;
-}
-
 static void check_and_setup_qebsm(struct qdio_irq *irq_ptr,
                                  unsigned char qdioac, unsigned long token)
 {
@@ -363,7 +297,7 @@ void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr)
                qdioac = irq_ptr->ssqd_desc.qdioac1;
 
        check_and_setup_qebsm(irq_ptr, qdioac, irq_ptr->ssqd_desc.sch_token);
-       process_ac_flags(irq_ptr, qdioac);
+       irq_ptr->qdioac1 = qdioac;
        DBF_EVENT("ac 1:%2x 2:%4x", qdioac, irq_ptr->ssqd_desc.qdioac2);
        DBF_EVENT("3:%4x qib:%4x", irq_ptr->ssqd_desc.qdioac3, irq_ptr->qib.ac);
 }
@@ -386,6 +320,8 @@ static void setup_qdr(struct qdio_irq *irq_ptr,
        struct qdesfmt0 *desc = &irq_ptr->qdr->qdf0[0];
        int i;
 
+       memset(irq_ptr->qdr, 0, sizeof(struct qdr));
+
        irq_ptr->qdr->qfmt = qdio_init->q_format;
        irq_ptr->qdr->ac = qdio_init->qdr_ac;
        irq_ptr->qdr->iqdcnt = qdio_init->no_input_qs;
@@ -405,12 +341,15 @@ static void setup_qdr(struct qdio_irq *irq_ptr,
 static void setup_qib(struct qdio_irq *irq_ptr,
                      struct qdio_initialize *init_data)
 {
-       if (qebsm_possible())
-               irq_ptr->qib.rflags |= QIB_RFLAGS_ENABLE_QEBSM;
-
-       irq_ptr->qib.rflags |= init_data->qib_rflags;
+       memset(&irq_ptr->qib, 0, sizeof(irq_ptr->qib));
 
        irq_ptr->qib.qfmt = init_data->q_format;
+       irq_ptr->qib.pfmt = init_data->qib_param_field_format;
+
+       irq_ptr->qib.rflags = init_data->qib_rflags;
+       if (css_general_characteristics.qebsm)
+               irq_ptr->qib.rflags |= QIB_RFLAGS_ENABLE_QEBSM;
+
        if (init_data->no_input_qs)
                irq_ptr->qib.isliba =
                        (unsigned long)(irq_ptr->input_qs[0]->slib);
@@ -419,6 +358,10 @@ static void setup_qib(struct qdio_irq *irq_ptr,
                        (unsigned long)(irq_ptr->output_qs[0]->slib);
        memcpy(irq_ptr->qib.ebcnam, dev_name(&irq_ptr->cdev->dev), 8);
        ASCEBC(irq_ptr->qib.ebcnam, 8);
+
+       if (init_data->qib_param_field)
+               memcpy(irq_ptr->qib.parm, init_data->qib_param_field,
+                      sizeof(irq_ptr->qib.parm));
 }
 
 int qdio_setup_irq(struct qdio_irq *irq_ptr, struct qdio_initialize *init_data)
@@ -426,8 +369,7 @@ int qdio_setup_irq(struct qdio_irq *irq_ptr, struct qdio_initialize *init_data)
        struct ccw_device *cdev = irq_ptr->cdev;
        struct ciw *ciw;
 
-       memset(&irq_ptr->qib, 0, sizeof(irq_ptr->qib));
-       memset(&irq_ptr->siga_flag, 0, sizeof(irq_ptr->siga_flag));
+       irq_ptr->qdioac1 = 0;
        memset(&irq_ptr->ccw, 0, sizeof(irq_ptr->ccw));
        memset(&irq_ptr->ssqd_desc, 0, sizeof(irq_ptr->ssqd_desc));
        memset(&irq_ptr->perf_stat, 0, sizeof(irq_ptr->perf_stat));
@@ -436,13 +378,9 @@ int qdio_setup_irq(struct qdio_irq *irq_ptr, struct qdio_initialize *init_data)
        irq_ptr->sch_token = irq_ptr->perf_stat_enabled = 0;
        irq_ptr->state = QDIO_IRQ_STATE_INACTIVE;
 
-       /* wipes qib.ac, required by ar7063 */
-       memset(irq_ptr->qdr, 0, sizeof(struct qdr));
-
        irq_ptr->int_parm = init_data->int_parm;
        irq_ptr->nr_input_qs = init_data->no_input_qs;
        irq_ptr->nr_output_qs = init_data->no_output_qs;
-       irq_ptr->scan_threshold = init_data->scan_threshold;
        ccw_device_get_schid(cdev, &irq_ptr->schid);
        setup_queues(irq_ptr, init_data);
 
@@ -450,10 +388,6 @@ int qdio_setup_irq(struct qdio_irq *irq_ptr, struct qdio_initialize *init_data)
        set_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state);
 
        setup_qib(irq_ptr, init_data);
-       set_impl_params(irq_ptr, init_data->qib_param_field_format,
-                       init_data->qib_param_field,
-                       init_data->input_slib_elements,
-                       init_data->output_slib_elements);
 
        /* fill input and output descriptors */
        setup_qdr(irq_ptr, init_data);
@@ -497,11 +431,8 @@ void qdio_shutdown_irq(struct qdio_irq *irq)
 
 void qdio_print_subchannel_info(struct qdio_irq *irq_ptr)
 {
-       char s[80];
-
-       snprintf(s, 80, "qdio: %s %s on SC %x using "
-                "AI:%d QEBSM:%d PRI:%d TDD:%d SIGA:%s%s%s%s%s\n",
-                dev_name(&irq_ptr->cdev->dev),
+       dev_info(&irq_ptr->cdev->dev,
+                "qdio: %s on SC %x using AI:%d QEBSM:%d PRI:%d TDD:%d SIGA:%s%s%s\n",
                 (irq_ptr->qib.qfmt == QDIO_QETH_QFMT) ? "OSA" :
                        ((irq_ptr->qib.qfmt == QDIO_ZFCP_QFMT) ? "ZFCP" : "HS"),
                 irq_ptr->schid.sch_no,
@@ -509,12 +440,9 @@ void qdio_print_subchannel_info(struct qdio_irq *irq_ptr)
                 (irq_ptr->sch_token) ? 1 : 0,
                 pci_out_supported(irq_ptr) ? 1 : 0,
                 css_general_characteristics.aif_tdd,
-                (irq_ptr->siga_flag.input) ? "R" : " ",
-                (irq_ptr->siga_flag.output) ? "W" : " ",
-                (irq_ptr->siga_flag.sync) ? "S" : " ",
-                (irq_ptr->siga_flag.sync_after_ai) ? "A" : " ",
-                (irq_ptr->siga_flag.sync_out_after_pci) ? "P" : " ");
-       printk(KERN_INFO "%s", s);
+                qdio_need_siga_in(irq_ptr) ? "R" : " ",
+                qdio_need_siga_out(irq_ptr) ? "W" : " ",
+                qdio_need_siga_sync(irq_ptr) ? "S" : " ");
 }
 
 int __init qdio_setup_init(void)
@@ -541,7 +469,7 @@ int __init qdio_setup_init(void)
                  (css_general_characteristics.aif_osa) ? 1 : 0);
 
        /* Check for QEBSM support in general (bit 58). */
-       DBF_EVENT("cssQEBSM:%1d", (qebsm_possible()) ? 1 : 0);
+       DBF_EVENT("cssQEBSM:%1d", css_general_characteristics.qebsm);
        rc = 0;
 out:
        return rc;
index 8d3a1d8..439c1f6 100644 (file)
@@ -127,22 +127,13 @@ static struct bus_type ap_bus_type;
 /* Adapter interrupt definitions */
 static void ap_interrupt_handler(struct airq_struct *airq, bool floating);
 
-static int ap_airq_flag;
+static bool ap_irq_flag;
 
 static struct airq_struct ap_airq = {
        .handler = ap_interrupt_handler,
        .isc = AP_ISC,
 };
 
-/**
- * ap_using_interrupts() - Returns non-zero if interrupt support is
- * available.
- */
-static inline int ap_using_interrupts(void)
-{
-       return ap_airq_flag;
-}
-
 /**
  * ap_airq_ptr() - Get the address of the adapter interrupt indicator
  *
@@ -152,7 +143,7 @@ static inline int ap_using_interrupts(void)
  */
 void *ap_airq_ptr(void)
 {
-       if (ap_using_interrupts())
+       if (ap_irq_flag)
                return ap_airq.lsi_ptr;
        return NULL;
 }
@@ -396,7 +387,7 @@ void ap_wait(enum ap_sm_wait wait)
        switch (wait) {
        case AP_SM_WAIT_AGAIN:
        case AP_SM_WAIT_INTERRUPT:
-               if (ap_using_interrupts())
+               if (ap_irq_flag)
                        break;
                if (ap_poll_kthread) {
                        wake_up(&ap_poll_wait);
@@ -471,7 +462,7 @@ static void ap_tasklet_fn(unsigned long dummy)
         * be received. Doing it at the beginning of the tasklet is therefore
         * important so that no requests on any AP get lost.
         */
-       if (ap_using_interrupts())
+       if (ap_irq_flag)
                xchg(ap_airq.lsi_ptr, 0);
 
        spin_lock_bh(&ap_queues_lock);
@@ -541,7 +532,7 @@ static int ap_poll_thread_start(void)
 {
        int rc;
 
-       if (ap_using_interrupts() || ap_poll_kthread)
+       if (ap_irq_flag || ap_poll_kthread)
                return 0;
        mutex_lock(&ap_poll_thread_mutex);
        ap_poll_kthread = kthread_run(ap_poll_thread, NULL, "appoll");
@@ -703,7 +694,7 @@ static int __ap_calc_helper(struct device *dev, void *arg)
 
        if (is_queue_dev(dev)) {
                pctrs->apqns++;
-               if ((to_ap_dev(dev))->drv)
+               if (dev->driver)
                        pctrs->bound++;
        }
 
@@ -883,7 +874,6 @@ static int ap_device_probe(struct device *dev)
                         to_ap_queue(dev)->qid);
        spin_unlock_bh(&ap_queues_lock);
 
-       ap_dev->drv = ap_drv;
        rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV;
 
        if (rc) {
@@ -891,7 +881,6 @@ static int ap_device_probe(struct device *dev)
                if (is_queue_dev(dev))
                        hash_del(&to_ap_queue(dev)->hnode);
                spin_unlock_bh(&ap_queues_lock);
-               ap_dev->drv = NULL;
        } else
                ap_check_bindings_complete();
 
@@ -904,7 +893,7 @@ out:
 static int ap_device_remove(struct device *dev)
 {
        struct ap_device *ap_dev = to_ap_dev(dev);
-       struct ap_driver *ap_drv = ap_dev->drv;
+       struct ap_driver *ap_drv = to_ap_drv(dev->driver);
 
        /* prepare ap queue device removal */
        if (is_queue_dev(dev))
@@ -923,7 +912,6 @@ static int ap_device_remove(struct device *dev)
        if (is_queue_dev(dev))
                hash_del(&to_ap_queue(dev)->hnode);
        spin_unlock_bh(&ap_queues_lock);
-       ap_dev->drv = NULL;
 
        put_device(dev);
 
@@ -1187,7 +1175,7 @@ static BUS_ATTR_RO(ap_adapter_mask);
 static ssize_t ap_interrupts_show(struct bus_type *bus, char *buf)
 {
        return scnprintf(buf, PAGE_SIZE, "%d\n",
-                        ap_using_interrupts() ? 1 : 0);
+                        ap_irq_flag ? 1 : 0);
 }
 
 static BUS_ATTR_RO(ap_interrupts);
@@ -1912,7 +1900,7 @@ static int __init ap_module_init(void)
        /* enable interrupts if available */
        if (ap_interrupts_available()) {
                rc = register_adapter_interrupt(&ap_airq);
-               ap_airq_flag = (rc == 0);
+               ap_irq_flag = (rc == 0);
        }
 
        /* Create /sys/bus/ap. */
@@ -1956,7 +1944,7 @@ out_work:
 out_bus:
        bus_unregister(&ap_bus_type);
 out:
-       if (ap_using_interrupts())
+       if (ap_irq_flag)
                unregister_adapter_interrupt(&ap_airq);
        kfree(ap_qci_info);
        return rc;
index 8f18abd..95b5777 100644 (file)
@@ -80,12 +80,6 @@ static inline int ap_test_bit(unsigned int *ptr, unsigned int nr)
 #define AP_FUNC_EP11  5
 #define AP_FUNC_APXA  6
 
-/*
- * AP interrupt states
- */
-#define AP_INTR_DISABLED       0       /* AP interrupt disabled */
-#define AP_INTR_ENABLED                1       /* AP interrupt enabled */
-
 /*
  * AP queue state machine states
  */
@@ -112,7 +106,7 @@ enum ap_sm_event {
  * AP queue state wait behaviour
  */
 enum ap_sm_wait {
-       AP_SM_WAIT_AGAIN,       /* retry immediately */
+       AP_SM_WAIT_AGAIN = 0,   /* retry immediately */
        AP_SM_WAIT_TIMEOUT,     /* wait for timeout */
        AP_SM_WAIT_INTERRUPT,   /* wait for thin interrupt (if available) */
        AP_SM_WAIT_NONE,        /* no wait */
@@ -157,7 +151,6 @@ void ap_driver_unregister(struct ap_driver *);
 
 struct ap_device {
        struct device device;
-       struct ap_driver *drv;          /* Pointer to AP device driver. */
        int device_type;                /* AP device type. */
 };
 
@@ -165,7 +158,6 @@ struct ap_device {
 
 struct ap_card {
        struct ap_device ap_dev;
-       void *private;                  /* ap driver private pointer. */
        int raw_hwtype;                 /* AP raw hardware type. */
        unsigned int functions;         /* AP device function bitfield. */
        int queue_depth;                /* AP queue depth.*/
@@ -182,11 +174,10 @@ struct ap_queue {
        struct hlist_node hnode;        /* Node for the ap_queues hashtable */
        struct ap_card *card;           /* Ptr to assoc. AP card. */
        spinlock_t lock;                /* Per device lock. */
-       void *private;                  /* ap driver private pointer. */
        enum ap_dev_state dev_state;    /* queue device state */
        bool config;                    /* configured state */
        ap_qid_t qid;                   /* AP queue id. */
-       int interrupt;                  /* indicate if interrupts are enabled */
+       bool interrupt;                 /* indicate if interrupts are enabled */
        int queue_count;                /* # messages currently on AP queue. */
        int pendingq_count;             /* # requests on pendingq list. */
        int requestq_count;             /* # requests on requestq list. */
index 669f96f..d70c4d3 100644 (file)
@@ -19,7 +19,7 @@
 static void __ap_flush_queue(struct ap_queue *aq);
 
 /**
- * ap_queue_enable_interruption(): Enable interruption on an AP queue.
+ * ap_queue_enable_irq(): Enable interrupt support on this AP queue.
  * @qid: The AP queue number
  * @ind: the notification indicator byte
  *
@@ -27,7 +27,7 @@ static void __ap_flush_queue(struct ap_queue *aq);
  * value it waits a while and tests the AP queue if interrupts
  * have been switched on using ap_test_queue().
  */
-static int ap_queue_enable_interruption(struct ap_queue *aq, void *ind)
+static int ap_queue_enable_irq(struct ap_queue *aq, void *ind)
 {
        struct ap_queue_status status;
        struct ap_qirq_ctrl qirqctrl = { 0 };
@@ -218,7 +218,8 @@ static enum ap_sm_wait ap_sm_read(struct ap_queue *aq)
                return AP_SM_WAIT_NONE;
        case AP_RESPONSE_NO_PENDING_REPLY:
                if (aq->queue_count > 0)
-                       return AP_SM_WAIT_INTERRUPT;
+                       return aq->interrupt ?
+                               AP_SM_WAIT_INTERRUPT : AP_SM_WAIT_TIMEOUT;
                aq->sm_state = AP_SM_STATE_IDLE;
                return AP_SM_WAIT_NONE;
        default:
@@ -272,7 +273,8 @@ static enum ap_sm_wait ap_sm_write(struct ap_queue *aq)
                fallthrough;
        case AP_RESPONSE_Q_FULL:
                aq->sm_state = AP_SM_STATE_QUEUE_FULL;
-               return AP_SM_WAIT_INTERRUPT;
+               return aq->interrupt ?
+                       AP_SM_WAIT_INTERRUPT : AP_SM_WAIT_TIMEOUT;
        case AP_RESPONSE_RESET_IN_PROGRESS:
                aq->sm_state = AP_SM_STATE_RESET_WAIT;
                return AP_SM_WAIT_TIMEOUT;
@@ -322,7 +324,7 @@ static enum ap_sm_wait ap_sm_reset(struct ap_queue *aq)
        case AP_RESPONSE_NORMAL:
        case AP_RESPONSE_RESET_IN_PROGRESS:
                aq->sm_state = AP_SM_STATE_RESET_WAIT;
-               aq->interrupt = AP_INTR_DISABLED;
+               aq->interrupt = false;
                return AP_SM_WAIT_TIMEOUT;
        default:
                aq->dev_state = AP_DEV_STATE_ERROR;
@@ -355,7 +357,7 @@ static enum ap_sm_wait ap_sm_reset_wait(struct ap_queue *aq)
        switch (status.response_code) {
        case AP_RESPONSE_NORMAL:
                lsi_ptr = ap_airq_ptr();
-               if (lsi_ptr && ap_queue_enable_interruption(aq, lsi_ptr) == 0)
+               if (lsi_ptr && ap_queue_enable_irq(aq, lsi_ptr) == 0)
                        aq->sm_state = AP_SM_STATE_SETIRQ_WAIT;
                else
                        aq->sm_state = (aq->queue_count > 0) ?
@@ -396,7 +398,7 @@ static enum ap_sm_wait ap_sm_setirq_wait(struct ap_queue *aq)
 
        if (status.irq_enabled == 1) {
                /* Irqs are now enabled */
-               aq->interrupt = AP_INTR_ENABLED;
+               aq->interrupt = true;
                aq->sm_state = (aq->queue_count > 0) ?
                        AP_SM_STATE_WORKING : AP_SM_STATE_IDLE;
        }
@@ -586,7 +588,7 @@ static ssize_t interrupt_show(struct device *dev,
        spin_lock_bh(&aq->lock);
        if (aq->sm_state == AP_SM_STATE_SETIRQ_WAIT)
                rc = scnprintf(buf, PAGE_SIZE, "Enable Interrupt pending.\n");
-       else if (aq->interrupt == AP_INTR_ENABLED)
+       else if (aq->interrupt)
                rc = scnprintf(buf, PAGE_SIZE, "Interrupts enabled.\n");
        else
                rc = scnprintf(buf, PAGE_SIZE, "Interrupts disabled.\n");
@@ -767,7 +769,7 @@ struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type)
        aq->ap_dev.device.type = &ap_queue_type;
        aq->ap_dev.device_type = device_type;
        aq->qid = qid;
-       aq->interrupt = AP_INTR_DISABLED;
+       aq->interrupt = false;
        spin_lock_init(&aq->lock);
        INIT_LIST_HEAD(&aq->pendingq);
        INIT_LIST_HEAD(&aq->requestq);
index 122c85c..67f1455 100644 (file)
@@ -35,7 +35,7 @@ static int match_apqn(struct device *dev, const void *data)
 }
 
 /**
- * vfio_ap_get_queue: Retrieve a queue with a specific APQN from a list
+ * vfio_ap_get_queue - retrieve a queue with a specific APQN from a list
  * @matrix_mdev: the associated mediated matrix
  * @apqn: The queue APQN
  *
@@ -43,7 +43,7 @@ static int match_apqn(struct device *dev, const void *data)
  * devices of the vfio_ap_drv.
  * Verify that the APID and the APQI are set in the matrix.
  *
- * Returns the pointer to the associated vfio_ap_queue
+ * Return: the pointer to the associated vfio_ap_queue
  */
 static struct vfio_ap_queue *vfio_ap_get_queue(
                                        struct ap_matrix_mdev *matrix_mdev,
@@ -64,7 +64,7 @@ static struct vfio_ap_queue *vfio_ap_get_queue(
 }
 
 /**
- * vfio_ap_wait_for_irqclear
+ * vfio_ap_wait_for_irqclear - clears the IR bit or gives up after 5 tries
  * @apqn: The AP Queue number
  *
  * Checks the IRQ bit for the status of this APQN using ap_tapq.
@@ -72,7 +72,6 @@ static struct vfio_ap_queue *vfio_ap_get_queue(
  * Returns if ap_tapq function failed with invalid, deconfigured or
  * checkstopped AP.
  * Otherwise retries up to 5 times after waiting 20ms.
- *
  */
 static void vfio_ap_wait_for_irqclear(int apqn)
 {
@@ -105,13 +104,12 @@ static void vfio_ap_wait_for_irqclear(int apqn)
 }
 
 /**
- * vfio_ap_free_aqic_resources
+ * vfio_ap_free_aqic_resources - free vfio_ap_queue resources
  * @q: The vfio_ap_queue
  *
  * Unregisters the ISC in the GIB when the saved ISC not invalid.
- * Unpin the guest's page holding the NIB when it exist.
- * Reset the saved_pfn and saved_isc to invalid values.
- *
+ * Unpins the guest's page holding the NIB when it exists.
+ * Resets the saved_pfn and saved_isc to invalid values.
  */
 static void vfio_ap_free_aqic_resources(struct vfio_ap_queue *q)
 {
@@ -130,7 +128,7 @@ static void vfio_ap_free_aqic_resources(struct vfio_ap_queue *q)
 }
 
 /**
- * vfio_ap_irq_disable
+ * vfio_ap_irq_disable - disables and clears an ap_queue interrupt
  * @q: The vfio_ap_queue
  *
  * Uses ap_aqic to disable the interruption and in case of success, reset
@@ -144,6 +142,8 @@ static void vfio_ap_free_aqic_resources(struct vfio_ap_queue *q)
  *
  * Returns if ap_aqic function failed with invalid, deconfigured or
  * checkstopped AP.
+ *
+ * Return: &struct ap_queue_status
  */
 static struct ap_queue_status vfio_ap_irq_disable(struct vfio_ap_queue *q)
 {
@@ -183,9 +183,8 @@ end_free:
 }
 
 /**
- * vfio_ap_setirq: Enable Interruption for a APQN
+ * vfio_ap_irq_enable - Enable Interruption for a APQN
  *
- * @dev: the device associated with the ap_queue
  * @q:  the vfio_ap_queue holding AQIC parameters
  *
  * Pin the NIB saved in *q
@@ -197,6 +196,8 @@ end_free:
  *
  * Otherwise return the ap_queue_status returned by the ap_aqic(),
  * all retry handling will be done by the guest.
+ *
+ * Return: &struct ap_queue_status
  */
 static struct ap_queue_status vfio_ap_irq_enable(struct vfio_ap_queue *q,
                                                 int isc,
@@ -253,7 +254,7 @@ static struct ap_queue_status vfio_ap_irq_enable(struct vfio_ap_queue *q,
 }
 
 /**
- * handle_pqap: PQAP instruction callback
+ * handle_pqap - PQAP instruction callback
  *
  * @vcpu: The vcpu on which we received the PQAP instruction
  *
@@ -270,8 +271,8 @@ static struct ap_queue_status vfio_ap_irq_enable(struct vfio_ap_queue *q,
  * We take the matrix_dev lock to ensure serialization on queues and
  * mediated device access.
  *
- * Return 0 if we could handle the request inside KVM.
- * otherwise, returns -EOPNOTSUPP to let QEMU handle the fault.
+ * Return: 0 if we could handle the request inside KVM.
+ * Otherwise, returns -EOPNOTSUPP to let QEMU handle the fault.
  */
 static int handle_pqap(struct kvm_vcpu *vcpu)
 {
@@ -426,7 +427,7 @@ struct vfio_ap_queue_reserved {
 };
 
 /**
- * vfio_ap_has_queue
+ * vfio_ap_has_queue - determines if the AP queue containing the target in @data
  *
  * @dev: an AP queue device
  * @data: a struct vfio_ap_queue_reserved reference
@@ -443,7 +444,7 @@ struct vfio_ap_queue_reserved {
  * - If @data contains only an apqi value, @data will be flagged as
  *   reserved if the APQI field in the AP queue device matches
  *
- * Returns 0 to indicate the input to function succeeded. Returns -EINVAL if
+ * Return: 0 to indicate the input to function succeeded. Returns -EINVAL if
  * @data does not contain either an apid or apqi.
  */
 static int vfio_ap_has_queue(struct device *dev, void *data)
@@ -473,9 +474,9 @@ static int vfio_ap_has_queue(struct device *dev, void *data)
 }
 
 /**
- * vfio_ap_verify_queue_reserved
+ * vfio_ap_verify_queue_reserved - verifies that the AP queue containing
+ * @apid or @apqi is reserved
  *
- * @matrix_dev: a mediated matrix device
  * @apid: an AP adapter ID
  * @apqi: an AP queue index
  *
@@ -492,7 +493,7 @@ static int vfio_ap_has_queue(struct device *dev, void *data)
  * - If only @apqi is not NULL, then there must be an AP queue device bound
  *   to the vfio_ap driver with an APQN containing @apqi
  *
- * Returns 0 if the AP queue is reserved; otherwise, returns -EADDRNOTAVAIL.
+ * Return: 0 if the AP queue is reserved; otherwise, returns -EADDRNOTAVAIL.
  */
 static int vfio_ap_verify_queue_reserved(unsigned long *apid,
                                         unsigned long *apqi)
@@ -536,15 +537,15 @@ vfio_ap_mdev_verify_queues_reserved_for_apid(struct ap_matrix_mdev *matrix_mdev,
 }
 
 /**
- * vfio_ap_mdev_verify_no_sharing
+ * vfio_ap_mdev_verify_no_sharing - verifies that the AP matrix is not shared
+ * with another mediated device
+ *
+ * @matrix_mdev: the mediated matrix device
  *
  * Verifies that the APQNs derived from the cross product of the AP adapter IDs
  * and AP queue indexes comprising the AP matrix are not configured for another
  * mediated device. AP queue sharing is not allowed.
  *
- * @matrix_mdev: the mediated matrix device
- *
- * Returns 0 if the APQNs are not shared, otherwise; returns -EADDRINUSE.
+ * Return: 0 if the APQNs are not shared; otherwise returns -EADDRINUSE.
  */
 static int vfio_ap_mdev_verify_no_sharing(struct ap_matrix_mdev *matrix_mdev)
 {
@@ -578,7 +579,8 @@ static int vfio_ap_mdev_verify_no_sharing(struct ap_matrix_mdev *matrix_mdev)
 }
 
 /**
- * assign_adapter_store
+ * assign_adapter_store - parses the APID from @buf and sets the
+ * corresponding bit in the mediated matrix device's APM
  *
  * @dev:       the matrix device
  * @attr:      the mediated matrix device's assign_adapter attribute
@@ -586,10 +588,7 @@ static int vfio_ap_mdev_verify_no_sharing(struct ap_matrix_mdev *matrix_mdev)
  *             be assigned
  * @count:     the number of bytes in @buf
  *
- * Parses the APID from @buf and sets the corresponding bit in the mediated
- * matrix device's APM.
- *
- * Returns the number of bytes processed if the APID is valid; otherwise,
+ * Return: the number of bytes processed if the APID is valid; otherwise,
  * returns one of the following errors:
  *
  *     1. -EINVAL
@@ -666,17 +665,15 @@ done:
 static DEVICE_ATTR_WO(assign_adapter);
 
 /**
- * unassign_adapter_store
+ * unassign_adapter_store - parses the APID from @buf and clears the
+ * corresponding bit in the mediated matrix device's APM
  *
  * @dev:       the matrix device
  * @attr:      the mediated matrix device's unassign_adapter attribute
  * @buf:       a buffer containing the adapter number (APID) to be unassigned
  * @count:     the number of bytes in @buf
  *
- * Parses the APID from @buf and clears the corresponding bit in the mediated
- * matrix device's APM.
- *
- * Returns the number of bytes processed if the APID is valid; otherwise,
+ * Return: the number of bytes processed if the APID is valid; otherwise,
  * returns one of the following errors:
  *     -EINVAL if the APID is not a number
 *     -ENODEV if the APID exceeds the maximum value configured for the
@@ -740,7 +737,9 @@ vfio_ap_mdev_verify_queues_reserved_for_apqi(struct ap_matrix_mdev *matrix_mdev,
 }
 
 /**
- * assign_domain_store
+ * assign_domain_store - parses the APQI from @buf and sets the
+ * corresponding bit in the mediated matrix device's AQM
+ *
  *
  * @dev:       the matrix device
  * @attr:      the mediated matrix device's assign_domain attribute
@@ -748,10 +747,7 @@ vfio_ap_mdev_verify_queues_reserved_for_apqi(struct ap_matrix_mdev *matrix_mdev,
  *             be assigned
  * @count:     the number of bytes in @buf
  *
- * Parses the APQI from @buf and sets the corresponding bit in the mediated
- * matrix device's AQM.
- *
- * Returns the number of bytes processed if the APQI is valid; otherwise returns
+ * Return: the number of bytes processed if the APQI is valid; otherwise returns
  * one of the following errors:
  *
  *     1. -EINVAL
@@ -824,7 +820,8 @@ static DEVICE_ATTR_WO(assign_domain);
 
 
 /**
- * unassign_domain_store
+ * unassign_domain_store - parses the APQI from @buf and clears the
+ * corresponding bit in the mediated matrix device's AQM
  *
  * @dev:       the matrix device
  * @attr:      the mediated matrix device's unassign_domain attribute
@@ -832,10 +829,7 @@ static DEVICE_ATTR_WO(assign_domain);
  *             be unassigned
  * @count:     the number of bytes in @buf
  *
- * Parses the APQI from @buf and clears the corresponding bit in the
- * mediated matrix device's AQM.
- *
- * Returns the number of bytes processed if the APQI is valid; otherwise,
+ * Return: the number of bytes processed if the APQI is valid; otherwise,
  * returns one of the following errors:
  *     -EINVAL if the APQI is not a number
  *     -ENODEV if the APQI exceeds the maximum value configured for the system
@@ -879,17 +873,16 @@ done:
 static DEVICE_ATTR_WO(unassign_domain);
 
 /**
- * assign_control_domain_store
+ * assign_control_domain_store - parses the domain ID from @buf and sets
+ * the corresponding bit in the mediated matrix device's ADM
+ *
  *
  * @dev:       the matrix device
  * @attr:      the mediated matrix device's assign_control_domain attribute
  * @buf:       a buffer containing the domain ID to be assigned
  * @count:     the number of bytes in @buf
  *
- * Parses the domain ID from @buf and sets the corresponding bit in the mediated
- * matrix device's ADM.
- *
- * Returns the number of bytes processed if the domain ID is valid; otherwise,
+ * Return: the number of bytes processed if the domain ID is valid; otherwise,
  * returns one of the following errors:
  *     -EINVAL if the ID is not a number
  *     -ENODEV if the ID exceeds the maximum value configured for the system
@@ -937,17 +930,15 @@ done:
 static DEVICE_ATTR_WO(assign_control_domain);
 
 /**
- * unassign_control_domain_store
+ * unassign_control_domain_store - parses the domain ID from @buf and
+ * clears the corresponding bit in the mediated matrix device's ADM
  *
  * @dev:       the matrix device
  * @attr:      the mediated matrix device's unassign_control_domain attribute
  * @buf:       a buffer containing the domain ID to be unassigned
  * @count:     the number of bytes in @buf
  *
- * Parses the domain ID from @buf and clears the corresponding bit in the
- * mediated matrix device's ADM.
- *
- * Returns the number of bytes processed if the domain ID is valid; otherwise,
+ * Return: the number of bytes processed if the domain ID is valid; otherwise,
  * returns one of the following errors:
  *     -EINVAL if the ID is not a number
  *     -ENODEV if the ID exceeds the maximum value configured for the system
@@ -1085,14 +1076,12 @@ static const struct attribute_group *vfio_ap_mdev_attr_groups[] = {
 };
 
 /**
- * vfio_ap_mdev_set_kvm
+ * vfio_ap_mdev_set_kvm - sets all data for @matrix_mdev that are needed
+ * to manage AP resources for the guest whose state is represented by @kvm
  *
  * @matrix_mdev: a mediated matrix device
  * @kvm: reference to KVM instance
  *
- * Sets all data for @matrix_mdev that are needed to manage AP resources
- * for the guest whose state is represented by @kvm.
- *
  * Note: The matrix_dev->lock must be taken prior to calling
  * this function; however, the lock will be temporarily released while the
  * guest's AP configuration is set to avoid a potential lockdep splat.
@@ -1100,7 +1089,7 @@ static const struct attribute_group *vfio_ap_mdev_attr_groups[] = {
  * certain circumstances, will result in a circular lock dependency if this is
  * done under the @matrix_mdev->lock.
  *
- * Return 0 if no other mediated matrix device has a reference to @kvm;
+ * Return: 0 if no other mediated matrix device has a reference to @kvm;
  * otherwise, returns an -EPERM.
  */
 static int vfio_ap_mdev_set_kvm(struct ap_matrix_mdev *matrix_mdev,
@@ -1131,8 +1120,8 @@ static int vfio_ap_mdev_set_kvm(struct ap_matrix_mdev *matrix_mdev,
        return 0;
 }
 
-/*
- * vfio_ap_mdev_iommu_notifier: IOMMU notifier callback
+/**
+ * vfio_ap_mdev_iommu_notifier - IOMMU notifier callback
  *
  * @nb: The notifier block
  * @action: Action to be taken
@@ -1141,6 +1130,7 @@ static int vfio_ap_mdev_set_kvm(struct ap_matrix_mdev *matrix_mdev,
  * For an UNMAP request, unpin the guest IOVA (the NIB guest address we
  * pinned before). Other requests are ignored.
  *
+ * Return: for an UNMAP request, NOTIFY_OK; otherwise NOTIFY_DONE.
  */
 static int vfio_ap_mdev_iommu_notifier(struct notifier_block *nb,
                                       unsigned long action, void *data)
@@ -1161,19 +1151,17 @@ static int vfio_ap_mdev_iommu_notifier(struct notifier_block *nb,
 }
 
 /**
- * vfio_ap_mdev_unset_kvm
+ * vfio_ap_mdev_unset_kvm - performs clean-up of resources no longer needed
+ * by @matrix_mdev.
  *
  * @matrix_mdev: a matrix mediated device
  *
- * Performs clean-up of resources no longer needed by @matrix_mdev.
- *
  * Note: The matrix_dev->lock must be taken prior to calling
  * this function; however, the lock will be temporarily released while the
  * guest's AP configuration is cleared to avoid a potential lockdep splat.
  * The kvm->lock is taken to clear the guest's AP configuration which, under
  * certain circumstances, will result in a circular lock dependency if this is
  * done under the @matrix_mdev->lock.
- *
  */
 static void vfio_ap_mdev_unset_kvm(struct ap_matrix_mdev *matrix_mdev)
 {
index 529ffe2..fa0cb86 100644 (file)
@@ -572,14 +572,14 @@ static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc,
                                                     struct module **pmod,
                                                     unsigned int weight)
 {
-       if (!zq || !try_module_get(zq->queue->ap_dev.drv->driver.owner))
+       if (!zq || !try_module_get(zq->queue->ap_dev.device.driver->owner))
                return NULL;
        zcrypt_queue_get(zq);
        get_device(&zq->queue->ap_dev.device);
        atomic_add(weight, &zc->load);
        atomic_add(weight, &zq->load);
        zq->request_count++;
-       *pmod = zq->queue->ap_dev.drv->driver.owner;
+       *pmod = zq->queue->ap_dev.device.driver->owner;
        return zq;
 }
 
index 40fd5d3..ef11d2a 100644 (file)
@@ -39,7 +39,7 @@
 static ssize_t type_show(struct device *dev,
                         struct device_attribute *attr, char *buf)
 {
-       struct zcrypt_card *zc = to_ap_card(dev)->private;
+       struct zcrypt_card *zc = dev_get_drvdata(dev);
 
        return scnprintf(buf, PAGE_SIZE, "%s\n", zc->type_string);
 }
@@ -50,8 +50,8 @@ static ssize_t online_show(struct device *dev,
                           struct device_attribute *attr,
                           char *buf)
 {
+       struct zcrypt_card *zc = dev_get_drvdata(dev);
        struct ap_card *ac = to_ap_card(dev);
-       struct zcrypt_card *zc = ac->private;
        int online = ac->config && zc->online ? 1 : 0;
 
        return scnprintf(buf, PAGE_SIZE, "%d\n", online);
@@ -61,8 +61,8 @@ static ssize_t online_store(struct device *dev,
                            struct device_attribute *attr,
                            const char *buf, size_t count)
 {
+       struct zcrypt_card *zc = dev_get_drvdata(dev);
        struct ap_card *ac = to_ap_card(dev);
-       struct zcrypt_card *zc = ac->private;
        struct zcrypt_queue *zq;
        int online, id, i = 0, maxzqs = 0;
        struct zcrypt_queue **zq_uelist = NULL;
@@ -116,7 +116,7 @@ static ssize_t load_show(struct device *dev,
                         struct device_attribute *attr,
                         char *buf)
 {
-       struct zcrypt_card *zc = to_ap_card(dev)->private;
+       struct zcrypt_card *zc = dev_get_drvdata(dev);
 
        return scnprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&zc->load));
 }
index bc34bed..6a3c2b4 100644 (file)
@@ -1724,10 +1724,10 @@ static int fetch_cca_info(u16 cardnr, u16 domain, struct cca_info *ci)
        rlen = vlen = PAGE_SIZE/2;
        rc = cca_query_crypto_facility(cardnr, domain, "STATICSB",
                                       rarray, &rlen, varray, &vlen);
-       if (rc == 0 && rlen >= 10*8 && vlen >= 240) {
-               ci->new_apka_mk_state = (char) rarray[7*8];
-               ci->cur_apka_mk_state = (char) rarray[8*8];
-               ci->old_apka_mk_state = (char) rarray[9*8];
+       if (rc == 0 && rlen >= 13*8 && vlen >= 240) {
+               ci->new_apka_mk_state = (char) rarray[10*8];
+               ci->cur_apka_mk_state = (char) rarray[11*8];
+               ci->old_apka_mk_state = (char) rarray[12*8];
                if (ci->old_apka_mk_state == '2')
                        memcpy(&ci->old_apka_mkvp, varray + 208, 8);
                if (ci->cur_apka_mk_state == '2')
index 62ceeb7..fa8293d 100644 (file)
@@ -89,7 +89,7 @@ static int zcrypt_cex2a_card_probe(struct ap_device *ap_dev)
        if (!zc)
                return -ENOMEM;
        zc->card = ac;
-       ac->private = zc;
+       dev_set_drvdata(&ap_dev->device, zc);
 
        if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX2A) {
                zc->min_mod_size = CEX2A_MIN_MOD_SIZE;
@@ -118,7 +118,6 @@ static int zcrypt_cex2a_card_probe(struct ap_device *ap_dev)
 
        rc = zcrypt_card_register(zc);
        if (rc) {
-               ac->private = NULL;
                zcrypt_card_free(zc);
        }
 
@@ -131,10 +130,9 @@ static int zcrypt_cex2a_card_probe(struct ap_device *ap_dev)
  */
 static void zcrypt_cex2a_card_remove(struct ap_device *ap_dev)
 {
-       struct zcrypt_card *zc = to_ap_card(&ap_dev->device)->private;
+       struct zcrypt_card *zc = dev_get_drvdata(&ap_dev->device);
 
-       if (zc)
-               zcrypt_card_unregister(zc);
+       zcrypt_card_unregister(zc);
 }
 
 static struct ap_driver zcrypt_cex2a_card_driver = {
@@ -176,10 +174,9 @@ static int zcrypt_cex2a_queue_probe(struct ap_device *ap_dev)
        ap_queue_init_state(aq);
        ap_queue_init_reply(aq, &zq->reply);
        aq->request_timeout = CEX2A_CLEANUP_TIME;
-       aq->private = zq;
+       dev_set_drvdata(&ap_dev->device, zq);
        rc = zcrypt_queue_register(zq);
        if (rc) {
-               aq->private = NULL;
                zcrypt_queue_free(zq);
        }
 
@@ -192,11 +189,9 @@ static int zcrypt_cex2a_queue_probe(struct ap_device *ap_dev)
  */
 static void zcrypt_cex2a_queue_remove(struct ap_device *ap_dev)
 {
-       struct ap_queue *aq = to_ap_queue(&ap_dev->device);
-       struct zcrypt_queue *zq = aq->private;
+       struct zcrypt_queue *zq = dev_get_drvdata(&ap_dev->device);
 
-       if (zq)
-               zcrypt_queue_unregister(zq);
+       zcrypt_queue_unregister(zq);
 }
 
 static struct ap_driver zcrypt_cex2a_queue_driver = {
index 7a8cbdb..a0b9f11 100644 (file)
@@ -66,9 +66,9 @@ static ssize_t cca_serialnr_show(struct device *dev,
                                 struct device_attribute *attr,
                                 char *buf)
 {
+       struct zcrypt_card *zc = dev_get_drvdata(dev);
        struct cca_info ci;
        struct ap_card *ac = to_ap_card(dev);
-       struct zcrypt_card *zc = ac->private;
 
        memset(&ci, 0, sizeof(ci));
 
@@ -97,9 +97,9 @@ static ssize_t cca_mkvps_show(struct device *dev,
                              struct device_attribute *attr,
                              char *buf)
 {
+       struct zcrypt_queue *zq = dev_get_drvdata(dev);
        int n = 0;
        struct cca_info ci;
-       struct zcrypt_queue *zq = to_ap_queue(dev)->private;
        static const char * const cao_state[] = { "invalid", "valid" };
        static const char * const new_state[] = { "empty", "partial", "full" };
 
@@ -261,7 +261,7 @@ static int zcrypt_cex2c_card_probe(struct ap_device *ap_dev)
        if (!zc)
                return -ENOMEM;
        zc->card = ac;
-       ac->private = zc;
+       dev_set_drvdata(&ap_dev->device, zc);
        switch (ac->ap_dev.device_type) {
        case AP_DEVICE_TYPE_CEX2C:
                zc->user_space_type = ZCRYPT_CEX2C;
@@ -287,7 +287,6 @@ static int zcrypt_cex2c_card_probe(struct ap_device *ap_dev)
 
        rc = zcrypt_card_register(zc);
        if (rc) {
-               ac->private = NULL;
                zcrypt_card_free(zc);
                return rc;
        }
@@ -297,7 +296,6 @@ static int zcrypt_cex2c_card_probe(struct ap_device *ap_dev)
                                        &cca_card_attr_grp);
                if (rc) {
                        zcrypt_card_unregister(zc);
-                       ac->private = NULL;
                        zcrypt_card_free(zc);
                }
        }
@@ -311,13 +309,13 @@ static int zcrypt_cex2c_card_probe(struct ap_device *ap_dev)
  */
 static void zcrypt_cex2c_card_remove(struct ap_device *ap_dev)
 {
+       struct zcrypt_card *zc = dev_get_drvdata(&ap_dev->device);
        struct ap_card *ac = to_ap_card(&ap_dev->device);
-       struct zcrypt_card *zc = to_ap_card(&ap_dev->device)->private;
 
        if (ap_test_bit(&ac->functions, AP_FUNC_COPRO))
                sysfs_remove_group(&ap_dev->device.kobj, &cca_card_attr_grp);
-       if (zc)
-               zcrypt_card_unregister(zc);
+
+       zcrypt_card_unregister(zc);
 }
 
 static struct ap_driver zcrypt_cex2c_card_driver = {
@@ -359,10 +357,9 @@ static int zcrypt_cex2c_queue_probe(struct ap_device *ap_dev)
        ap_queue_init_state(aq);
        ap_queue_init_reply(aq, &zq->reply);
        aq->request_timeout = CEX2C_CLEANUP_TIME;
-       aq->private = zq;
+       dev_set_drvdata(&ap_dev->device, zq);
        rc = zcrypt_queue_register(zq);
        if (rc) {
-               aq->private = NULL;
                zcrypt_queue_free(zq);
                return rc;
        }
@@ -372,7 +369,6 @@ static int zcrypt_cex2c_queue_probe(struct ap_device *ap_dev)
                                        &cca_queue_attr_grp);
                if (rc) {
                        zcrypt_queue_unregister(zq);
-                       aq->private = NULL;
                        zcrypt_queue_free(zq);
                }
        }
@@ -386,13 +382,13 @@ static int zcrypt_cex2c_queue_probe(struct ap_device *ap_dev)
  */
 static void zcrypt_cex2c_queue_remove(struct ap_device *ap_dev)
 {
+       struct zcrypt_queue *zq = dev_get_drvdata(&ap_dev->device);
        struct ap_queue *aq = to_ap_queue(&ap_dev->device);
-       struct zcrypt_queue *zq = aq->private;
 
        if (ap_test_bit(&aq->card->functions, AP_FUNC_COPRO))
                sysfs_remove_group(&ap_dev->device.kobj, &cca_queue_attr_grp);
-       if (zq)
-               zcrypt_queue_unregister(zq);
+
+       zcrypt_queue_unregister(zq);
 }
 
 static struct ap_driver zcrypt_cex2c_queue_driver = {
index f518b5f..1f7ec54 100644 (file)
@@ -75,9 +75,9 @@ static ssize_t cca_serialnr_show(struct device *dev,
                                 struct device_attribute *attr,
                                 char *buf)
 {
+       struct zcrypt_card *zc = dev_get_drvdata(dev);
        struct cca_info ci;
        struct ap_card *ac = to_ap_card(dev);
-       struct zcrypt_card *zc = ac->private;
 
        memset(&ci, 0, sizeof(ci));
 
@@ -106,9 +106,9 @@ static ssize_t cca_mkvps_show(struct device *dev,
                              struct device_attribute *attr,
                              char *buf)
 {
+       struct zcrypt_queue *zq = dev_get_drvdata(dev);
        int n = 0;
        struct cca_info ci;
-       struct zcrypt_queue *zq = to_ap_queue(dev)->private;
        static const char * const cao_state[] = { "invalid", "valid" };
        static const char * const new_state[] = { "empty", "partial", "full" };
 
@@ -187,9 +187,9 @@ static ssize_t ep11_api_ordinalnr_show(struct device *dev,
                                       struct device_attribute *attr,
                                       char *buf)
 {
+       struct zcrypt_card *zc = dev_get_drvdata(dev);
        struct ep11_card_info ci;
        struct ap_card *ac = to_ap_card(dev);
-       struct zcrypt_card *zc = ac->private;
 
        memset(&ci, 0, sizeof(ci));
 
@@ -208,9 +208,9 @@ static ssize_t ep11_fw_version_show(struct device *dev,
                                    struct device_attribute *attr,
                                    char *buf)
 {
+       struct zcrypt_card *zc = dev_get_drvdata(dev);
        struct ep11_card_info ci;
        struct ap_card *ac = to_ap_card(dev);
-       struct zcrypt_card *zc = ac->private;
 
        memset(&ci, 0, sizeof(ci));
 
@@ -231,9 +231,9 @@ static ssize_t ep11_serialnr_show(struct device *dev,
                                  struct device_attribute *attr,
                                  char *buf)
 {
+       struct zcrypt_card *zc = dev_get_drvdata(dev);
        struct ep11_card_info ci;
        struct ap_card *ac = to_ap_card(dev);
-       struct zcrypt_card *zc = ac->private;
 
        memset(&ci, 0, sizeof(ci));
 
@@ -264,10 +264,10 @@ static ssize_t ep11_card_op_modes_show(struct device *dev,
                                       struct device_attribute *attr,
                                       char *buf)
 {
+       struct zcrypt_card *zc = dev_get_drvdata(dev);
        int i, n = 0;
        struct ep11_card_info ci;
        struct ap_card *ac = to_ap_card(dev);
-       struct zcrypt_card *zc = ac->private;
 
        memset(&ci, 0, sizeof(ci));
 
@@ -309,9 +309,9 @@ static ssize_t ep11_mkvps_show(struct device *dev,
                               struct device_attribute *attr,
                               char *buf)
 {
+       struct zcrypt_queue *zq = dev_get_drvdata(dev);
        int n = 0;
        struct ep11_domain_info di;
-       struct zcrypt_queue *zq = to_ap_queue(dev)->private;
        static const char * const cwk_state[] = { "invalid", "valid" };
        static const char * const nwk_state[] = { "empty", "uncommitted",
                                                  "committed" };
@@ -357,9 +357,9 @@ static ssize_t ep11_queue_op_modes_show(struct device *dev,
                                        struct device_attribute *attr,
                                        char *buf)
 {
+       struct zcrypt_queue *zq = dev_get_drvdata(dev);
        int i, n = 0;
        struct ep11_domain_info di;
-       struct zcrypt_queue *zq = to_ap_queue(dev)->private;
 
        memset(&di, 0, sizeof(di));
 
@@ -441,7 +441,7 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev)
        if (!zc)
                return -ENOMEM;
        zc->card = ac;
-       ac->private = zc;
+       dev_set_drvdata(&ap_dev->device, zc);
        if (ap_test_bit(&ac->functions, AP_FUNC_ACCEL)) {
                if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX4) {
                        zc->type_string = "CEX4A";
@@ -539,7 +539,6 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev)
 
        rc = zcrypt_card_register(zc);
        if (rc) {
-               ac->private = NULL;
                zcrypt_card_free(zc);
                return rc;
        }
@@ -549,7 +548,6 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev)
                                        &cca_card_attr_grp);
                if (rc) {
                        zcrypt_card_unregister(zc);
-                       ac->private = NULL;
                        zcrypt_card_free(zc);
                }
        } else if (ap_test_bit(&ac->functions, AP_FUNC_EP11)) {
@@ -557,7 +555,6 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev)
                                        &ep11_card_attr_grp);
                if (rc) {
                        zcrypt_card_unregister(zc);
-                       ac->private = NULL;
                        zcrypt_card_free(zc);
                }
        }
@@ -571,15 +568,15 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev)
  */
 static void zcrypt_cex4_card_remove(struct ap_device *ap_dev)
 {
+       struct zcrypt_card *zc = dev_get_drvdata(&ap_dev->device);
        struct ap_card *ac = to_ap_card(&ap_dev->device);
-       struct zcrypt_card *zc = ac->private;
 
        if (ap_test_bit(&ac->functions, AP_FUNC_COPRO))
                sysfs_remove_group(&ap_dev->device.kobj, &cca_card_attr_grp);
        else if (ap_test_bit(&ac->functions, AP_FUNC_EP11))
                sysfs_remove_group(&ap_dev->device.kobj, &ep11_card_attr_grp);
-       if (zc)
-               zcrypt_card_unregister(zc);
+
+       zcrypt_card_unregister(zc);
 }
 
 static struct ap_driver zcrypt_cex4_card_driver = {
@@ -629,10 +626,9 @@ static int zcrypt_cex4_queue_probe(struct ap_device *ap_dev)
        ap_queue_init_state(aq);
        ap_queue_init_reply(aq, &zq->reply);
        aq->request_timeout = CEX4_CLEANUP_TIME;
-       aq->private = zq;
+       dev_set_drvdata(&ap_dev->device, zq);
        rc = zcrypt_queue_register(zq);
        if (rc) {
-               aq->private = NULL;
                zcrypt_queue_free(zq);
                return rc;
        }
@@ -642,7 +638,6 @@ static int zcrypt_cex4_queue_probe(struct ap_device *ap_dev)
                                        &cca_queue_attr_grp);
                if (rc) {
                        zcrypt_queue_unregister(zq);
-                       aq->private = NULL;
                        zcrypt_queue_free(zq);
                }
        } else if (ap_test_bit(&aq->card->functions, AP_FUNC_EP11)) {
@@ -650,7 +645,6 @@ static int zcrypt_cex4_queue_probe(struct ap_device *ap_dev)
                                        &ep11_queue_attr_grp);
                if (rc) {
                        zcrypt_queue_unregister(zq);
-                       aq->private = NULL;
                        zcrypt_queue_free(zq);
                }
        }
@@ -664,15 +658,15 @@ static int zcrypt_cex4_queue_probe(struct ap_device *ap_dev)
  */
 static void zcrypt_cex4_queue_remove(struct ap_device *ap_dev)
 {
+       struct zcrypt_queue *zq = dev_get_drvdata(&ap_dev->device);
        struct ap_queue *aq = to_ap_queue(&ap_dev->device);
-       struct zcrypt_queue *zq = aq->private;
 
        if (ap_test_bit(&aq->card->functions, AP_FUNC_COPRO))
                sysfs_remove_group(&ap_dev->device.kobj, &cca_queue_attr_grp);
        else if (ap_test_bit(&aq->card->functions, AP_FUNC_EP11))
                sysfs_remove_group(&ap_dev->device.kobj, &ep11_queue_attr_grp);
-       if (zq)
-               zcrypt_queue_unregister(zq);
+
+       zcrypt_queue_unregister(zq);
 }
 
 static struct ap_driver zcrypt_cex4_queue_driver = {
index 20f1228..398bde2 100644 (file)
@@ -40,8 +40,8 @@ static ssize_t online_show(struct device *dev,
                           struct device_attribute *attr,
                           char *buf)
 {
+       struct zcrypt_queue *zq = dev_get_drvdata(dev);
        struct ap_queue *aq = to_ap_queue(dev);
-       struct zcrypt_queue *zq = aq->private;
        int online = aq->config && zq->online ? 1 : 0;
 
        return scnprintf(buf, PAGE_SIZE, "%d\n", online);
@@ -51,8 +51,8 @@ static ssize_t online_store(struct device *dev,
                            struct device_attribute *attr,
                            const char *buf, size_t count)
 {
+       struct zcrypt_queue *zq = dev_get_drvdata(dev);
        struct ap_queue *aq = to_ap_queue(dev);
-       struct zcrypt_queue *zq = aq->private;
        struct zcrypt_card *zc = zq->zcard;
        int online;
 
@@ -83,7 +83,7 @@ static ssize_t load_show(struct device *dev,
                         struct device_attribute *attr,
                         char *buf)
 {
-       struct zcrypt_queue *zq = to_ap_queue(dev)->private;
+       struct zcrypt_queue *zq = dev_get_drvdata(dev);
 
        return scnprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&zq->load));
 }
@@ -170,7 +170,7 @@ int zcrypt_queue_register(struct zcrypt_queue *zq)
        int rc;
 
        spin_lock(&zcrypt_list_lock);
-       zc = zq->queue->card->private;
+       zc = dev_get_drvdata(&zq->queue->card->ap_dev.device);
        zcrypt_card_get(zc);
        zq->zcard = zc;
        zq->online = 1; /* New devices are online by default. */
index 62f88cc..f96755a 100644 (file)
@@ -3804,14 +3804,10 @@ static void qeth_qdio_output_handler(struct ccw_device *ccwdev,
                                     unsigned long card_ptr)
 {
        struct qeth_card *card        = (struct qeth_card *) card_ptr;
-       struct net_device *dev = card->dev;
 
-       QETH_CARD_TEXT(card, 6, "qdouhdl");
-       if (qdio_error & QDIO_ERROR_FATAL) {
-               QETH_CARD_TEXT(card, 2, "achkcond");
-               netif_tx_stop_all_queues(dev);
-               qeth_schedule_recovery(card);
-       }
+       QETH_CARD_TEXT(card, 2, "achkcond");
+       netif_tx_stop_all_queues(card->dev);
+       qeth_schedule_recovery(card);
 }
 
 /**
index 2abf86c..d7cdd9c 100644 (file)
@@ -279,7 +279,7 @@ static void qeth_l2_set_pnso_mode(struct qeth_card *card,
 
 static void qeth_l2_dev2br_fdb_flush(struct qeth_card *card)
 {
-       struct switchdev_notifier_fdb_info info;
+       struct switchdev_notifier_fdb_info info = {};
 
        QETH_CARD_TEXT(card, 2, "fdbflush");
 
@@ -679,7 +679,7 @@ static void qeth_l2_dev2br_fdb_notify(struct qeth_card *card, u8 code,
                                      struct net_if_token *token,
                                      struct mac_addr_lnid *addr_lnid)
 {
-       struct switchdev_notifier_fdb_info info;
+       struct switchdev_notifier_fdb_info info = {};
        u8 ntfy_mac[ETH_ALEN];
 
        ether_addr_copy(ntfy_mac, addr_lnid->mac);
index 6671d95..8f19bed 100644 (file)
@@ -69,10 +69,7 @@ static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err,
 {
        struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm;
 
-       if (unlikely(qdio_err)) {
-               zfcp_qdio_handler_error(qdio, "qdireq1", qdio_err);
-               return;
-       }
+       zfcp_qdio_handler_error(qdio, "qdireq1", qdio_err);
 }
 
 static void zfcp_qdio_request_tasklet(struct tasklet_struct *tasklet)
index 84fc7a0..4a84599 100644 (file)
@@ -2642,6 +2642,7 @@ int acornscsi_abort(struct scsi_cmnd *SCpnt)
 //#endif
                clear_bit(SCpnt->device->id * 8 +
                          (u8)(SCpnt->device->lun & 0x7), host->busyluns);
+               fallthrough;
 
        /*
         * We found the command, and cleared it out.  Either
index 6baa9b3..9c4458a 100644 (file)
@@ -1375,6 +1375,7 @@ static void fas216_busservice_intr(FAS216_Info *info, unsigned int stat, unsigne
                case IS_COMPLETE:
                        break;
                }
+               break;
 
        default:
                break;
index 25f6e1a..66652ab 100644 (file)
@@ -453,8 +453,8 @@ static int initialize_controller(struct scsi_device *sdev,
                if (!h->ctlr)
                        err = SCSI_DH_RES_TEMP_UNAVAIL;
                else {
-                       list_add_rcu(&h->node, &h->ctlr->dh_list);
                        h->sdev = sdev;
+                       list_add_rcu(&h->node, &h->ctlr->dh_list);
                }
                spin_unlock(&list_lock);
                err = SCSI_DH_OK;
@@ -778,11 +778,11 @@ static void rdac_bus_detach( struct scsi_device *sdev )
        spin_lock(&list_lock);
        if (h->ctlr) {
                list_del_rcu(&h->node);
-               h->sdev = NULL;
                kref_put(&h->ctlr->kref, release_controller);
        }
        spin_unlock(&list_lock);
        sdev->handler_data = NULL;
+       synchronize_rcu();
        kfree(h);
 }
 
index bee1bec..935b01e 100644 (file)
@@ -807,6 +807,13 @@ static int ibmvfc_init_event_pool(struct ibmvfc_host *vhost,
        for (i = 0; i < size; ++i) {
                struct ibmvfc_event *evt = &pool->events[i];
 
+               /*
+                * evt->active states
+                *  1 = in flight
+                *  0 = being completed
+                * -1 = free/freed
+                */
+               atomic_set(&evt->active, -1);
                atomic_set(&evt->free, 1);
                evt->crq.valid = 0x80;
                evt->crq.ioba = cpu_to_be64(pool->iu_token + (sizeof(*evt->xfer_iu) * i));
@@ -1017,6 +1024,7 @@ static void ibmvfc_free_event(struct ibmvfc_event *evt)
 
        BUG_ON(!ibmvfc_valid_event(pool, evt));
        BUG_ON(atomic_inc_return(&evt->free) != 1);
+       BUG_ON(atomic_dec_and_test(&evt->active));
 
        spin_lock_irqsave(&evt->queue->l_lock, flags);
        list_add_tail(&evt->queue_list, &evt->queue->free);
@@ -1072,6 +1080,12 @@ static void ibmvfc_complete_purge(struct list_head *purge_list)
  **/
 static void ibmvfc_fail_request(struct ibmvfc_event *evt, int error_code)
 {
+       /*
+        * Anything we are failing should still be active. Otherwise, it
+        * implies we already got a response for the command and are doing
+        * something bad like double completing it.
+        */
+       BUG_ON(!atomic_dec_and_test(&evt->active));
        if (evt->cmnd) {
                evt->cmnd->result = (error_code << 16);
                evt->done = ibmvfc_scsi_eh_done;
@@ -1723,6 +1737,7 @@ static int ibmvfc_send_event(struct ibmvfc_event *evt,
 
                evt->done(evt);
        } else {
+               atomic_set(&evt->active, 1);
                spin_unlock_irqrestore(&evt->queue->l_lock, flags);
                ibmvfc_trc_start(evt);
        }
@@ -3251,7 +3266,7 @@ static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost,
                return;
        }
 
-       if (unlikely(atomic_read(&evt->free))) {
+       if (unlikely(atomic_dec_if_positive(&evt->active))) {
                dev_err(vhost->dev, "Received duplicate correlation_token 0x%08llx!\n",
                        crq->ioba);
                return;
@@ -3778,7 +3793,7 @@ static void ibmvfc_handle_scrq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost
                return;
        }
 
-       if (unlikely(atomic_read(&evt->free))) {
+       if (unlikely(atomic_dec_if_positive(&evt->active))) {
                dev_err(vhost->dev, "Received duplicate correlation_token 0x%08llx!\n",
                        crq->ioba);
                return;
index 4f0f3ba..92fb889 100644 (file)
@@ -745,6 +745,7 @@ struct ibmvfc_event {
        struct ibmvfc_target *tgt;
        struct scsi_cmnd *cmnd;
        atomic_t free;
+       atomic_t active;
        union ibmvfc_iu *xfer_iu;
        void (*done)(struct ibmvfc_event *evt);
        void (*_done)(struct ibmvfc_event *evt);
index 5983e05..e29523a 100644 (file)
@@ -13193,6 +13193,8 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
        if (!phba)
                return -ENOMEM;
 
+       INIT_LIST_HEAD(&phba->poll_list);
+
        /* Perform generic PCI device enabling operation */
        error = lpfc_enable_pci_dev(phba);
        if (error)
@@ -13327,7 +13329,6 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
        /* Enable RAS FW log support */
        lpfc_sli4_ras_setup(phba);
 
-       INIT_LIST_HEAD(&phba->poll_list);
        timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0);
        cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, &phba->cpuhp);
 
index abf7b40..c509440 100644 (file)
@@ -238,7 +238,7 @@ mraid_mm_get_adapter(mimd_t __user *umimd, int *rval)
        mimd_t          mimd;
        uint32_t        adapno;
        int             iterator;
-
+       bool            is_found;
 
        if (copy_from_user(&mimd, umimd, sizeof(mimd_t))) {
                *rval = -EFAULT;
@@ -254,12 +254,16 @@ mraid_mm_get_adapter(mimd_t __user *umimd, int *rval)
 
        adapter = NULL;
        iterator = 0;
+       is_found = false;
 
        list_for_each_entry(adapter, &adapters_list_g, list) {
-               if (iterator++ == adapno) break;
+               if (iterator++ == adapno) {
+                       is_found = true;
+                       break;
+               }
        }
 
-       if (!adapter) {
+       if (!is_found) {
                *rval = -ENODEV;
                return NULL;
        }
@@ -725,6 +729,7 @@ ioctl_done(uioc_t *kioc)
        uint32_t        adapno;
        int             iterator;
        mraid_mmadp_t*  adapter;
+       bool            is_found;
 
        /*
         * When the kioc returns from driver, make sure it still doesn't
@@ -747,19 +752,23 @@ ioctl_done(uioc_t *kioc)
                iterator        = 0;
                adapter         = NULL;
                adapno          = kioc->adapno;
+               is_found        = false;
 
                con_log(CL_ANN, ( KERN_WARNING "megaraid cmm: completed "
                                        "ioctl that was timedout before\n"));
 
                list_for_each_entry(adapter, &adapters_list_g, list) {
-                       if (iterator++ == adapno) break;
+                       if (iterator++ == adapno) {
+                               is_found = true;
+                               break;
+                       }
                }
 
                kioc->timedout = 0;
 
-               if (adapter) {
+               if (is_found)
                        mraid_mm_dealloc_kioc( adapter, kioc );
-               }
+
        }
        else {
                wake_up(&wait_q);
index 19b1c0c..cf4a3a2 100644 (file)
@@ -7851,7 +7851,7 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc)
                        return r;
        }
 
-       rc = _base_static_config_pages(ioc);
+       r = _base_static_config_pages(ioc);
        if (r)
                return r;
 
index 48548a9..32e60f0 100644 (file)
@@ -684,8 +684,7 @@ int pm8001_dev_found(struct domain_device *dev)
 
 void pm8001_task_done(struct sas_task *task)
 {
-       if (!del_timer(&task->slow_task->timer))
-               return;
+       del_timer(&task->slow_task->timer);
        complete(&task->slow_task->completion);
 }
 
@@ -693,9 +692,14 @@ static void pm8001_tmf_timedout(struct timer_list *t)
 {
        struct sas_task_slow *slow = from_timer(slow, t, timer);
        struct sas_task *task = slow->task;
+       unsigned long flags;
 
-       task->task_state_flags |= SAS_TASK_STATE_ABORTED;
-       complete(&task->slow_task->completion);
+       spin_lock_irqsave(&task->task_state_lock, flags);
+       if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
+               task->task_state_flags |= SAS_TASK_STATE_ABORTED;
+               complete(&task->slow_task->completion);
+       }
+       spin_unlock_irqrestore(&task->task_state_lock, flags);
 }
 
 #define PM8001_TASK_TIMEOUT 20
@@ -748,13 +752,10 @@ static int pm8001_exec_internal_tmf_task(struct domain_device *dev,
                }
                res = -TMF_RESP_FUNC_FAILED;
                /* Even TMF timed out, return direct. */
-               if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
-                       if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
-                               pm8001_dbg(pm8001_ha, FAIL,
-                                          "TMF task[%x]timeout.\n",
-                                          tmf->tmf);
-                               goto ex_err;
-                       }
+               if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
+                       pm8001_dbg(pm8001_ha, FAIL, "TMF task[%x]timeout.\n",
+                                  tmf->tmf);
+                       goto ex_err;
                }
 
                if (task->task_status.resp == SAS_TASK_COMPLETE &&
@@ -834,12 +835,9 @@ pm8001_exec_internal_task_abort(struct pm8001_hba_info *pm8001_ha,
                wait_for_completion(&task->slow_task->completion);
                res = TMF_RESP_FUNC_FAILED;
                /* Even TMF timed out, return direct. */
-               if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
-                       if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
-                               pm8001_dbg(pm8001_ha, FAIL,
-                                          "TMF task timeout.\n");
-                               goto ex_err;
-                       }
+               if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
+                       pm8001_dbg(pm8001_ha, FAIL, "TMF task timeout.\n");
+                       goto ex_err;
                }
 
                if (task->task_status.resp == SAS_TASK_COMPLETE &&
index b059bf2..5b6996a 100644 (file)
@@ -475,7 +475,8 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,
                error = shost->hostt->target_alloc(starget);
 
                if(error) {
-                       dev_printk(KERN_ERR, dev, "target allocation failed, error %d\n", error);
+                       if (error != -ENXIO)
+                               dev_err(dev, "target allocation failed, error %d\n", error);
                        /* don't want scsi_target_reap to do the final
                         * put because it will be under the host lock */
                        scsi_target_destroy(starget);
index 32489d2..c0d3111 100644 (file)
@@ -807,11 +807,17 @@ store_state_field(struct device *dev, struct device_attribute *attr,
        mutex_lock(&sdev->state_mutex);
        ret = scsi_device_set_state(sdev, state);
        /*
-        * If the device state changes to SDEV_RUNNING, we need to run
-        * the queue to avoid I/O hang.
+        * If the device state changes to SDEV_RUNNING, we need to
+        * run the queue to avoid I/O hang, and rescan the device
+        * to revalidate it. Running the queue first is necessary
+        * because another thread may be waiting inside
+        * blk_mq_freeze_queue_wait() and because that call may be
+        * waiting for pending I/O to finish.
         */
-       if (ret == 0 && state == SDEV_RUNNING)
+       if (ret == 0 && state == SDEV_RUNNING) {
                blk_mq_run_hw_queues(sdev->request_queue, true);
+               scsi_rescan_device(dev);
+       }
        mutex_unlock(&sdev->state_mutex);
 
        return ret == 0 ? count : -EINVAL;
index b8d55af..610ebba 100644 (file)
@@ -129,6 +129,7 @@ static DEFINE_MUTEX(sd_ref_mutex);
 static struct kmem_cache *sd_cdb_cache;
 static mempool_t *sd_cdb_pool;
 static mempool_t *sd_page_pool;
+static struct lock_class_key sd_bio_compl_lkclass;
 
 static const char *sd_cache_types[] = {
        "write through", "none", "write back",
@@ -886,7 +887,7 @@ static blk_status_t sd_setup_unmap_cmnd(struct scsi_cmnd *cmd)
        cmd->cmnd[0] = UNMAP;
        cmd->cmnd[8] = 24;
 
-       buf = page_address(rq->special_vec.bv_page);
+       buf = bvec_virt(&rq->special_vec);
        put_unaligned_be16(6 + 16, &buf[0]);
        put_unaligned_be16(16, &buf[2]);
        put_unaligned_be64(lba, &buf[8]);
@@ -3408,7 +3409,8 @@ static int sd_probe(struct device *dev)
        if (!sdkp)
                goto out;
 
-       gd = alloc_disk(SD_MINORS);
+       gd = __alloc_disk_node(sdp->request_queue, NUMA_NO_NODE,
+                              &sd_bio_compl_lkclass);
        if (!gd)
                goto out_free;
 
@@ -3454,10 +3456,10 @@ static int sd_probe(struct device *dev)
 
        gd->major = sd_major((index & 0xf0) >> 4);
        gd->first_minor = ((index & 0xf) << 4) | (index & 0xfff00);
+       gd->minors = SD_MINORS;
 
        gd->fops = &sd_fops;
        gd->private_data = &sdkp->driver;
-       gd->queue = sdkp->device->request_queue;
 
        /* defaults, until the device tells us otherwise */
        sdp->sector_size = 512;
index 91e2221..d5889b4 100644 (file)
@@ -166,7 +166,7 @@ typedef struct sg_device { /* holds the state of each scsi generic device */
        bool exclude;           /* 1->open(O_EXCL) succeeded and is active */
        int open_cnt;           /* count of opens (perhaps < num(sfds) ) */
        char sgdebug;           /* 0->off, 1->sense, 9->dump dev, 10-> all devs */
-       struct gendisk *disk;
+       char name[DISK_NAME_LEN];
        struct cdev * cdev;     /* char_dev [sysfs: /sys/cdev/major/sg<n>] */
        struct kref d_ref;
 } Sg_device;
@@ -202,8 +202,7 @@ static void sg_device_destroy(struct kref *kref);
 #define SZ_SG_REQ_INFO sizeof(sg_req_info_t)
 
 #define sg_printk(prefix, sdp, fmt, a...) \
-       sdev_prefix_printk(prefix, (sdp)->device,               \
-                          (sdp)->disk->disk_name, fmt, ##a)
+       sdev_prefix_printk(prefix, (sdp)->device, (sdp)->name, fmt, ##a)
 
 /*
  * The SCSI interfaces that use read() and write() as an asynchronous variant of
@@ -832,7 +831,7 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
 
        srp->rq->timeout = timeout;
        kref_get(&sfp->f_ref); /* sg_rq_end_io() does kref_put(). */
-       blk_execute_rq_nowait(sdp->disk, srp->rq, at_head, sg_rq_end_io);
+       blk_execute_rq_nowait(NULL, srp->rq, at_head, sg_rq_end_io);
        return 0;
 }
 
@@ -1119,8 +1118,7 @@ sg_ioctl_common(struct file *filp, Sg_device *sdp, Sg_fd *sfp,
                return put_user(max_sectors_bytes(sdp->device->request_queue),
                                ip);
        case BLKTRACESETUP:
-               return blk_trace_setup(sdp->device->request_queue,
-                                      sdp->disk->disk_name,
+               return blk_trace_setup(sdp->device->request_queue, sdp->name,
                                       MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
                                       NULL, p);
        case BLKTRACESTART:
@@ -1456,7 +1454,7 @@ static struct class *sg_sysfs_class;
 static int sg_sysfs_valid = 0;
 
 static Sg_device *
-sg_alloc(struct gendisk *disk, struct scsi_device *scsidp)
+sg_alloc(struct scsi_device *scsidp)
 {
        struct request_queue *q = scsidp->request_queue;
        Sg_device *sdp;
@@ -1492,9 +1490,7 @@ sg_alloc(struct gendisk *disk, struct scsi_device *scsidp)
 
        SCSI_LOG_TIMEOUT(3, sdev_printk(KERN_INFO, scsidp,
                                        "sg_alloc: dev=%d \n", k));
-       sprintf(disk->disk_name, "sg%d", k);
-       disk->first_minor = k;
-       sdp->disk = disk;
+       sprintf(sdp->name, "sg%d", k);
        sdp->device = scsidp;
        mutex_init(&sdp->open_rel_lock);
        INIT_LIST_HEAD(&sdp->sfds);
@@ -1521,19 +1517,11 @@ static int
 sg_add_device(struct device *cl_dev, struct class_interface *cl_intf)
 {
        struct scsi_device *scsidp = to_scsi_device(cl_dev->parent);
-       struct gendisk *disk;
        Sg_device *sdp = NULL;
        struct cdev * cdev = NULL;
        int error;
        unsigned long iflags;
 
-       disk = alloc_disk(1);
-       if (!disk) {
-               pr_warn("%s: alloc_disk failed\n", __func__);
-               return -ENOMEM;
-       }
-       disk->major = SCSI_GENERIC_MAJOR;
-
        error = -ENOMEM;
        cdev = cdev_alloc();
        if (!cdev) {
@@ -1543,7 +1531,7 @@ sg_add_device(struct device *cl_dev, struct class_interface *cl_intf)
        cdev->owner = THIS_MODULE;
        cdev->ops = &sg_fops;
 
-       sdp = sg_alloc(disk, scsidp);
+       sdp = sg_alloc(scsidp);
        if (IS_ERR(sdp)) {
                pr_warn("%s: sg_alloc failed\n", __func__);
                error = PTR_ERR(sdp);
@@ -1561,7 +1549,7 @@ sg_add_device(struct device *cl_dev, struct class_interface *cl_intf)
                sg_class_member = device_create(sg_sysfs_class, cl_dev->parent,
                                                MKDEV(SCSI_GENERIC_MAJOR,
                                                      sdp->index),
-                                               sdp, "%s", disk->disk_name);
+                                               sdp, "%s", sdp->name);
                if (IS_ERR(sg_class_member)) {
                        pr_err("%s: device_create failed\n", __func__);
                        error = PTR_ERR(sg_class_member);
@@ -1589,7 +1577,6 @@ cdev_add_err:
        kfree(sdp);
 
 out:
-       put_disk(disk);
        if (cdev)
                cdev_del(cdev);
        return error;
@@ -1613,7 +1600,6 @@ sg_device_destroy(struct kref *kref)
        SCSI_LOG_TIMEOUT(3,
                sg_printk(KERN_INFO, sdp, "sg_device_destroy\n"));
 
-       put_disk(sdp->disk);
        kfree(sdp);
 }
 
@@ -2606,7 +2592,7 @@ static int sg_proc_seq_show_debug(struct seq_file *s, void *v)
                goto skip;
        read_lock(&sdp->sfd_lock);
        if (!list_empty(&sdp->sfds)) {
-               seq_printf(s, " >>> device=%s ", sdp->disk->disk_name);
+               seq_printf(s, " >>> device=%s ", sdp->name);
                if (atomic_read(&sdp->detaching))
                        seq_puts(s, "detaching pending close ");
                else if (sdp->device) {
index 94c254e..2942a4e 100644 (file)
@@ -106,6 +106,8 @@ static struct scsi_driver sr_template = {
 static unsigned long sr_index_bits[SR_DISKS / BITS_PER_LONG];
 static DEFINE_SPINLOCK(sr_index_lock);
 
+static struct lock_class_key sr_bio_compl_lkclass;
+
 /* This semaphore is used to mediate the 0->1 reference get in the
  * face of object destruction (i.e. we can't allow a get on an
  * object after last put) */
@@ -221,7 +223,7 @@ static unsigned int sr_get_events(struct scsi_device *sdev)
        else if (med->media_event_code == 2)
                return DISK_EVENT_MEDIA_CHANGE;
        else if (med->media_event_code == 3)
-               return DISK_EVENT_EJECT_REQUEST;
+               return DISK_EVENT_MEDIA_CHANGE;
        return 0;
 }
 
@@ -712,7 +714,8 @@ static int sr_probe(struct device *dev)
 
        kref_init(&cd->kref);
 
-       disk = alloc_disk(1);
+       disk = __alloc_disk_node(sdev->request_queue, NUMA_NO_NODE,
+                                &sr_bio_compl_lkclass);
        if (!disk)
                goto fail_free;
        mutex_init(&cd->lock);
@@ -729,6 +732,7 @@ static int sr_probe(struct device *dev)
 
        disk->major = SCSI_CDROM_MAJOR;
        disk->first_minor = minor;
+       disk->minors = 1;
        sprintf(disk->disk_name, "sr%d", minor);
        disk->fops = &sr_bdops;
        disk->flags = GENHD_FL_CD | GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE;
@@ -762,7 +766,6 @@ static int sr_probe(struct device *dev)
 
        set_capacity(disk, cd->capacity);
        disk->private_data = &cd->driver;
-       disk->queue = sdev->request_queue;
 
        if (register_cdrom(disk, &cd->cdi))
                goto fail_minor;
index c6f1454..d1abc02 100644 (file)
@@ -309,13 +309,8 @@ static char * st_incompatible(struct scsi_device* SDp)
 }
 \f
 
-static inline char *tape_name(struct scsi_tape *tape)
-{
-       return tape->disk->disk_name;
-}
-
 #define st_printk(prefix, t, fmt, a...) \
-       sdev_prefix_printk(prefix, (t)->device, tape_name(t), fmt, ##a)
+       sdev_prefix_printk(prefix, (t)->device, (t)->name, fmt, ##a)
 #ifdef DEBUG
 #define DEBC_printk(t, fmt, a...) \
        if (debugging) { st_printk(ST_DEB_MSG, t, fmt, ##a ); }
@@ -363,7 +358,7 @@ static int st_chk_result(struct scsi_tape *STp, struct st_request * SRpnt)
        int result = SRpnt->result;
        u8 scode;
        DEB(const char *stp;)
-       char *name = tape_name(STp);
+       char *name = STp->name;
        struct st_cmdstatus *cmdstatp;
 
        if (!result)
@@ -3841,8 +3836,9 @@ static long st_ioctl_common(struct file *file, unsigned int cmd_in, void __user
                            !capable(CAP_SYS_RAWIO))
                                i = -EPERM;
                        else
-                               i = scsi_cmd_ioctl(STp->disk->queue, STp->disk,
-                                                  file->f_mode, cmd_in, p);
+                               i = scsi_cmd_ioctl(STp->device->request_queue,
+                                                  NULL, file->f_mode, cmd_in,
+                                                  p);
                        if (i != -ENOTTY)
                                return i;
                        break;
@@ -4216,7 +4212,7 @@ static int create_one_cdev(struct scsi_tape *tape, int mode, int rew)
 
        i = mode << (4 - ST_NBR_MODE_BITS);
        snprintf(name, 10, "%s%s%s", rew ? "n" : "",
-                tape->disk->disk_name, st_formats[i]);
+                tape->name, st_formats[i]);
 
        dev = device_create(&st_sysfs_class, &tape->device->sdev_gendev,
                            cdev_devno, &tape->modes[mode], "%s", name);
@@ -4271,7 +4267,6 @@ static void remove_cdevs(struct scsi_tape *tape)
 static int st_probe(struct device *dev)
 {
        struct scsi_device *SDp = to_scsi_device(dev);
-       struct gendisk *disk = NULL;
        struct scsi_tape *tpnt = NULL;
        struct st_modedef *STm;
        struct st_partstat *STps;
@@ -4301,27 +4296,13 @@ static int st_probe(struct device *dev)
                goto out;
        }
 
-       disk = alloc_disk(1);
-       if (!disk) {
-               sdev_printk(KERN_ERR, SDp,
-                           "st: out of memory. Device not attached.\n");
-               goto out_buffer_free;
-       }
-
        tpnt = kzalloc(sizeof(struct scsi_tape), GFP_KERNEL);
        if (tpnt == NULL) {
                sdev_printk(KERN_ERR, SDp,
                            "st: Can't allocate device descriptor.\n");
-               goto out_put_disk;
+               goto out_buffer_free;
        }
        kref_init(&tpnt->kref);
-       tpnt->disk = disk;
-       disk->private_data = &tpnt->driver;
-       /* SCSI tape doesn't register this gendisk via add_disk().  Manually
-        * take queue reference that release_disk() expects. */
-       if (!blk_get_queue(SDp->request_queue))
-               goto out_put_disk;
-       disk->queue = SDp->request_queue;
        tpnt->driver = &st_template;
 
        tpnt->device = SDp;
@@ -4394,10 +4375,10 @@ static int st_probe(struct device *dev)
        idr_preload_end();
        if (error < 0) {
                pr_warn("st: idr allocation failed: %d\n", error);
-               goto out_put_queue;
+               goto out_free_tape;
        }
        tpnt->index = error;
-       sprintf(disk->disk_name, "st%d", tpnt->index);
+       sprintf(tpnt->name, "st%d", tpnt->index);
        tpnt->stats = kzalloc(sizeof(struct scsi_tape_stats), GFP_KERNEL);
        if (tpnt->stats == NULL) {
                sdev_printk(KERN_ERR, SDp,
@@ -4414,9 +4395,9 @@ static int st_probe(struct device *dev)
        scsi_autopm_put_device(SDp);
 
        sdev_printk(KERN_NOTICE, SDp,
-                   "Attached scsi tape %s\n", tape_name(tpnt));
+                   "Attached scsi tape %s\n", tpnt->name);
        sdev_printk(KERN_INFO, SDp, "%s: try direct i/o: %s (alignment %d B)\n",
-                   tape_name(tpnt), tpnt->try_dio ? "yes" : "no",
+                   tpnt->name, tpnt->try_dio ? "yes" : "no",
                    queue_dma_alignment(SDp->request_queue) + 1);
 
        return 0;
@@ -4428,10 +4409,7 @@ out_idr_remove:
        spin_lock(&st_index_lock);
        idr_remove(&st_index_idr, tpnt->index);
        spin_unlock(&st_index_lock);
-out_put_queue:
-       blk_put_queue(disk->queue);
-out_put_disk:
-       put_disk(disk);
+out_free_tape:
        kfree(tpnt);
 out_buffer_free:
        kfree(buffer);
@@ -4470,7 +4448,6 @@ static int st_remove(struct device *dev)
 static void scsi_tape_release(struct kref *kref)
 {
        struct scsi_tape *tpnt = to_scsi_tape(kref);
-       struct gendisk *disk = tpnt->disk;
 
        tpnt->device = NULL;
 
@@ -4480,8 +4457,6 @@ static void scsi_tape_release(struct kref *kref)
                kfree(tpnt->buffer);
        }
 
-       disk->private_data = NULL;
-       put_disk(disk);
        kfree(tpnt->stats);
        kfree(tpnt);
        return;
index 9d3c38b..c0ef0d9 100644 (file)
@@ -187,7 +187,7 @@ struct scsi_tape {
        unsigned char last_cmnd[6];
        unsigned char last_sense[16];
 #endif
-       struct gendisk *disk;
+       char name[DISK_NAME_LEN];
        struct kref     kref;
        struct scsi_tape_stats *stats;
 };
index 328bb96..37506b3 100644 (file)
@@ -1199,14 +1199,24 @@ static void storvsc_on_io_completion(struct storvsc_device *stor_device,
                vstor_packet->vm_srb.sense_info_length);
 
        if (vstor_packet->vm_srb.scsi_status != 0 ||
-           vstor_packet->vm_srb.srb_status != SRB_STATUS_SUCCESS)
-               storvsc_log(device, STORVSC_LOGGING_ERROR,
+           vstor_packet->vm_srb.srb_status != SRB_STATUS_SUCCESS) {
+
+               /*
+                * Log TEST_UNIT_READY errors only as warnings. Hyper-V can
+                * return errors when detecting devices using TEST_UNIT_READY,
+                * and logging these as errors produces unhelpful noise.
+                */
+               int loglevel = (stor_pkt->vm_srb.cdb[0] == TEST_UNIT_READY) ?
+                       STORVSC_LOGGING_WARN : STORVSC_LOGGING_ERROR;
+
+               storvsc_log(device, loglevel,
                        "tag#%d cmd 0x%x status: scsi 0x%x srb 0x%x hv 0x%x\n",
                        request->cmd->request->tag,
                        stor_pkt->vm_srb.cdb[0],
                        vstor_packet->vm_srb.scsi_status,
                        vstor_packet->vm_srb.srb_status,
                        vstor_packet->status);
+       }
 
        if (vstor_packet->vm_srb.scsi_status == SAM_STAT_CHECK_CONDITION &&
            (vstor_packet->vm_srb.srb_status & SRB_STATUS_AUTOSENSE_VALID))
index f2b5d34..e5ae262 100644 (file)
@@ -66,7 +66,7 @@ int slim_alloc_txn_tid(struct slim_controller *ctrl, struct slim_msg_txn *txn)
        int ret = 0;
 
        spin_lock_irqsave(&ctrl->txn_lock, flags);
-       ret = idr_alloc_cyclic(&ctrl->tid_idr, txn, 0,
+       ret = idr_alloc_cyclic(&ctrl->tid_idr, txn, 1,
                                SLIM_MAX_TIDS, GFP_ATOMIC);
        if (ret < 0) {
                spin_unlock_irqrestore(&ctrl->txn_lock, flags);
@@ -131,7 +131,8 @@ int slim_do_transfer(struct slim_controller *ctrl, struct slim_msg_txn *txn)
                        goto slim_xfer_err;
                }
        }
-
+       /* Initialize tid to invalid value */
+       txn->tid = 0;
        need_tid = slim_tid_txn(txn->mt, txn->mc);
 
        if (need_tid) {
@@ -163,7 +164,7 @@ int slim_do_transfer(struct slim_controller *ctrl, struct slim_msg_txn *txn)
                        txn->mt, txn->mc, txn->la, ret);
 
 slim_xfer_err:
-       if (!clk_pause_msg && (!need_tid  || ret == -ETIMEDOUT)) {
+       if (!clk_pause_msg && (txn->tid == 0  || ret == -ETIMEDOUT)) {
                /*
                 * remove runtime-pm vote if this was TX only, or
                 * if there was error during this transaction
index c054e83..7040293 100644 (file)
@@ -618,7 +618,7 @@ static void qcom_slim_ngd_rx(struct qcom_slim_ngd_ctrl *ctrl, u8 *buf)
                (mc == SLIM_USR_MC_GENERIC_ACK &&
                 mt == SLIM_MSG_MT_SRC_REFERRED_USER)) {
                slim_msg_response(&ctrl->ctrl, &buf[4], buf[3], len - 4);
-               pm_runtime_mark_last_busy(ctrl->dev);
+               pm_runtime_mark_last_busy(ctrl->ctrl.dev);
        }
 }
 
@@ -1080,7 +1080,8 @@ static void qcom_slim_ngd_setup(struct qcom_slim_ngd_ctrl *ctrl)
 {
        u32 cfg = readl_relaxed(ctrl->ngd->base);
 
-       if (ctrl->state == QCOM_SLIM_NGD_CTRL_DOWN)
+       if (ctrl->state == QCOM_SLIM_NGD_CTRL_DOWN ||
+               ctrl->state == QCOM_SLIM_NGD_CTRL_ASLEEP)
                qcom_slim_ngd_init_dma(ctrl);
 
        /* By default enable message queues */
@@ -1131,6 +1132,7 @@ static int qcom_slim_ngd_power_up(struct qcom_slim_ngd_ctrl *ctrl)
                        dev_info(ctrl->dev, "Subsys restart: ADSP active framer\n");
                        return 0;
                }
+               qcom_slim_ngd_setup(ctrl);
                return 0;
        }
 
@@ -1257,13 +1259,14 @@ static int qcom_slim_ngd_enable(struct qcom_slim_ngd_ctrl *ctrl, bool enable)
                }
                /* controller state should be in sync with framework state */
                complete(&ctrl->qmi.qmi_comp);
-               if (!pm_runtime_enabled(ctrl->dev) ||
-                               !pm_runtime_suspended(ctrl->dev))
-                       qcom_slim_ngd_runtime_resume(ctrl->dev);
+               if (!pm_runtime_enabled(ctrl->ctrl.dev) ||
+                        !pm_runtime_suspended(ctrl->ctrl.dev))
+                       qcom_slim_ngd_runtime_resume(ctrl->ctrl.dev);
                else
-                       pm_runtime_resume(ctrl->dev);
-               pm_runtime_mark_last_busy(ctrl->dev);
-               pm_runtime_put(ctrl->dev);
+                       pm_runtime_resume(ctrl->ctrl.dev);
+
+               pm_runtime_mark_last_busy(ctrl->ctrl.dev);
+               pm_runtime_put(ctrl->ctrl.dev);
 
                ret = slim_register_controller(&ctrl->ctrl);
                if (ret) {
@@ -1389,7 +1392,7 @@ static int qcom_slim_ngd_ssr_pdr_notify(struct qcom_slim_ngd_ctrl *ctrl,
                /* Make sure the last dma xfer is finished */
                mutex_lock(&ctrl->tx_lock);
                if (ctrl->state != QCOM_SLIM_NGD_CTRL_DOWN) {
-                       pm_runtime_get_noresume(ctrl->dev);
+                       pm_runtime_get_noresume(ctrl->ctrl.dev);
                        ctrl->state = QCOM_SLIM_NGD_CTRL_DOWN;
                        qcom_slim_ngd_down(ctrl);
                        qcom_slim_ngd_exit_dma(ctrl);
@@ -1617,6 +1620,7 @@ static int __maybe_unused qcom_slim_ngd_runtime_suspend(struct device *dev)
        struct qcom_slim_ngd_ctrl *ctrl = dev_get_drvdata(dev);
        int ret = 0;
 
+       qcom_slim_ngd_exit_dma(ctrl);
        if (!ctrl->qmi.handle)
                return 0;
 
index f678e4d..a05e9fb 100644 (file)
@@ -13,7 +13,7 @@ obj-$(CONFIG_MACH_DOVE)               += dove/
 obj-y                          += fsl/
 obj-$(CONFIG_ARCH_GEMINI)      += gemini/
 obj-y                          += imx/
-obj-$(CONFIG_ARCH_IXP4XX)      += ixp4xx/
+obj-y                          += ixp4xx/
 obj-$(CONFIG_SOC_XWAY)         += lantiq/
 obj-$(CONFIG_LITEX_SOC_CONTROLLER) += litex/
 obj-y                          += mediatek/
index 3f711c1..bbae3d3 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/signal.h>
 #include <linux/device.h>
 #include <linux/spinlock.h>
+#include <linux/platform_device.h>
 #include <asm/irq.h>
 #include <asm/io.h>
 #include <soc/fsl/qe/qe.h>
@@ -53,8 +54,8 @@ struct qe_ic {
        struct irq_chip hc_irq;
 
        /* VIRQ numbers of QE high/low irqs */
-       unsigned int virq_high;
-       unsigned int virq_low;
+       int virq_high;
+       int virq_low;
 };
 
 /*
@@ -404,42 +405,40 @@ static void qe_ic_cascade_muxed_mpic(struct irq_desc *desc)
        chip->irq_eoi(&desc->irq_data);
 }
 
-static void __init qe_ic_init(struct device_node *node)
+static int qe_ic_init(struct platform_device *pdev)
 {
+       struct device *dev = &pdev->dev;
        void (*low_handler)(struct irq_desc *desc);
        void (*high_handler)(struct irq_desc *desc);
        struct qe_ic *qe_ic;
-       struct resource res;
-       u32 ret;
+       struct resource *res;
+       struct device_node *node = pdev->dev.of_node;
 
-       ret = of_address_to_resource(node, 0, &res);
-       if (ret)
-               return;
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (res == NULL) {
+               dev_err(dev, "no memory resource defined\n");
+               return -ENODEV;
+       }
 
-       qe_ic = kzalloc(sizeof(*qe_ic), GFP_KERNEL);
+       qe_ic = devm_kzalloc(dev, sizeof(*qe_ic), GFP_KERNEL);
        if (qe_ic == NULL)
-               return;
+               return -ENOMEM;
 
-       qe_ic->irqhost = irq_domain_add_linear(node, NR_QE_IC_INTS,
-                                              &qe_ic_host_ops, qe_ic);
-       if (qe_ic->irqhost == NULL) {
-               kfree(qe_ic);
-               return;
+       qe_ic->regs = devm_ioremap(dev, res->start, resource_size(res));
+       if (qe_ic->regs == NULL) {
+               dev_err(dev, "failed to ioremap() registers\n");
+               return -ENODEV;
        }
 
-       qe_ic->regs = ioremap(res.start, resource_size(&res));
-
        qe_ic->hc_irq = qe_ic_irq_chip;
 
-       qe_ic->virq_high = irq_of_parse_and_map(node, 0);
-       qe_ic->virq_low = irq_of_parse_and_map(node, 1);
+       qe_ic->virq_high = platform_get_irq(pdev, 0);
+       qe_ic->virq_low = platform_get_irq(pdev, 1);
 
-       if (!qe_ic->virq_low) {
-               printk(KERN_ERR "Failed to map QE_IC low IRQ\n");
-               kfree(qe_ic);
-               return;
-       }
-       if (qe_ic->virq_high != qe_ic->virq_low) {
+       if (qe_ic->virq_low <= 0)
+               return -ENODEV;
+
+       if (qe_ic->virq_high > 0 && qe_ic->virq_high != qe_ic->virq_low) {
                low_handler = qe_ic_cascade_low;
                high_handler = qe_ic_cascade_high;
        } else {
@@ -447,29 +446,42 @@ static void __init qe_ic_init(struct device_node *node)
                high_handler = NULL;
        }
 
+       qe_ic->irqhost = irq_domain_add_linear(node, NR_QE_IC_INTS,
+                                              &qe_ic_host_ops, qe_ic);
+       if (qe_ic->irqhost == NULL) {
+               dev_err(dev, "failed to add irq domain\n");
+               return -ENODEV;
+       }
+
        qe_ic_write(qe_ic->regs, QEIC_CICR, 0);
 
        irq_set_handler_data(qe_ic->virq_low, qe_ic);
        irq_set_chained_handler(qe_ic->virq_low, low_handler);
 
-       if (qe_ic->virq_high && qe_ic->virq_high != qe_ic->virq_low) {
+       if (high_handler) {
                irq_set_handler_data(qe_ic->virq_high, qe_ic);
                irq_set_chained_handler(qe_ic->virq_high, high_handler);
        }
+       return 0;
 }
+static const struct of_device_id qe_ic_ids[] = {
+       { .compatible = "fsl,qe-ic"},
+       { .type = "qeic"},
+       {},
+};
 
-static int __init qe_ic_of_init(void)
+static struct platform_driver qe_ic_driver =
 {
-       struct device_node *np;
+       .driver = {
+               .name           = "qe-ic",
+               .of_match_table = qe_ic_ids,
+       },
+       .probe  = qe_ic_init,
+};
 
-       np = of_find_compatible_node(NULL, NULL, "fsl,qe-ic");
-       if (!np) {
-               np = of_find_node_by_type(NULL, "qeic");
-               if (!np)
-                       return -ENODEV;
-       }
-       qe_ic_init(np);
-       of_node_put(np);
+static int __init qe_ic_of_init(void)
+{
+       platform_driver_register(&qe_ic_driver);
        return 0;
 }
 subsys_initcall(qe_ic_of_init);
index 071e144..cc57a38 100644 (file)
@@ -5,8 +5,6 @@
 
 #include <linux/init.h>
 #include <linux/io.h>
-#include <linux/module.h>
-#include <linux/nvmem-consumer.h>
 #include <linux/of_address.h>
 #include <linux/slab.h>
 #include <linux/sys_soc.h>
@@ -31,7 +29,7 @@
 
 struct imx8_soc_data {
        char *name;
-       u32 (*soc_revision)(struct device *dev);
+       u32 (*soc_revision)(void);
 };
 
 static u64 soc_uid;
@@ -52,7 +50,7 @@ static u32 imx8mq_soc_revision_from_atf(void)
 static inline u32 imx8mq_soc_revision_from_atf(void) { return 0; };
 #endif
 
-static u32 __init imx8mq_soc_revision(struct device *dev)
+static u32 __init imx8mq_soc_revision(void)
 {
        struct device_node *np;
        void __iomem *ocotp_base;
@@ -77,20 +75,9 @@ static u32 __init imx8mq_soc_revision(struct device *dev)
                        rev = REV_B1;
        }
 
-       if (dev) {
-               int ret;
-
-               ret = nvmem_cell_read_u64(dev, "soc_unique_id", &soc_uid);
-               if (ret) {
-                       iounmap(ocotp_base);
-                       of_node_put(np);
-                       return ret;
-               }
-       } else {
-               soc_uid = readl_relaxed(ocotp_base + OCOTP_UID_HIGH);
-               soc_uid <<= 32;
-               soc_uid |= readl_relaxed(ocotp_base + OCOTP_UID_LOW);
-       }
+       soc_uid = readl_relaxed(ocotp_base + OCOTP_UID_HIGH);
+       soc_uid <<= 32;
+       soc_uid |= readl_relaxed(ocotp_base + OCOTP_UID_LOW);
 
        iounmap(ocotp_base);
        of_node_put(np);
@@ -120,7 +107,7 @@ static void __init imx8mm_soc_uid(void)
        of_node_put(np);
 }
 
-static u32 __init imx8mm_soc_revision(struct device *dev)
+static u32 __init imx8mm_soc_revision(void)
 {
        struct device_node *np;
        void __iomem *anatop_base;
@@ -138,15 +125,7 @@ static u32 __init imx8mm_soc_revision(struct device *dev)
        iounmap(anatop_base);
        of_node_put(np);
 
-       if (dev) {
-               int ret;
-
-               ret = nvmem_cell_read_u64(dev, "soc_unique_id", &soc_uid);
-               if (ret)
-                       return ret;
-       } else {
-               imx8mm_soc_uid();
-       }
+       imx8mm_soc_uid();
 
        return rev;
 }
@@ -171,7 +150,7 @@ static const struct imx8_soc_data imx8mp_soc_data = {
        .soc_revision = imx8mm_soc_revision,
 };
 
-static __maybe_unused const struct of_device_id imx8_machine_match[] = {
+static __maybe_unused const struct of_device_id imx8_soc_match[] = {
        { .compatible = "fsl,imx8mq", .data = &imx8mq_soc_data, },
        { .compatible = "fsl,imx8mm", .data = &imx8mm_soc_data, },
        { .compatible = "fsl,imx8mn", .data = &imx8mn_soc_data, },
@@ -179,20 +158,12 @@ static __maybe_unused const struct of_device_id imx8_machine_match[] = {
        { }
 };
 
-static __maybe_unused const struct of_device_id imx8_soc_match[] = {
-       { .compatible = "fsl,imx8mq-soc", .data = &imx8mq_soc_data, },
-       { .compatible = "fsl,imx8mm-soc", .data = &imx8mm_soc_data, },
-       { .compatible = "fsl,imx8mn-soc", .data = &imx8mn_soc_data, },
-       { .compatible = "fsl,imx8mp-soc", .data = &imx8mp_soc_data, },
-       { }
-};
-
 #define imx8_revision(soc_rev) \
        soc_rev ? \
        kasprintf(GFP_KERNEL, "%d.%d", (soc_rev >> 4) & 0xf,  soc_rev & 0xf) : \
        "unknown"
 
-static int imx8_soc_info(struct platform_device *pdev)
+static int __init imx8_soc_init(void)
 {
        struct soc_device_attribute *soc_dev_attr;
        struct soc_device *soc_dev;
@@ -211,10 +182,7 @@ static int imx8_soc_info(struct platform_device *pdev)
        if (ret)
                goto free_soc;
 
-       if (pdev)
-               id = of_match_node(imx8_soc_match, pdev->dev.of_node);
-       else
-               id = of_match_node(imx8_machine_match, of_root);
+       id = of_match_node(imx8_soc_match, of_root);
        if (!id) {
                ret = -ENODEV;
                goto free_soc;
@@ -223,16 +191,8 @@ static int imx8_soc_info(struct platform_device *pdev)
        data = id->data;
        if (data) {
                soc_dev_attr->soc_id = data->name;
-               if (data->soc_revision) {
-                       if (pdev) {
-                               soc_rev = data->soc_revision(&pdev->dev);
-                               ret = soc_rev;
-                               if (ret < 0)
-                                       goto free_soc;
-                       } else {
-                               soc_rev = data->soc_revision(NULL);
-                       }
-               }
+               if (data->soc_revision)
+                       soc_rev = data->soc_revision();
        }
 
        soc_dev_attr->revision = imx8_revision(soc_rev);
@@ -270,24 +230,4 @@ free_soc:
        kfree(soc_dev_attr);
        return ret;
 }
-
-/* Retain device_initcall is for backward compatibility with DTS. */
-static int __init imx8_soc_init(void)
-{
-       if (of_find_matching_node_and_match(NULL, imx8_soc_match, NULL))
-               return 0;
-
-       return imx8_soc_info(NULL);
-}
 device_initcall(imx8_soc_init);
-
-static struct platform_driver imx8_soc_info_driver = {
-       .probe = imx8_soc_info,
-       .driver = {
-               .name = "imx8_soc_info",
-               .of_match_table = imx8_soc_match,
-       },
-};
-
-module_platform_driver(imx8_soc_info_driver);
-MODULE_LICENSE("GPL v2");
index 7bd1935..f490c4c 100644 (file)
@@ -21,7 +21,6 @@
 #include <linux/of_platform.h>
 #include <linux/platform_device.h>
 #include <linux/soc/ixp4xx/npe.h>
-#include <mach/hardware.h>
 #include <linux/soc/ixp4xx/cpu.h>
 
 #define DEBUG_MSG                      0
@@ -694,8 +693,8 @@ static int ixp4xx_npe_probe(struct platform_device *pdev)
 
                if (!(ixp4xx_read_feature_bits() &
                      (IXP4XX_FEATURE_RESET_NPEA << i))) {
-                       dev_info(dev, "NPE%d at 0x%08x-0x%08x not available\n",
-                                i, res->start, res->end);
+                       dev_info(dev, "NPE%d at %pR not available\n",
+                                i, res);
                        continue; /* NPE already disabled or not present */
                }
                npe->regs = devm_ioremap_resource(dev, res);
@@ -703,13 +702,12 @@ static int ixp4xx_npe_probe(struct platform_device *pdev)
                        return PTR_ERR(npe->regs);
 
                if (npe_reset(npe)) {
-                       dev_info(dev, "NPE%d at 0x%08x-0x%08x does not reset\n",
-                                i, res->start, res->end);
+                       dev_info(dev, "NPE%d at %pR does not reset\n",
+                                i, res);
                        continue;
                }
                npe->valid = 1;
-               dev_info(dev, "NPE%d at 0x%08x-0x%08x registered\n",
-                        i, res->start, res->end);
+               dev_info(dev, "NPE%d at %pR registered\n", i, res);
                found++;
        }
 
index 7149510..9154c70 100644 (file)
@@ -12,7 +12,6 @@
 #include <linux/of.h>
 #include <linux/platform_device.h>
 #include <linux/soc/ixp4xx/qmgr.h>
-#include <mach/hardware.h>
 #include <linux/soc/ixp4xx/cpu.h>
 
 static struct qmgr_regs __iomem *qmgr_regs;
@@ -147,12 +146,12 @@ static irqreturn_t qmgr_irq1_a0(int irq, void *pdev)
        /* ACK - it may clear any bits so don't rely on it */
        __raw_writel(0xFFFFFFFF, &qmgr_regs->irqstat[0]);
 
-       en_bitmap = qmgr_regs->irqen[0];
+       en_bitmap = __raw_readl(&qmgr_regs->irqen[0]);
        while (en_bitmap) {
                i = __fls(en_bitmap); /* number of the last "low" queue */
                en_bitmap &= ~BIT(i);
-               src = qmgr_regs->irqsrc[i >> 3];
-               stat = qmgr_regs->stat1[i >> 3];
+               src = __raw_readl(&qmgr_regs->irqsrc[i >> 3]);
+               stat = __raw_readl(&qmgr_regs->stat1[i >> 3]);
                if (src & 4) /* the IRQ condition is inverted */
                        stat = ~stat;
                if (stat & BIT(src & 3)) {
@@ -172,7 +171,8 @@ static irqreturn_t qmgr_irq2_a0(int irq, void *pdev)
        /* ACK - it may clear any bits so don't rely on it */
        __raw_writel(0xFFFFFFFF, &qmgr_regs->irqstat[1]);
 
-       req_bitmap = qmgr_regs->irqen[1] & qmgr_regs->statne_h;
+       req_bitmap = __raw_readl(&qmgr_regs->irqen[1]) &
+                    __raw_readl(&qmgr_regs->statne_h);
        while (req_bitmap) {
                i = __fls(req_bitmap); /* number of the last "high" queue */
                req_bitmap &= ~BIT(i);
index 20ace65..8b53ed1 100644 (file)
@@ -15,7 +15,7 @@ config ARCH_TEGRA_2x_SOC
        select PL310_ERRATA_769419 if CACHE_L2X0
        select SOC_TEGRA_FLOWCTRL
        select SOC_TEGRA_PMC
-       select SOC_TEGRA20_VOLTAGE_COUPLER
+       select SOC_TEGRA20_VOLTAGE_COUPLER if REGULATOR
        select TEGRA_TIMER
        help
          Support for NVIDIA Tegra AP20 and T20 processors, based on the
@@ -29,7 +29,7 @@ config ARCH_TEGRA_3x_SOC
        select PL310_ERRATA_769419 if CACHE_L2X0
        select SOC_TEGRA_FLOWCTRL
        select SOC_TEGRA_PMC
-       select SOC_TEGRA30_VOLTAGE_COUPLER
+       select SOC_TEGRA30_VOLTAGE_COUPLER if REGULATOR
        select TEGRA_TIMER
        help
          Support for NVIDIA Tegra T30 processor family, based on the
@@ -155,7 +155,9 @@ config SOC_TEGRA_POWERGATE_BPMP
 config SOC_TEGRA20_VOLTAGE_COUPLER
        bool "Voltage scaling support for Tegra20 SoCs"
        depends on ARCH_TEGRA_2x_SOC || COMPILE_TEST
+       depends on REGULATOR
 
 config SOC_TEGRA30_VOLTAGE_COUPLER
        bool "Voltage scaling support for Tegra30 SoCs"
        depends on ARCH_TEGRA_3x_SOC || COMPILE_TEST
+       depends on REGULATOR
index e71a4c5..83e352b 100644 (file)
@@ -658,6 +658,18 @@ config SPI_ROCKCHIP
          The main usecase of this controller is to use spi flash as boot
          device.
 
+config SPI_ROCKCHIP_SFC
+       tristate "Rockchip Serial Flash Controller (SFC)"
+       depends on ARCH_ROCKCHIP || COMPILE_TEST
+       depends on HAS_IOMEM && HAS_DMA
+       help
+         This enables support for Rockchip serial flash controller. This
+         is a specialized controller used to access SPI flash on some
+         Rockchip SOCs.
+
+         ROCKCHIP SFC supports DMA and PIO modes. When DMA is not available,
+         the driver automatically falls back to PIO mode.
+
 config SPI_RB4XX
        tristate "Mikrotik RB4XX SPI master"
        depends on SPI_MASTER && ATH79
index 13e54c4..699db95 100644 (file)
@@ -95,6 +95,7 @@ obj-$(CONFIG_SPI_QCOM_GENI)           += spi-geni-qcom.o
 obj-$(CONFIG_SPI_QCOM_QSPI)            += spi-qcom-qspi.o
 obj-$(CONFIG_SPI_QUP)                  += spi-qup.o
 obj-$(CONFIG_SPI_ROCKCHIP)             += spi-rockchip.o
+obj-$(CONFIG_SPI_ROCKCHIP_SFC)         += spi-rockchip-sfc.o
 obj-$(CONFIG_SPI_RB4XX)                        += spi-rb4xx.o
 obj-$(CONFIG_MACH_REALTEK_RTL)         += spi-realtek-rtl.o
 obj-$(CONFIG_SPI_RPCIF)                        += spi-rpc-if.o
index 37eab10..7d709a8 100644 (file)
@@ -143,12 +143,12 @@ static void bcm2835aux_debugfs_remove(struct bcm2835aux_spi *bs)
 }
 #endif /* CONFIG_DEBUG_FS */
 
-static inline u32 bcm2835aux_rd(struct bcm2835aux_spi *bs, unsigned reg)
+static inline u32 bcm2835aux_rd(struct bcm2835aux_spi *bs, unsigned int reg)
 {
        return readl(bs->regs + reg);
 }
 
-static inline void bcm2835aux_wr(struct bcm2835aux_spi *bs, unsigned reg,
+static inline void bcm2835aux_wr(struct bcm2835aux_spi *bs, unsigned int reg,
                                 u32 val)
 {
        writel(val, bs->regs + reg);
index a2de235..101cc71 100644 (file)
@@ -325,7 +325,15 @@ static int cqspi_set_protocol(struct cqspi_flash_pdata *f_pdata,
        f_pdata->inst_width = CQSPI_INST_TYPE_SINGLE;
        f_pdata->addr_width = CQSPI_INST_TYPE_SINGLE;
        f_pdata->data_width = CQSPI_INST_TYPE_SINGLE;
-       f_pdata->dtr = op->data.dtr && op->cmd.dtr && op->addr.dtr;
+
+       /*
+        * For an op to be DTR, cmd phase along with every other non-empty
+        * phase should have dtr field set to 1. If an op phase has zero
+        * nbytes, ignore its dtr field; otherwise, check its dtr field.
+        */
+       f_pdata->dtr = op->cmd.dtr &&
+                      (!op->addr.nbytes || op->addr.dtr) &&
+                      (!op->data.nbytes || op->data.dtr);
 
        switch (op->data.buswidth) {
        case 0:
@@ -1228,8 +1236,15 @@ static bool cqspi_supports_mem_op(struct spi_mem *mem,
 {
        bool all_true, all_false;
 
-       all_true = op->cmd.dtr && op->addr.dtr && op->dummy.dtr &&
-                  op->data.dtr;
+       /*
+        * op->dummy.dtr is required for converting nbytes into ncycles.
+        * Also, don't check the dtr field of the op phase having zero nbytes.
+        */
+       all_true = op->cmd.dtr &&
+                  (!op->addr.nbytes || op->addr.dtr) &&
+                  (!op->dummy.nbytes || op->dummy.dtr) &&
+                  (!op->data.nbytes || op->data.dtr);
+
        all_false = !op->cmd.dtr && !op->addr.dtr && !op->dummy.dtr &&
                    !op->data.dtr;
 
index 8996115..263ce90 100644 (file)
@@ -444,7 +444,7 @@ static int mcfqspi_remove(struct platform_device *pdev)
        mcfqspi_wr_qmr(mcfqspi, MCFQSPI_QMR_MSTR);
 
        mcfqspi_cs_teardown(mcfqspi);
-       clk_disable(mcfqspi->clk);
+       clk_disable_unprepare(mcfqspi->clk);
 
        return 0;
 }
index e114e6f..d112c2c 100644 (file)
@@ -213,12 +213,6 @@ static void davinci_spi_chipselect(struct spi_device *spi, int value)
         * line for the controller
         */
        if (spi->cs_gpiod) {
-               /*
-                * FIXME: is this code ever executed? This host does not
-                * set SPI_MASTER_GPIO_SS so this chipselect callback should
-                * not get called from the SPI core when we are using
-                * GPIOs for chip select.
-                */
                if (value == BITBANG_CS_ACTIVE)
                        gpiod_set_value(spi->cs_gpiod, 1);
                else
@@ -945,7 +939,7 @@ static int davinci_spi_probe(struct platform_device *pdev)
        master->bus_num = pdev->id;
        master->num_chipselect = pdata->num_chipselect;
        master->bits_per_word_mask = SPI_BPW_RANGE_MASK(2, 16);
-       master->flags = SPI_MASTER_MUST_RX;
+       master->flags = SPI_MASTER_MUST_RX | SPI_MASTER_GPIO_SS;
        master->setup = davinci_spi_setup;
        master->cleanup = davinci_spi_cleanup;
        master->can_dma = davinci_spi_can_dma;
index aa67655..5896a7b 100644 (file)
@@ -550,7 +550,7 @@ static int ep93xx_spi_prepare_hardware(struct spi_master *master)
        u32 val;
        int ret;
 
-       ret = clk_enable(espi->clk);
+       ret = clk_prepare_enable(espi->clk);
        if (ret)
                return ret;
 
@@ -570,7 +570,7 @@ static int ep93xx_spi_unprepare_hardware(struct spi_master *master)
        val &= ~SSPCR1_SSE;
        writel(val, espi->mmio + SSPCR1);
 
-       clk_disable(espi->clk);
+       clk_disable_unprepare(espi->clk);
 
        return 0;
 }
index 87f8829..829770b 100644 (file)
 
 #define SPI_FSI_BASE                   0x70000
 #define SPI_FSI_INIT_TIMEOUT_MS                1000
-#define SPI_FSI_MAX_XFR_SIZE           2048
-#define SPI_FSI_MAX_XFR_SIZE_RESTRICTED        8
+#define SPI_FSI_MAX_RX_SIZE            8
+#define SPI_FSI_MAX_TX_SIZE            40
 
 #define SPI_FSI_ERROR                  0x0
 #define SPI_FSI_COUNTER_CFG            0x1
-#define  SPI_FSI_COUNTER_CFG_LOOPS(x)   (((u64)(x) & 0xffULL) << 32)
-#define  SPI_FSI_COUNTER_CFG_N2_RX      BIT_ULL(8)
-#define  SPI_FSI_COUNTER_CFG_N2_TX      BIT_ULL(9)
-#define  SPI_FSI_COUNTER_CFG_N2_IMPLICIT BIT_ULL(10)
-#define  SPI_FSI_COUNTER_CFG_N2_RELOAD  BIT_ULL(11)
 #define SPI_FSI_CFG1                   0x2
 #define SPI_FSI_CLOCK_CFG              0x3
 #define  SPI_FSI_CLOCK_CFG_MM_ENABLE    BIT_ULL(32)
@@ -76,8 +71,6 @@ struct fsi_spi {
        struct device *dev;     /* SPI controller device */
        struct fsi_device *fsi; /* FSI2SPI CFAM engine device */
        u32 base;
-       size_t max_xfr_size;
-       bool restricted;
 };
 
 struct fsi_spi_sequence {
@@ -241,7 +234,7 @@ static int fsi_spi_reset(struct fsi_spi *ctx)
        return fsi_spi_write_reg(ctx, SPI_FSI_STATUS, 0ULL);
 }
 
-static int fsi_spi_sequence_add(struct fsi_spi_sequence *seq, u8 val)
+static void fsi_spi_sequence_add(struct fsi_spi_sequence *seq, u8 val)
 {
        /*
         * Add the next byte of instruction to the 8-byte sequence register.
@@ -251,8 +244,6 @@ static int fsi_spi_sequence_add(struct fsi_spi_sequence *seq, u8 val)
         */
        seq->data |= (u64)val << seq->bit;
        seq->bit -= 8;
-
-       return ((64 - seq->bit) / 8) - 2;
 }
 
 static void fsi_spi_sequence_init(struct fsi_spi_sequence *seq)
@@ -261,71 +252,11 @@ static void fsi_spi_sequence_init(struct fsi_spi_sequence *seq)
        seq->data = 0ULL;
 }
 
-static int fsi_spi_sequence_transfer(struct fsi_spi *ctx,
-                                    struct fsi_spi_sequence *seq,
-                                    struct spi_transfer *transfer)
-{
-       int loops;
-       int idx;
-       int rc;
-       u8 val = 0;
-       u8 len = min(transfer->len, 8U);
-       u8 rem = transfer->len % len;
-
-       loops = transfer->len / len;
-
-       if (transfer->tx_buf) {
-               val = SPI_FSI_SEQUENCE_SHIFT_OUT(len);
-               idx = fsi_spi_sequence_add(seq, val);
-
-               if (rem)
-                       rem = SPI_FSI_SEQUENCE_SHIFT_OUT(rem);
-       } else if (transfer->rx_buf) {
-               val = SPI_FSI_SEQUENCE_SHIFT_IN(len);
-               idx = fsi_spi_sequence_add(seq, val);
-
-               if (rem)
-                       rem = SPI_FSI_SEQUENCE_SHIFT_IN(rem);
-       } else {
-               return -EINVAL;
-       }
-
-       if (ctx->restricted && loops > 1) {
-               dev_warn(ctx->dev,
-                        "Transfer too large; no branches permitted.\n");
-               return -EINVAL;
-       }
-
-       if (loops > 1) {
-               u64 cfg = SPI_FSI_COUNTER_CFG_LOOPS(loops - 1);
-
-               fsi_spi_sequence_add(seq, SPI_FSI_SEQUENCE_BRANCH(idx));
-
-               if (transfer->rx_buf)
-                       cfg |= SPI_FSI_COUNTER_CFG_N2_RX |
-                               SPI_FSI_COUNTER_CFG_N2_TX |
-                               SPI_FSI_COUNTER_CFG_N2_IMPLICIT |
-                               SPI_FSI_COUNTER_CFG_N2_RELOAD;
-
-               rc = fsi_spi_write_reg(ctx, SPI_FSI_COUNTER_CFG, cfg);
-               if (rc)
-                       return rc;
-       } else {
-               fsi_spi_write_reg(ctx, SPI_FSI_COUNTER_CFG, 0ULL);
-       }
-
-       if (rem)
-               fsi_spi_sequence_add(seq, rem);
-
-       return 0;
-}
-
 static int fsi_spi_transfer_data(struct fsi_spi *ctx,
                                 struct spi_transfer *transfer)
 {
        int rc = 0;
        u64 status = 0ULL;
-       u64 cfg = 0ULL;
 
        if (transfer->tx_buf) {
                int nb;
@@ -363,16 +294,6 @@ static int fsi_spi_transfer_data(struct fsi_spi *ctx,
                u64 in = 0ULL;
                u8 *rx = transfer->rx_buf;
 
-               rc = fsi_spi_read_reg(ctx, SPI_FSI_COUNTER_CFG, &cfg);
-               if (rc)
-                       return rc;
-
-               if (cfg & SPI_FSI_COUNTER_CFG_N2_IMPLICIT) {
-                       rc = fsi_spi_write_reg(ctx, SPI_FSI_DATA_TX, 0);
-                       if (rc)
-                               return rc;
-               }
-
                while (transfer->len > recv) {
                        do {
                                rc = fsi_spi_read_reg(ctx, SPI_FSI_STATUS,
@@ -439,6 +360,10 @@ static int fsi_spi_transfer_init(struct fsi_spi *ctx)
                }
        } while (seq_state && (seq_state != SPI_FSI_STATUS_SEQ_STATE_IDLE));
 
+       rc = fsi_spi_write_reg(ctx, SPI_FSI_COUNTER_CFG, 0ULL);
+       if (rc)
+               return rc;
+
        rc = fsi_spi_read_reg(ctx, SPI_FSI_CLOCK_CFG, &clock_cfg);
        if (rc)
                return rc;
@@ -459,6 +384,7 @@ static int fsi_spi_transfer_one_message(struct spi_controller *ctlr,
 {
        int rc;
        u8 seq_slave = SPI_FSI_SEQUENCE_SEL_SLAVE(mesg->spi->chip_select + 1);
+       unsigned int len;
        struct spi_transfer *transfer;
        struct fsi_spi *ctx = spi_controller_get_devdata(ctlr);
 
@@ -471,8 +397,7 @@ static int fsi_spi_transfer_one_message(struct spi_controller *ctlr,
                struct spi_transfer *next = NULL;
 
                /* Sequencer must do shift out (tx) first. */
-               if (!transfer->tx_buf ||
-                   transfer->len > (ctx->max_xfr_size + 8)) {
+               if (!transfer->tx_buf || transfer->len > SPI_FSI_MAX_TX_SIZE) {
                        rc = -EINVAL;
                        goto error;
                }
@@ -486,9 +411,13 @@ static int fsi_spi_transfer_one_message(struct spi_controller *ctlr,
                fsi_spi_sequence_init(&seq);
                fsi_spi_sequence_add(&seq, seq_slave);
 
-               rc = fsi_spi_sequence_transfer(ctx, &seq, transfer);
-               if (rc)
-                       goto error;
+               len = transfer->len;
+               while (len > 8) {
+                       fsi_spi_sequence_add(&seq,
+                                            SPI_FSI_SEQUENCE_SHIFT_OUT(8));
+                       len -= 8;
+               }
+               fsi_spi_sequence_add(&seq, SPI_FSI_SEQUENCE_SHIFT_OUT(len));
 
                if (!list_is_last(&transfer->transfer_list,
                                  &mesg->transfers)) {
@@ -496,7 +425,9 @@ static int fsi_spi_transfer_one_message(struct spi_controller *ctlr,
 
                        /* Sequencer can only do shift in (rx) after tx. */
                        if (next->rx_buf) {
-                               if (next->len > ctx->max_xfr_size) {
+                               u8 shift;
+
+                               if (next->len > SPI_FSI_MAX_RX_SIZE) {
                                        rc = -EINVAL;
                                        goto error;
                                }
@@ -504,10 +435,8 @@ static int fsi_spi_transfer_one_message(struct spi_controller *ctlr,
                                dev_dbg(ctx->dev, "Sequence rx of %d bytes.\n",
                                        next->len);
 
-                               rc = fsi_spi_sequence_transfer(ctx, &seq,
-                                                              next);
-                               if (rc)
-                                       goto error;
+                               shift = SPI_FSI_SEQUENCE_SHIFT_IN(next->len);
+                               fsi_spi_sequence_add(&seq, shift);
                        } else {
                                next = NULL;
                        }
@@ -541,9 +470,7 @@ error:
 
 static size_t fsi_spi_max_transfer_size(struct spi_device *spi)
 {
-       struct fsi_spi *ctx = spi_controller_get_devdata(spi->controller);
-
-       return ctx->max_xfr_size;
+       return SPI_FSI_MAX_RX_SIZE;
 }
 
 static int fsi_spi_probe(struct device *dev)
@@ -582,14 +509,6 @@ static int fsi_spi_probe(struct device *dev)
                ctx->fsi = fsi;
                ctx->base = base + SPI_FSI_BASE;
 
-               if (of_device_is_compatible(np, "ibm,fsi2spi-restricted")) {
-                       ctx->restricted = true;
-                       ctx->max_xfr_size = SPI_FSI_MAX_XFR_SIZE_RESTRICTED;
-               } else {
-                       ctx->restricted = false;
-                       ctx->max_xfr_size = SPI_FSI_MAX_XFR_SIZE;
-               }
-
                rc = devm_spi_register_controller(dev, ctlr);
                if (rc)
                        spi_controller_put(ctlr);
index fb45e6a..fd004c9 100644 (file)
@@ -530,6 +530,7 @@ static int dspi_request_dma(struct fsl_dspi *dspi, phys_addr_t phy_addr)
                goto err_rx_dma_buf;
        }
 
+       memset(&cfg, 0, sizeof(cfg));
        cfg.src_addr = phy_addr + SPI_POPR;
        cfg.dst_addr = phy_addr + SPI_PUSHR;
        cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
index b3861fb..2f51421 100644 (file)
@@ -549,12 +549,6 @@ static void setup_fifo_xfer(struct spi_transfer *xfer,
         */
        spin_lock_irq(&mas->lock);
        geni_se_setup_m_cmd(se, m_cmd, FRAGMENTATION);
-
-       /*
-        * TX_WATERMARK_REG should be set after SPI configuration and
-        * setting up GENI SE engine, as driver starts data transfer
-        * for the watermark interrupt.
-        */
        if (m_cmd & SPI_TX_ONLY) {
                if (geni_spi_handle_tx(mas))
                        writel(mas->tx_wm, se->base + SE_GENI_TX_WATERMARK_REG);
index 4aee3db..8d8df51 100644 (file)
@@ -505,7 +505,9 @@ static int mx51_ecspi_prepare_message(struct spi_imx_data *spi_imx,
                                      struct spi_message *msg)
 {
        struct spi_device *spi = msg->spi;
+       struct spi_transfer *xfer;
        u32 ctrl = MX51_ECSPI_CTRL_ENABLE;
+       u32 min_speed_hz = ~0U;
        u32 testreg, delay;
        u32 cfg = readl(spi_imx->base + MX51_ECSPI_CONFIG);
 
@@ -577,9 +579,21 @@ static int mx51_ecspi_prepare_message(struct spi_imx_data *spi_imx,
         * be asserted before the SCLK polarity changes, which would disrupt
         * the SPI communication as the device on the other end would consider
         * the change of SCLK polarity as a clock tick already.
+        *
+        * Because spi_imx->spi_bus_clk is only set in bitbang prepare_message
+        * callback, iterate over all the transfers in spi_message, find the
+        * one with lowest bus frequency, and use that bus frequency for the
+        * delay calculation. In case all transfers have speed_hz == 0, then
+        * min_speed_hz is ~0 and the resulting delay is zero.
         */
-       delay = (2 * 1000000) / spi_imx->spi_bus_clk;
-       if (likely(delay < 10)) /* SCLK is faster than 100 kHz */
+       list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+               if (!xfer->speed_hz)
+                       continue;
+               min_speed_hz = min(xfer->speed_hz, min_speed_hz);
+       }
+
+       delay = (2 * 1000000) / min_speed_hz;
+       if (likely(delay < 10)) /* SCLK is faster than 200 kHz */
                udelay(delay);
        else                    /* SCLK is _very_ slow */
                usleep_range(delay, delay + 10);
@@ -1038,12 +1052,8 @@ static void spi_imx_set_burst_len(struct spi_imx_data *spi_imx, int n_bits)
 
 static void spi_imx_push(struct spi_imx_data *spi_imx)
 {
-       unsigned int burst_len, fifo_words;
+       unsigned int burst_len;
 
-       if (spi_imx->dynamic_burst)
-               fifo_words = 4;
-       else
-               fifo_words = spi_imx_bytes_per_word(spi_imx->bits_per_word);
        /*
         * Reload the FIFO when the remaining bytes to be transferred in the
         * current burst is 0. This only applies when bits_per_word is a
@@ -1062,7 +1072,7 @@ static void spi_imx_push(struct spi_imx_data *spi_imx)
 
                        spi_imx->remainder = burst_len;
                } else {
-                       spi_imx->remainder = fifo_words;
+                       spi_imx->remainder = spi_imx_bytes_per_word(spi_imx->bits_per_word);
                }
        }
 
@@ -1070,8 +1080,7 @@ static void spi_imx_push(struct spi_imx_data *spi_imx)
                if (!spi_imx->count)
                        break;
                if (spi_imx->dynamic_burst &&
-                   spi_imx->txfifo >= DIV_ROUND_UP(spi_imx->remainder,
-                                                    fifo_words))
+                   spi_imx->txfifo >= DIV_ROUND_UP(spi_imx->remainder, 4))
                        break;
                spi_imx->tx(spi_imx);
                spi_imx->txfifo++;
@@ -1181,6 +1190,7 @@ static int spi_imx_setupxfer(struct spi_device *spi,
         * dynamic_burst in that case.
         */
        if (spi_imx->devtype_data->dynamic_burst && !spi_imx->slave_mode &&
+           !(spi->mode & SPI_CS_WORD) &&
            (spi_imx->bits_per_word == 8 ||
            spi_imx->bits_per_word == 16 ||
            spi_imx->bits_per_word == 32)) {
@@ -1616,6 +1626,15 @@ static int spi_imx_probe(struct platform_device *pdev)
            is_imx53_ecspi(spi_imx))
                spi_imx->bitbang.master->mode_bits |= SPI_LOOP | SPI_READY;
 
+       if (is_imx51_ecspi(spi_imx) &&
+           device_property_read_u32(&pdev->dev, "cs-gpios", NULL))
+               /*
+                * When using HW-CS implementing SPI_CS_WORD can be done by just
+                * setting the burst length to the word size. This is
+                * considerably faster than manually controlling the CS.
+                */
+               spi_imx->bitbang.master->mode_bits |= SPI_CS_WORD;
+
        spi_imx->spi_drctl = spi_drctl;
 
        init_completion(&spi_imx->xfer_done);
index b2c4621..c208efe 100644 (file)
@@ -785,6 +785,8 @@ static int meson_spicc_remove(struct platform_device *pdev)
        clk_disable_unprepare(spicc->core);
        clk_disable_unprepare(spicc->pclk);
 
+       spi_master_put(spicc->master);
+
        return 0;
 }
 
index 68dca8c..386e8c8 100644 (file)
@@ -42,8 +42,9 @@
 #define SPI_CFG1_CS_IDLE_OFFSET           0
 #define SPI_CFG1_PACKET_LOOP_OFFSET       8
 #define SPI_CFG1_PACKET_LENGTH_OFFSET     16
-#define SPI_CFG1_GET_TICK_DLY_OFFSET      30
+#define SPI_CFG1_GET_TICK_DLY_OFFSET      29
 
+#define SPI_CFG1_GET_TICK_DLY_MASK        0xe0000000
 #define SPI_CFG1_CS_IDLE_MASK             0xff
 #define SPI_CFG1_PACKET_LOOP_MASK         0xff00
 #define SPI_CFG1_PACKET_LENGTH_MASK       0x3ff0000
@@ -90,6 +91,8 @@ struct mtk_spi_compatible {
        bool enhance_timing;
        /* some IC support DMA addr extension */
        bool dma_ext;
+       /* some IC no need unprepare SPI clk */
+       bool no_need_unprepare;
 };
 
 struct mtk_spi {
@@ -104,6 +107,7 @@ struct mtk_spi {
        struct scatterlist *tx_sgl, *rx_sgl;
        u32 tx_sgl_len, rx_sgl_len;
        const struct mtk_spi_compatible *dev_comp;
+       u32 spi_clk_hz;
 };
 
 static const struct mtk_spi_compatible mtk_common_compat;
@@ -135,12 +139,21 @@ static const struct mtk_spi_compatible mt8183_compat = {
        .enhance_timing = true,
 };
 
+static const struct mtk_spi_compatible mt6893_compat = {
+       .need_pad_sel = true,
+       .must_tx = true,
+       .enhance_timing = true,
+       .dma_ext = true,
+       .no_need_unprepare = true,
+};
+
 /*
  * A piece of default chip info unless the platform
  * supplies it.
  */
 static const struct mtk_chip_config mtk_default_chip_info = {
        .sample_sel = 0,
+       .tick_delay = 0,
 };
 
 static const struct of_device_id mtk_spi_of_match[] = {
@@ -174,6 +187,9 @@ static const struct of_device_id mtk_spi_of_match[] = {
        { .compatible = "mediatek,mt8192-spi",
                .data = (void *)&mt6765_compat,
        },
+       { .compatible = "mediatek,mt6893-spi",
+               .data = (void *)&mt6893_compat,
+       },
        {}
 };
 MODULE_DEVICE_TABLE(of, mtk_spi_of_match);
@@ -192,6 +208,65 @@ static void mtk_spi_reset(struct mtk_spi *mdata)
        writel(reg_val, mdata->base + SPI_CMD_REG);
 }
 
+static int mtk_spi_set_hw_cs_timing(struct spi_device *spi)
+{
+       struct mtk_spi *mdata = spi_master_get_devdata(spi->master);
+       struct spi_delay *cs_setup = &spi->cs_setup;
+       struct spi_delay *cs_hold = &spi->cs_hold;
+       struct spi_delay *cs_inactive = &spi->cs_inactive;
+       u32 setup, hold, inactive;
+       u32 reg_val;
+       int delay;
+
+       delay = spi_delay_to_ns(cs_setup, NULL);
+       if (delay < 0)
+               return delay;
+       setup = (delay * DIV_ROUND_UP(mdata->spi_clk_hz, 1000000)) / 1000;
+
+       delay = spi_delay_to_ns(cs_hold, NULL);
+       if (delay < 0)
+               return delay;
+       hold = (delay * DIV_ROUND_UP(mdata->spi_clk_hz, 1000000)) / 1000;
+
+       delay = spi_delay_to_ns(cs_inactive, NULL);
+       if (delay < 0)
+               return delay;
+       inactive = (delay * DIV_ROUND_UP(mdata->spi_clk_hz, 1000000)) / 1000;
+
+       setup    = setup ? setup : 1;
+       hold     = hold ? hold : 1;
+       inactive = inactive ? inactive : 1;
+
+       reg_val = readl(mdata->base + SPI_CFG0_REG);
+       if (mdata->dev_comp->enhance_timing) {
+               hold = min_t(u32, hold, 0x10000);
+               setup = min_t(u32, setup, 0x10000);
+               reg_val &= ~(0xffff << SPI_ADJUST_CFG0_CS_HOLD_OFFSET);
+               reg_val |= (((hold - 1) & 0xffff)
+                          << SPI_ADJUST_CFG0_CS_HOLD_OFFSET);
+               reg_val &= ~(0xffff << SPI_ADJUST_CFG0_CS_SETUP_OFFSET);
+               reg_val |= (((setup - 1) & 0xffff)
+                          << SPI_ADJUST_CFG0_CS_SETUP_OFFSET);
+       } else {
+               hold = min_t(u32, hold, 0x100);
+               setup = min_t(u32, setup, 0x100);
+               reg_val &= ~(0xff << SPI_CFG0_CS_HOLD_OFFSET);
+               reg_val |= (((hold - 1) & 0xff) << SPI_CFG0_CS_HOLD_OFFSET);
+               reg_val &= ~(0xff << SPI_CFG0_CS_SETUP_OFFSET);
+               reg_val |= (((setup - 1) & 0xff)
+                           << SPI_CFG0_CS_SETUP_OFFSET);
+       }
+       writel(reg_val, mdata->base + SPI_CFG0_REG);
+
+       inactive = min_t(u32, inactive, 0x100);
+       reg_val = readl(mdata->base + SPI_CFG1_REG);
+       reg_val &= ~SPI_CFG1_CS_IDLE_MASK;
+       reg_val |= (((inactive - 1) & 0xff) << SPI_CFG1_CS_IDLE_OFFSET);
+       writel(reg_val, mdata->base + SPI_CFG1_REG);
+
+       return 0;
+}
+
 static int mtk_spi_prepare_message(struct spi_master *master,
                                   struct spi_message *msg)
 {
@@ -261,6 +336,15 @@ static int mtk_spi_prepare_message(struct spi_master *master,
                writel(mdata->pad_sel[spi->chip_select],
                       mdata->base + SPI_PAD_SEL_REG);
 
+       /* tick delay */
+       reg_val = readl(mdata->base + SPI_CFG1_REG);
+       reg_val &= ~SPI_CFG1_GET_TICK_DLY_MASK;
+       reg_val |= ((chip_config->tick_delay & 0x7)
+               << SPI_CFG1_GET_TICK_DLY_OFFSET);
+       writel(reg_val, mdata->base + SPI_CFG1_REG);
+
+       /* set hw cs timing */
+       mtk_spi_set_hw_cs_timing(spi);
        return 0;
 }
 
@@ -287,12 +371,11 @@ static void mtk_spi_set_cs(struct spi_device *spi, bool enable)
 static void mtk_spi_prepare_transfer(struct spi_master *master,
                                     struct spi_transfer *xfer)
 {
-       u32 spi_clk_hz, div, sck_time, reg_val;
+       u32 div, sck_time, reg_val;
        struct mtk_spi *mdata = spi_master_get_devdata(master);
 
-       spi_clk_hz = clk_get_rate(mdata->spi_clk);
-       if (xfer->speed_hz < spi_clk_hz / 2)
-               div = DIV_ROUND_UP(spi_clk_hz, xfer->speed_hz);
+       if (xfer->speed_hz < mdata->spi_clk_hz / 2)
+               div = DIV_ROUND_UP(mdata->spi_clk_hz, xfer->speed_hz);
        else
                div = 1;
 
@@ -426,24 +509,15 @@ static int mtk_spi_fifo_transfer(struct spi_master *master,
        mtk_spi_prepare_transfer(master, xfer);
        mtk_spi_setup_packet(master);
 
-       cnt = xfer->len / 4;
-       if (xfer->tx_buf)
+       if (xfer->tx_buf) {
+               cnt = xfer->len / 4;
                iowrite32_rep(mdata->base + SPI_TX_DATA_REG, xfer->tx_buf, cnt);
-
-       if (xfer->rx_buf)
-               ioread32_rep(mdata->base + SPI_RX_DATA_REG, xfer->rx_buf, cnt);
-
-       remainder = xfer->len % 4;
-       if (remainder > 0) {
-               reg_val = 0;
-               if (xfer->tx_buf) {
+               remainder = xfer->len % 4;
+               if (remainder > 0) {
+                       reg_val = 0;
                        memcpy(&reg_val, xfer->tx_buf + (cnt * 4), remainder);
                        writel(reg_val, mdata->base + SPI_TX_DATA_REG);
                }
-               if (xfer->rx_buf) {
-                       reg_val = readl(mdata->base + SPI_RX_DATA_REG);
-                       memcpy(xfer->rx_buf + (cnt * 4), &reg_val, remainder);
-               }
        }
 
        mtk_spi_enable_transfer(master);
@@ -516,52 +590,6 @@ static bool mtk_spi_can_dma(struct spi_master *master,
                (unsigned long)xfer->rx_buf % 4 == 0);
 }
 
-static int mtk_spi_set_hw_cs_timing(struct spi_device *spi,
-                                   struct spi_delay *setup,
-                                   struct spi_delay *hold,
-                                   struct spi_delay *inactive)
-{
-       struct mtk_spi *mdata = spi_master_get_devdata(spi->master);
-       u16 setup_dly, hold_dly, inactive_dly;
-       u32 reg_val;
-
-       if ((setup && setup->unit != SPI_DELAY_UNIT_SCK) ||
-           (hold && hold->unit != SPI_DELAY_UNIT_SCK) ||
-           (inactive && inactive->unit != SPI_DELAY_UNIT_SCK)) {
-               dev_err(&spi->dev,
-                       "Invalid delay unit, should be SPI_DELAY_UNIT_SCK\n");
-               return -EINVAL;
-       }
-
-       setup_dly = setup ? setup->value : 1;
-       hold_dly = hold ? hold->value : 1;
-       inactive_dly = inactive ? inactive->value : 1;
-
-       reg_val = readl(mdata->base + SPI_CFG0_REG);
-       if (mdata->dev_comp->enhance_timing) {
-               reg_val &= ~(0xffff << SPI_ADJUST_CFG0_CS_HOLD_OFFSET);
-               reg_val |= (((hold_dly - 1) & 0xffff)
-                          << SPI_ADJUST_CFG0_CS_HOLD_OFFSET);
-               reg_val &= ~(0xffff << SPI_ADJUST_CFG0_CS_SETUP_OFFSET);
-               reg_val |= (((setup_dly - 1) & 0xffff)
-                          << SPI_ADJUST_CFG0_CS_SETUP_OFFSET);
-       } else {
-               reg_val &= ~(0xff << SPI_CFG0_CS_HOLD_OFFSET);
-               reg_val |= (((hold_dly - 1) & 0xff) << SPI_CFG0_CS_HOLD_OFFSET);
-               reg_val &= ~(0xff << SPI_CFG0_CS_SETUP_OFFSET);
-               reg_val |= (((setup_dly - 1) & 0xff)
-                           << SPI_CFG0_CS_SETUP_OFFSET);
-       }
-       writel(reg_val, mdata->base + SPI_CFG0_REG);
-
-       reg_val = readl(mdata->base + SPI_CFG1_REG);
-       reg_val &= ~SPI_CFG1_CS_IDLE_MASK;
-       reg_val |= (((inactive_dly - 1) & 0xff) << SPI_CFG1_CS_IDLE_OFFSET);
-       writel(reg_val, mdata->base + SPI_CFG1_REG);
-
-       return 0;
-}
-
 static int mtk_spi_setup(struct spi_device *spi)
 {
        struct mtk_spi *mdata = spi_master_get_devdata(spi->master);
@@ -799,7 +827,12 @@ static int mtk_spi_probe(struct platform_device *pdev)
                goto err_put_master;
        }
 
-       clk_disable_unprepare(mdata->spi_clk);
+       mdata->spi_clk_hz = clk_get_rate(mdata->spi_clk);
+
+       if (mdata->dev_comp->no_need_unprepare)
+               clk_disable(mdata->spi_clk);
+       else
+               clk_disable_unprepare(mdata->spi_clk);
 
        pm_runtime_enable(&pdev->dev);
 
@@ -867,6 +900,9 @@ static int mtk_spi_remove(struct platform_device *pdev)
 
        mtk_spi_reset(mdata);
 
+       if (mdata->dev_comp->no_need_unprepare)
+               clk_unprepare(mdata->spi_clk);
+
        return 0;
 }
 
@@ -915,7 +951,10 @@ static int mtk_spi_runtime_suspend(struct device *dev)
        struct spi_master *master = dev_get_drvdata(dev);
        struct mtk_spi *mdata = spi_master_get_devdata(master);
 
-       clk_disable_unprepare(mdata->spi_clk);
+       if (mdata->dev_comp->no_need_unprepare)
+               clk_disable(mdata->spi_clk);
+       else
+               clk_disable_unprepare(mdata->spi_clk);
 
        return 0;
 }
@@ -926,7 +965,10 @@ static int mtk_spi_runtime_resume(struct device *dev)
        struct mtk_spi *mdata = spi_master_get_devdata(master);
        int ret;
 
-       ret = clk_prepare_enable(mdata->spi_clk);
+       if (mdata->dev_comp->no_need_unprepare)
+               ret = clk_enable(mdata->spi_clk);
+       else
+               ret = clk_prepare_enable(mdata->spi_clk);
        if (ret < 0) {
                dev_err(dev, "failed to enable spi_clk (%d)\n", ret);
                return ret;
index 37dfc6e..9708b78 100644 (file)
@@ -167,10 +167,17 @@ err_put_ctlr:
        return ret;
 }
 
+static const struct spi_device_id spi_mux_id[] = {
+       { "spi-mux" },
+       { }
+};
+MODULE_DEVICE_TABLE(spi, spi_mux_id);
+
 static const struct of_device_id spi_mux_of_match[] = {
        { .compatible = "spi-mux" },
        { }
 };
+MODULE_DEVICE_TABLE(of, spi_mux_of_match);
 
 static struct spi_driver spi_mux_driver = {
        .probe  = spi_mux_probe,
@@ -178,6 +185,7 @@ static struct spi_driver spi_mux_driver = {
                .name   = "spi-mux",
                .of_match_table = spi_mux_of_match,
        },
+       .id_table = spi_mux_id,
 };
 
 module_spi_driver(spi_mux_driver);
index 96b4182..4588994 100644 (file)
@@ -335,8 +335,10 @@ static int mxic_spi_data_xfer(struct mxic_spi *mxic, const void *txbuf,
 static bool mxic_spi_mem_supports_op(struct spi_mem *mem,
                                     const struct spi_mem_op *op)
 {
-       if (op->data.buswidth > 4 || op->addr.buswidth > 4 ||
-           op->dummy.buswidth > 4 || op->cmd.buswidth > 4)
+       bool all_false;
+
+       if (op->data.buswidth > 8 || op->addr.buswidth > 8 ||
+           op->dummy.buswidth > 8 || op->cmd.buswidth > 8)
                return false;
 
        if (op->data.nbytes && op->dummy.nbytes &&
@@ -346,7 +348,13 @@ static bool mxic_spi_mem_supports_op(struct spi_mem *mem,
        if (op->addr.nbytes > 7)
                return false;
 
-       return spi_mem_default_supports_op(mem, op);
+       all_false = !op->cmd.dtr && !op->addr.dtr && !op->dummy.dtr &&
+                   !op->data.dtr;
+
+       if (all_false)
+               return spi_mem_default_supports_op(mem, op);
+       else
+               return spi_mem_dtr_supports_op(mem, op);
 }
 
 static int mxic_spi_mem_exec_op(struct spi_mem *mem,
@@ -355,14 +363,15 @@ static int mxic_spi_mem_exec_op(struct spi_mem *mem,
        struct mxic_spi *mxic = spi_master_get_devdata(mem->spi->master);
        int nio = 1, i, ret;
        u32 ss_ctrl;
-       u8 addr[8];
-       u8 opcode = op->cmd.opcode;
+       u8 addr[8], cmd[2];
 
        ret = mxic_spi_set_freq(mxic, mem->spi->max_speed_hz);
        if (ret)
                return ret;
 
-       if (mem->spi->mode & (SPI_TX_QUAD | SPI_RX_QUAD))
+       if (mem->spi->mode & (SPI_TX_OCTAL | SPI_RX_OCTAL))
+               nio = 8;
+       else if (mem->spi->mode & (SPI_TX_QUAD | SPI_RX_QUAD))
                nio = 4;
        else if (mem->spi->mode & (SPI_TX_DUAL | SPI_RX_DUAL))
                nio = 2;
@@ -374,19 +383,26 @@ static int mxic_spi_mem_exec_op(struct spi_mem *mem,
               mxic->regs + HC_CFG);
        writel(HC_EN_BIT, mxic->regs + HC_EN);
 
-       ss_ctrl = OP_CMD_BYTES(1) | OP_CMD_BUSW(fls(op->cmd.buswidth) - 1);
+       ss_ctrl = OP_CMD_BYTES(op->cmd.nbytes) |
+                 OP_CMD_BUSW(fls(op->cmd.buswidth) - 1) |
+                 (op->cmd.dtr ? OP_CMD_DDR : 0);
 
        if (op->addr.nbytes)
                ss_ctrl |= OP_ADDR_BYTES(op->addr.nbytes) |
-                          OP_ADDR_BUSW(fls(op->addr.buswidth) - 1);
+                          OP_ADDR_BUSW(fls(op->addr.buswidth) - 1) |
+                          (op->addr.dtr ? OP_ADDR_DDR : 0);
 
        if (op->dummy.nbytes)
                ss_ctrl |= OP_DUMMY_CYC(op->dummy.nbytes);
 
        if (op->data.nbytes) {
-               ss_ctrl |= OP_DATA_BUSW(fls(op->data.buswidth) - 1);
-               if (op->data.dir == SPI_MEM_DATA_IN)
+               ss_ctrl |= OP_DATA_BUSW(fls(op->data.buswidth) - 1) |
+                          (op->data.dtr ? OP_DATA_DDR : 0);
+               if (op->data.dir == SPI_MEM_DATA_IN) {
                        ss_ctrl |= OP_READ;
+                       if (op->data.dtr)
+                               ss_ctrl |= OP_DQS_EN;
+               }
        }
 
        writel(ss_ctrl, mxic->regs + SS_CTRL(mem->spi->chip_select));
@@ -394,7 +410,10 @@ static int mxic_spi_mem_exec_op(struct spi_mem *mem,
        writel(readl(mxic->regs + HC_CFG) | HC_CFG_MAN_CS_ASSERT,
               mxic->regs + HC_CFG);
 
-       ret = mxic_spi_data_xfer(mxic, &opcode, NULL, 1);
+       for (i = 0; i < op->cmd.nbytes; i++)
+               cmd[i] = op->cmd.opcode >> (8 * (op->cmd.nbytes - i - 1));
+
+       ret = mxic_spi_data_xfer(mxic, cmd, NULL, op->cmd.nbytes);
        if (ret)
                goto out;
 
@@ -567,7 +586,8 @@ static int mxic_spi_probe(struct platform_device *pdev)
        master->bits_per_word_mask = SPI_BPW_MASK(8);
        master->mode_bits = SPI_CPOL | SPI_CPHA |
                        SPI_RX_DUAL | SPI_TX_DUAL |
-                       SPI_RX_QUAD | SPI_TX_QUAD;
+                       SPI_RX_QUAD | SPI_TX_QUAD |
+                       SPI_RX_OCTAL | SPI_TX_OCTAL;
 
        mxic_spi_hw_init(mxic);
 
index 34b31ab..e8de3cb 100644 (file)
@@ -328,8 +328,16 @@ orion_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
 static void orion_spi_set_cs(struct spi_device *spi, bool enable)
 {
        struct orion_spi *orion_spi;
+       void __iomem *ctrl_reg;
+       u32 val;
 
        orion_spi = spi_master_get_devdata(spi->master);
+       ctrl_reg = spi_reg(orion_spi, ORION_SPI_IF_CTRL_REG);
+
+       val = readl(ctrl_reg);
+
+       /* Clear existing chip-select and assertion state */
+       val &= ~(ORION_SPI_CS_MASK | 0x1);
 
        /*
         * If this line is using a GPIO to control chip select, this internal
@@ -338,9 +346,7 @@ static void orion_spi_set_cs(struct spi_device *spi, bool enable)
         * as it is handled by a GPIO, but that doesn't matter. What we need
         * is to deassert the old chip select and assert some other chip select.
         */
-       orion_spi_clrbits(orion_spi, ORION_SPI_IF_CTRL_REG, ORION_SPI_CS_MASK);
-       orion_spi_setbits(orion_spi, ORION_SPI_IF_CTRL_REG,
-                         ORION_SPI_CS(spi->chip_select));
+       val |= ORION_SPI_CS(spi->chip_select);
 
        /*
         * Chip select logic is inverted from spi_set_cs(). For lines using a
@@ -350,9 +356,13 @@ static void orion_spi_set_cs(struct spi_device *spi, bool enable)
         * doesn't matter.
         */
        if (!enable)
-               orion_spi_setbits(orion_spi, ORION_SPI_IF_CTRL_REG, 0x1);
-       else
-               orion_spi_clrbits(orion_spi, ORION_SPI_IF_CTRL_REG, 0x1);
+               val |= 0x1;
+
+       /*
+        * To avoid toggling unwanted chip selects, update the register
+        * with a single write.
+        */
+       writel(val, ctrl_reg);
 }
 
 static inline int orion_spi_wait_till_ready(struct orion_spi *orion_spi)
index 104bde1..5eb7b61 100644 (file)
@@ -361,6 +361,7 @@ static int pic32_spi_dma_config(struct pic32_spi *pic32s, u32 dma_width)
        struct dma_slave_config cfg;
        int ret;
 
+       memset(&cfg, 0, sizeof(cfg));
        cfg.device_fc = true;
        cfg.src_addr = pic32s->dma_base + buf_offset;
        cfg.dst_addr = pic32s->dma_base + buf_offset;
index 974e307..1573f6d 100644 (file)
@@ -594,24 +594,29 @@ static int u32_reader(struct driver_data *drv_data)
 
 static void reset_sccr1(struct driver_data *drv_data)
 {
-       struct chip_data *chip =
-               spi_get_ctldata(drv_data->controller->cur_msg->spi);
-       u32 sccr1_reg;
+       u32 mask = drv_data->int_cr1 | drv_data->dma_cr1, threshold;
+       struct chip_data *chip;
+
+       if (drv_data->controller->cur_msg) {
+               chip = spi_get_ctldata(drv_data->controller->cur_msg->spi);
+               threshold = chip->threshold;
+       } else {
+               threshold = 0;
+       }
 
-       sccr1_reg = pxa2xx_spi_read(drv_data, SSCR1) & ~drv_data->int_cr1;
        switch (drv_data->ssp_type) {
        case QUARK_X1000_SSP:
-               sccr1_reg &= ~QUARK_X1000_SSCR1_RFT;
+               mask |= QUARK_X1000_SSCR1_RFT;
                break;
        case CE4100_SSP:
-               sccr1_reg &= ~CE4100_SSCR1_RFT;
+               mask |= CE4100_SSCR1_RFT;
                break;
        default:
-               sccr1_reg &= ~SSCR1_RFT;
+               mask |= SSCR1_RFT;
                break;
        }
-       sccr1_reg |= chip->threshold;
-       pxa2xx_spi_write(drv_data, SSCR1, sccr1_reg);
+
+       pxa2xx_spi_update(drv_data, SSCR1, mask, threshold);
 }
 
 static void int_stop_and_reset(struct driver_data *drv_data)
@@ -724,11 +729,8 @@ static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
 
 static void handle_bad_msg(struct driver_data *drv_data)
 {
+       int_stop_and_reset(drv_data);
        pxa2xx_spi_off(drv_data);
-       clear_SSCR1_bits(drv_data, drv_data->int_cr1);
-       if (!pxa25x_ssp_comp(drv_data))
-               pxa2xx_spi_write(drv_data, SSTO, 0);
-       write_SSSR_CS(drv_data, drv_data->clear_sr);
 
        dev_err(drv_data->ssp->dev, "bad message state in interrupt handler\n");
 }
@@ -1156,13 +1158,10 @@ static void pxa2xx_spi_handle_err(struct spi_controller *controller,
 {
        struct driver_data *drv_data = spi_controller_get_devdata(controller);
 
+       int_stop_and_reset(drv_data);
+
        /* Disable the SSP */
        pxa2xx_spi_off(drv_data);
-       /* Clear and disable interrupts and service requests */
-       write_SSSR_CS(drv_data, drv_data->clear_sr);
-       clear_SSCR1_bits(drv_data, drv_data->int_cr1 | drv_data->dma_cr1);
-       if (!pxa25x_ssp_comp(drv_data))
-               pxa2xx_spi_write(drv_data, SSTO, 0);
 
        /*
         * Stop the DMA if running. Note DMA callback handler may have unset
diff --git a/drivers/spi/spi-rockchip-sfc.c b/drivers/spi/spi-rockchip-sfc.c
new file mode 100644 (file)
index 0000000..a46b385
--- /dev/null
@@ -0,0 +1,694 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Rockchip Serial Flash Controller Driver
+ *
+ * Copyright (c) 2017-2021, Rockchip Inc.
+ * Author: Shawn Lin <shawn.lin@rock-chips.com>
+ *        Chris Morgan <macroalpha82@gmail.com>
+ *        Jon Lin <Jon.lin@rock-chips.com>
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/dma-mapping.h>
+#include <linux/iopoll.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/spi/spi-mem.h>
+
+/* System control */
+#define SFC_CTRL                       0x0
+#define  SFC_CTRL_PHASE_SEL_NEGETIVE   BIT(1)
+#define  SFC_CTRL_CMD_BITS_SHIFT       8
+#define  SFC_CTRL_ADDR_BITS_SHIFT      10
+#define  SFC_CTRL_DATA_BITS_SHIFT      12
+
+/* Interrupt mask */
+#define SFC_IMR                                0x4
+#define  SFC_IMR_RX_FULL               BIT(0)
+#define  SFC_IMR_RX_UFLOW              BIT(1)
+#define  SFC_IMR_TX_OFLOW              BIT(2)
+#define  SFC_IMR_TX_EMPTY              BIT(3)
+#define  SFC_IMR_TRAN_FINISH           BIT(4)
+#define  SFC_IMR_BUS_ERR               BIT(5)
+#define  SFC_IMR_NSPI_ERR              BIT(6)
+#define  SFC_IMR_DMA                   BIT(7)
+
+/* Interrupt clear */
+#define SFC_ICLR                       0x8
+#define  SFC_ICLR_RX_FULL              BIT(0)
+#define  SFC_ICLR_RX_UFLOW             BIT(1)
+#define  SFC_ICLR_TX_OFLOW             BIT(2)
+#define  SFC_ICLR_TX_EMPTY             BIT(3)
+#define  SFC_ICLR_TRAN_FINISH          BIT(4)
+#define  SFC_ICLR_BUS_ERR              BIT(5)
+#define  SFC_ICLR_NSPI_ERR             BIT(6)
+#define  SFC_ICLR_DMA                  BIT(7)
+
+/* FIFO threshold level */
+#define SFC_FTLR                       0xc
+#define  SFC_FTLR_TX_SHIFT             0
+#define  SFC_FTLR_TX_MASK              0x1f
+#define  SFC_FTLR_RX_SHIFT             8
+#define  SFC_FTLR_RX_MASK              0x1f
+
+/* Reset FSM and FIFO */
+#define SFC_RCVR                       0x10
+#define  SFC_RCVR_RESET                        BIT(0)
+
+/* Enhanced mode */
+#define SFC_AX                         0x14
+
+/* Address Bit number */
+#define SFC_ABIT                       0x18
+
+/* Interrupt status */
+#define SFC_ISR                                0x1c
+#define  SFC_ISR_RX_FULL_SHIFT         BIT(0)
+#define  SFC_ISR_RX_UFLOW_SHIFT                BIT(1)
+#define  SFC_ISR_TX_OFLOW_SHIFT                BIT(2)
+#define  SFC_ISR_TX_EMPTY_SHIFT                BIT(3)
+#define  SFC_ISR_TX_FINISH_SHIFT       BIT(4)
+#define  SFC_ISR_BUS_ERR_SHIFT         BIT(5)
+#define  SFC_ISR_NSPI_ERR_SHIFT                BIT(6)
+#define  SFC_ISR_DMA_SHIFT             BIT(7)
+
+/* FIFO status */
+#define SFC_FSR                                0x20
+#define  SFC_FSR_TX_IS_FULL            BIT(0)
+#define  SFC_FSR_TX_IS_EMPTY           BIT(1)
+#define  SFC_FSR_RX_IS_EMPTY           BIT(2)
+#define  SFC_FSR_RX_IS_FULL            BIT(3)
+#define  SFC_FSR_TXLV_MASK             GENMASK(12, 8)
+#define  SFC_FSR_TXLV_SHIFT            8
+#define  SFC_FSR_RXLV_MASK             GENMASK(20, 16)
+#define  SFC_FSR_RXLV_SHIFT            16
+
+/* FSM status */
+#define SFC_SR                         0x24
+#define  SFC_SR_IS_IDLE                        0x0
+#define  SFC_SR_IS_BUSY                        0x1
+
+/* Raw interrupt status */
+#define SFC_RISR                       0x28
+#define  SFC_RISR_RX_FULL              BIT(0)
+#define  SFC_RISR_RX_UNDERFLOW         BIT(1)
+#define  SFC_RISR_TX_OVERFLOW          BIT(2)
+#define  SFC_RISR_TX_EMPTY             BIT(3)
+#define  SFC_RISR_TRAN_FINISH          BIT(4)
+#define  SFC_RISR_BUS_ERR              BIT(5)
+#define  SFC_RISR_NSPI_ERR             BIT(6)
+#define  SFC_RISR_DMA                  BIT(7)
+
+/* Version */
+#define SFC_VER                                0x2C
+#define  SFC_VER_3                     0x3
+#define  SFC_VER_4                     0x4
+#define  SFC_VER_5                     0x5
+
+/* Delay line controller register */
+#define SFC_DLL_CTRL0                  0x3C
+#define SFC_DLL_CTRL0_SCLK_SMP_DLL     BIT(15)
+#define SFC_DLL_CTRL0_DLL_MAX_VER4     0xFFU
+#define SFC_DLL_CTRL0_DLL_MAX_VER5     0x1FFU
+
+/* Master trigger */
+#define SFC_DMA_TRIGGER                        0x80
+#define SFC_DMA_TRIGGER_START          1
+
+/* Src or Dst addr for master */
+#define SFC_DMA_ADDR                   0x84
+
+/* Length control register extension 32GB */
+#define SFC_LEN_CTRL                   0x88
+#define SFC_LEN_CTRL_TRB_SEL           1
+#define SFC_LEN_EXT                    0x8C
+
+/* Command */
+#define SFC_CMD                                0x100
+#define  SFC_CMD_IDX_SHIFT             0
+#define  SFC_CMD_DUMMY_SHIFT           8
+#define  SFC_CMD_DIR_SHIFT             12
+#define  SFC_CMD_DIR_RD                        0
+#define  SFC_CMD_DIR_WR                        1
+#define  SFC_CMD_ADDR_SHIFT            14
+#define  SFC_CMD_ADDR_0BITS            0
+#define  SFC_CMD_ADDR_24BITS           1
+#define  SFC_CMD_ADDR_32BITS           2
+#define  SFC_CMD_ADDR_XBITS            3
+#define  SFC_CMD_TRAN_BYTES_SHIFT      16
+#define  SFC_CMD_CS_SHIFT              30
+
+/* Address */
+#define SFC_ADDR                       0x104
+
+/* Data */
+#define SFC_DATA                       0x108
+
+/* The controller and documentation reports that it supports up to 4 CS
+ * devices (0-3), however I have only been able to test a single CS (CS 0)
+ * due to the configuration of my device.
+ */
+#define SFC_MAX_CHIPSELECT_NUM         4
+
+/* The SFC can transfer at most 16KB - 1 in one go;
+ * it is capped at 15.5KB here for alignment.
+ */
+#define SFC_MAX_IOSIZE_VER3            (512 * 31)
+
+/* DMA is only enabled for large data transmission */
+#define SFC_DMA_TRANS_THRETHOLD                (0x40)
+
+/* The datasheet suggests keeping the clock frequency under 150 MHz.
+ * No minimum or typical value is given.
+ */
+#define SFC_MAX_SPEED          (150 * 1000 * 1000)
+
+struct rockchip_sfc {
+       struct device *dev;
+       void __iomem *regbase;
+       struct clk *hclk;
+       struct clk *clk;
+       u32 frequency;
+       /* virtual mapped addr for dma_buffer */
+       void *buffer;
+       dma_addr_t dma_buffer;
+       struct completion cp;
+       bool use_dma;
+       u32 max_iosize;
+       u16 version;
+};
+
+static int rockchip_sfc_reset(struct rockchip_sfc *sfc)
+{
+       int err;
+       u32 status;
+
+       writel_relaxed(SFC_RCVR_RESET, sfc->regbase + SFC_RCVR);
+
+       err = readl_poll_timeout(sfc->regbase + SFC_RCVR, status,
+                                !(status & SFC_RCVR_RESET), 20,
+                                jiffies_to_usecs(HZ));
+       if (err)
+               dev_err(sfc->dev, "SFC reset never finished\n");
+
+       /* Still need to clear the masked interrupt from RISR */
+       writel_relaxed(0xFFFFFFFF, sfc->regbase + SFC_ICLR);
+
+       dev_dbg(sfc->dev, "reset\n");
+
+       return err;
+}
+
+static u16 rockchip_sfc_get_version(struct rockchip_sfc *sfc)
+{
+       return  (u16)(readl(sfc->regbase + SFC_VER) & 0xffff);
+}
+
+static u32 rockchip_sfc_get_max_iosize(struct rockchip_sfc *sfc)
+{
+       return SFC_MAX_IOSIZE_VER3;
+}
+
+static void rockchip_sfc_irq_unmask(struct rockchip_sfc *sfc, u32 mask)
+{
+       u32 reg;
+
+       /* Clear the given bits in IMR to unmask those interrupts */
+       reg = readl(sfc->regbase + SFC_IMR);
+       reg &= ~mask;
+       writel(reg, sfc->regbase + SFC_IMR);
+}
+
+static void rockchip_sfc_irq_mask(struct rockchip_sfc *sfc, u32 mask)
+{
+       u32 reg;
+
+       /* Set the given bits in IMR to mask those interrupts */
+       reg = readl(sfc->regbase + SFC_IMR);
+       reg |= mask;
+       writel(reg, sfc->regbase + SFC_IMR);
+}
+
+static int rockchip_sfc_init(struct rockchip_sfc *sfc)
+{
+       writel(0, sfc->regbase + SFC_CTRL);
+       writel(0xFFFFFFFF, sfc->regbase + SFC_ICLR);
+       rockchip_sfc_irq_mask(sfc, 0xFFFFFFFF);
+       if (rockchip_sfc_get_version(sfc) >= SFC_VER_4)
+               writel(SFC_LEN_CTRL_TRB_SEL, sfc->regbase + SFC_LEN_CTRL);
+
+       return 0;
+}
+
+static int rockchip_sfc_wait_txfifo_ready(struct rockchip_sfc *sfc, u32 timeout_us)
+{
+       int ret = 0;
+       u32 status;
+
+       ret = readl_poll_timeout(sfc->regbase + SFC_FSR, status,
+                                status & SFC_FSR_TXLV_MASK, 0,
+                                timeout_us);
+       if (ret) {
+               dev_dbg(sfc->dev, "sfc wait tx fifo timeout\n");
+
+               return -ETIMEDOUT;
+       }
+
+       return (status & SFC_FSR_TXLV_MASK) >> SFC_FSR_TXLV_SHIFT;
+}
+
+static int rockchip_sfc_wait_rxfifo_ready(struct rockchip_sfc *sfc, u32 timeout_us)
+{
+       int ret = 0;
+       u32 status;
+
+       ret = readl_poll_timeout(sfc->regbase + SFC_FSR, status,
+                                status & SFC_FSR_RXLV_MASK, 0,
+                                timeout_us);
+       if (ret) {
+               dev_dbg(sfc->dev, "sfc wait rx fifo timeout\n");
+
+               return -ETIMEDOUT;
+       }
+
+       return (status & SFC_FSR_RXLV_MASK) >> SFC_FSR_RXLV_SHIFT;
+}
+
+static void rockchip_sfc_adjust_op_work(struct spi_mem_op *op)
+{
+       if (unlikely(op->dummy.nbytes && !op->addr.nbytes)) {
+               /*
+                * The SFC cannot output DUMMY cycles right after CMD cycles, so
+                * treat them as ADDR cycles.
+                */
+               op->addr.nbytes = op->dummy.nbytes;
+               op->addr.buswidth = op->dummy.buswidth;
+               op->addr.val = 0xFFFFFFFFF;
+
+               op->dummy.nbytes = 0;
+       }
+}
+
+static int rockchip_sfc_xfer_setup(struct rockchip_sfc *sfc,
+                                  struct spi_mem *mem,
+                                  const struct spi_mem_op *op,
+                                  u32 len)
+{
+       u32 ctrl = 0, cmd = 0;
+
+       /* set CMD */
+       cmd = op->cmd.opcode;
+       ctrl |= ((op->cmd.buswidth >> 1) << SFC_CTRL_CMD_BITS_SHIFT);
+
+       /* set ADDR */
+       if (op->addr.nbytes) {
+               if (op->addr.nbytes == 4) {
+                       cmd |= SFC_CMD_ADDR_32BITS << SFC_CMD_ADDR_SHIFT;
+               } else if (op->addr.nbytes == 3) {
+                       cmd |= SFC_CMD_ADDR_24BITS << SFC_CMD_ADDR_SHIFT;
+               } else {
+                       cmd |= SFC_CMD_ADDR_XBITS << SFC_CMD_ADDR_SHIFT;
+                       writel(op->addr.nbytes * 8 - 1, sfc->regbase + SFC_ABIT);
+               }
+
+               ctrl |= ((op->addr.buswidth >> 1) << SFC_CTRL_ADDR_BITS_SHIFT);
+       }
+
+       /* set DUMMY */
+       if (op->dummy.nbytes) {
+               if (op->dummy.buswidth == 4)
+                       cmd |= op->dummy.nbytes * 2 << SFC_CMD_DUMMY_SHIFT;
+               else if (op->dummy.buswidth == 2)
+                       cmd |= op->dummy.nbytes * 4 << SFC_CMD_DUMMY_SHIFT;
+               else
+                       cmd |= op->dummy.nbytes * 8 << SFC_CMD_DUMMY_SHIFT;
+       }
+
+       /* set DATA */
+       if (sfc->version >= SFC_VER_4) /* Clear it if no data to transfer */
+               writel(len, sfc->regbase + SFC_LEN_EXT);
+       else
+               cmd |= len << SFC_CMD_TRAN_BYTES_SHIFT;
+       if (len) {
+               if (op->data.dir == SPI_MEM_DATA_OUT)
+                       cmd |= SFC_CMD_DIR_WR << SFC_CMD_DIR_SHIFT;
+
+               ctrl |= ((op->data.buswidth >> 1) << SFC_CTRL_DATA_BITS_SHIFT);
+       }
+       if (!len && op->addr.nbytes)
+               cmd |= SFC_CMD_DIR_WR << SFC_CMD_DIR_SHIFT;
+
+       /* set the Controller */
+       ctrl |= SFC_CTRL_PHASE_SEL_NEGETIVE;
+       cmd |= mem->spi->chip_select << SFC_CMD_CS_SHIFT;
+
+       dev_dbg(sfc->dev, "sfc addr.nbytes=%x(x%d) dummy.nbytes=%x(x%d)\n",
+               op->addr.nbytes, op->addr.buswidth,
+               op->dummy.nbytes, op->dummy.buswidth);
+       dev_dbg(sfc->dev, "sfc ctrl=%x cmd=%x addr=%llx len=%x\n",
+               ctrl, cmd, op->addr.val, len);
+
+       writel(ctrl, sfc->regbase + SFC_CTRL);
+       writel(cmd, sfc->regbase + SFC_CMD);
+       if (op->addr.nbytes)
+               writel(op->addr.val, sfc->regbase + SFC_ADDR);
+
+       return 0;
+}
+
+static int rockchip_sfc_write_fifo(struct rockchip_sfc *sfc, const u8 *buf, int len)
+{
+       u8 bytes = len & 0x3;
+       u32 dwords;
+       int tx_level;
+       u32 write_words;
+       u32 tmp = 0;
+
+       dwords = len >> 2;
+       while (dwords) {
+               tx_level = rockchip_sfc_wait_txfifo_ready(sfc, 1000);
+               if (tx_level < 0)
+                       return tx_level;
+               write_words = min_t(u32, tx_level, dwords);
+               iowrite32_rep(sfc->regbase + SFC_DATA, buf, write_words);
+               buf += write_words << 2;
+               dwords -= write_words;
+       }
+
+       /* write the remaining non-word-aligned bytes */
+       if (bytes) {
+               tx_level = rockchip_sfc_wait_txfifo_ready(sfc, 1000);
+               if (tx_level < 0)
+                       return tx_level;
+               memcpy(&tmp, buf, bytes);
+               writel(tmp, sfc->regbase + SFC_DATA);
+       }
+
+       return len;
+}
+
+static int rockchip_sfc_read_fifo(struct rockchip_sfc *sfc, u8 *buf, int len)
+{
+       u8 bytes = len & 0x3;
+       u32 dwords;
+       u8 read_words;
+       int rx_level;
+       int tmp;
+
+       /* word aligned access only */
+       dwords = len >> 2;
+       while (dwords) {
+               rx_level = rockchip_sfc_wait_rxfifo_ready(sfc, 1000);
+               if (rx_level < 0)
+                       return rx_level;
+               read_words = min_t(u32, rx_level, dwords);
+               ioread32_rep(sfc->regbase + SFC_DATA, buf, read_words);
+               buf += read_words << 2;
+               dwords -= read_words;
+       }
+
+       /* read the remaining non-word-aligned bytes */
+       if (bytes) {
+               rx_level = rockchip_sfc_wait_rxfifo_ready(sfc, 1000);
+               if (rx_level < 0)
+                       return rx_level;
+               tmp = readl(sfc->regbase + SFC_DATA);
+               memcpy(buf, &tmp, bytes);
+       }
+
+       return len;
+}
+
+static int rockchip_sfc_fifo_transfer_dma(struct rockchip_sfc *sfc, dma_addr_t dma_buf, size_t len)
+{
+       writel(0xFFFFFFFF, sfc->regbase + SFC_ICLR);
+       writel((u32)dma_buf, sfc->regbase + SFC_DMA_ADDR);
+       writel(SFC_DMA_TRIGGER_START, sfc->regbase + SFC_DMA_TRIGGER);
+
+       return len;
+}
+
+static int rockchip_sfc_xfer_data_poll(struct rockchip_sfc *sfc,
+                                      const struct spi_mem_op *op, u32 len)
+{
+       dev_dbg(sfc->dev, "sfc xfer_poll len=%x\n", len);
+
+       if (op->data.dir == SPI_MEM_DATA_OUT)
+               return rockchip_sfc_write_fifo(sfc, op->data.buf.out, len);
+       else
+               return rockchip_sfc_read_fifo(sfc, op->data.buf.in, len);
+}
+
+static int rockchip_sfc_xfer_data_dma(struct rockchip_sfc *sfc,
+                                     const struct spi_mem_op *op, u32 len)
+{
+       int ret;
+
+       dev_dbg(sfc->dev, "sfc xfer_dma len=%x\n", len);
+
+       if (op->data.dir == SPI_MEM_DATA_OUT)
+               memcpy(sfc->buffer, op->data.buf.out, len);
+
+       ret = rockchip_sfc_fifo_transfer_dma(sfc, sfc->dma_buffer, len);
+       if (!wait_for_completion_timeout(&sfc->cp, msecs_to_jiffies(2000))) {
+               dev_err(sfc->dev, "DMA wait for transfer finish timeout\n");
+               ret = -ETIMEDOUT;
+       }
+       rockchip_sfc_irq_mask(sfc, SFC_IMR_DMA);
+       if (op->data.dir == SPI_MEM_DATA_IN)
+               memcpy(op->data.buf.in, sfc->buffer, len);
+
+       return ret;
+}
+
+static int rockchip_sfc_xfer_done(struct rockchip_sfc *sfc, u32 timeout_us)
+{
+       int ret = 0;
+       u32 status;
+
+       ret = readl_poll_timeout(sfc->regbase + SFC_SR, status,
+                                !(status & SFC_SR_IS_BUSY),
+                                20, timeout_us);
+       if (ret) {
+               dev_err(sfc->dev, "wait sfc idle timeout\n");
+               rockchip_sfc_reset(sfc);
+
+               ret = -EIO;
+       }
+
+       return ret;
+}
+
+static int rockchip_sfc_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op)
+{
+       struct rockchip_sfc *sfc = spi_master_get_devdata(mem->spi->master);
+       u32 len = op->data.nbytes;
+       int ret;
+
+       if (unlikely(mem->spi->max_speed_hz != sfc->frequency)) {
+               ret = clk_set_rate(sfc->clk, mem->spi->max_speed_hz);
+               if (ret)
+                       return ret;
+               sfc->frequency = mem->spi->max_speed_hz;
+               dev_dbg(sfc->dev, "set_freq=%dHz real_freq=%ldHz\n",
+                       sfc->frequency, clk_get_rate(sfc->clk));
+       }
+
+       rockchip_sfc_adjust_op_work((struct spi_mem_op *)op);
+       rockchip_sfc_xfer_setup(sfc, mem, op, len);
+       if (len) {
+               if (likely(sfc->use_dma) && len >= SFC_DMA_TRANS_THRETHOLD) {
+                       init_completion(&sfc->cp);
+                       rockchip_sfc_irq_unmask(sfc, SFC_IMR_DMA);
+                       ret = rockchip_sfc_xfer_data_dma(sfc, op, len);
+               } else {
+                       ret = rockchip_sfc_xfer_data_poll(sfc, op, len);
+               }
+
+               if (ret != len) {
+                       dev_err(sfc->dev, "xfer data failed ret %d dir %d\n", ret, op->data.dir);
+
+                       return -EIO;
+               }
+       }
+
+       return rockchip_sfc_xfer_done(sfc, 100000);
+}
+
+static int rockchip_sfc_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
+{
+       struct rockchip_sfc *sfc = spi_master_get_devdata(mem->spi->master);
+
+       op->data.nbytes = min(op->data.nbytes, sfc->max_iosize);
+
+       return 0;
+}
+
+static const struct spi_controller_mem_ops rockchip_sfc_mem_ops = {
+       .exec_op = rockchip_sfc_exec_mem_op,
+       .adjust_op_size = rockchip_sfc_adjust_op_size,
+};
+
+static irqreturn_t rockchip_sfc_irq_handler(int irq, void *dev_id)
+{
+       struct rockchip_sfc *sfc = dev_id;
+       u32 reg;
+
+       reg = readl(sfc->regbase + SFC_RISR);
+
+       /* Clear interrupt */
+       writel_relaxed(reg, sfc->regbase + SFC_ICLR);
+
+       if (reg & SFC_RISR_DMA) {
+               complete(&sfc->cp);
+
+               return IRQ_HANDLED;
+       }
+
+       return IRQ_NONE;
+}
+
+static int rockchip_sfc_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct spi_master *master;
+       struct resource *res;
+       struct rockchip_sfc *sfc;
+       int ret;
+
+       master = devm_spi_alloc_master(&pdev->dev, sizeof(*sfc));
+       if (!master)
+               return -ENOMEM;
+
+       master->flags = SPI_MASTER_HALF_DUPLEX;
+       master->mem_ops = &rockchip_sfc_mem_ops;
+       master->dev.of_node = pdev->dev.of_node;
+       master->mode_bits = SPI_TX_QUAD | SPI_TX_DUAL | SPI_RX_QUAD | SPI_RX_DUAL;
+       master->max_speed_hz = SFC_MAX_SPEED;
+       master->num_chipselect = SFC_MAX_CHIPSELECT_NUM;
+       platform_set_drvdata(pdev, master); /* needed by remove() */
+       sfc = spi_master_get_devdata(master);
+       sfc->dev = dev;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       sfc->regbase = devm_ioremap_resource(dev, res);
+       if (IS_ERR(sfc->regbase))
+               return PTR_ERR(sfc->regbase);
+
+       sfc->clk = devm_clk_get(&pdev->dev, "clk_sfc");
+       if (IS_ERR(sfc->clk)) {
+               dev_err(&pdev->dev, "Failed to get sfc interface clk\n");
+               return PTR_ERR(sfc->clk);
+       }
+
+       sfc->hclk = devm_clk_get(&pdev->dev, "hclk_sfc");
+       if (IS_ERR(sfc->hclk)) {
+               dev_err(&pdev->dev, "Failed to get sfc ahb clk\n");
+               return PTR_ERR(sfc->hclk);
+       }
+
+       sfc->use_dma = !of_property_read_bool(sfc->dev->of_node,
+                                             "rockchip,sfc-no-dma");
+
+       if (sfc->use_dma) {
+               ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+               if (ret) {
+                       dev_warn(dev, "Unable to set dma mask\n");
+                       return ret;
+               }
+
+               sfc->buffer = dmam_alloc_coherent(dev, SFC_MAX_IOSIZE_VER3,
+                                                 &sfc->dma_buffer,
+                                                 GFP_KERNEL);
+               if (!sfc->buffer)
+                       return -ENOMEM;
+       }
+
+       ret = clk_prepare_enable(sfc->hclk);
+       if (ret) {
+               dev_err(&pdev->dev, "Failed to enable ahb clk\n");
+               goto err_hclk;
+       }
+
+       ret = clk_prepare_enable(sfc->clk);
+       if (ret) {
+               dev_err(&pdev->dev, "Failed to enable interface clk\n");
+               goto err_clk;
+       }
+
+       /* Find the irq */
+       ret = platform_get_irq(pdev, 0);
+       if (ret < 0) {
+               dev_err(dev, "Failed to get the irq\n");
+               goto err_irq;
+       }
+
+       ret = devm_request_irq(dev, ret, rockchip_sfc_irq_handler,
+                              0, pdev->name, sfc);
+       if (ret) {
+               dev_err(dev, "Failed to request irq\n");
+
+               goto err_irq; /* was "return ret;" which leaked both clocks */
+       }
+
+       ret = rockchip_sfc_init(sfc);
+       if (ret)
+               goto err_irq;
+
+       sfc->max_iosize = rockchip_sfc_get_max_iosize(sfc);
+       sfc->version = rockchip_sfc_get_version(sfc);
+
+       ret = spi_register_master(master);
+       if (ret)
+               goto err_irq;
+
+       return 0;
+
+err_irq:
+       clk_disable_unprepare(sfc->clk);
+err_clk:
+       clk_disable_unprepare(sfc->hclk);
+err_hclk:
+       return ret;
+}
+
+static int rockchip_sfc_remove(struct platform_device *pdev)
+{
+       struct spi_master *master = platform_get_drvdata(pdev);
+       struct rockchip_sfc *sfc = spi_master_get_devdata(master);
+
+       spi_unregister_master(master);
+
+       clk_disable_unprepare(sfc->clk);
+       clk_disable_unprepare(sfc->hclk);
+
+       return 0;
+}
+
+static const struct of_device_id rockchip_sfc_dt_ids[] = {
+       { .compatible = "rockchip,sfc"},
+       { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, rockchip_sfc_dt_ids);
+
+static struct platform_driver rockchip_sfc_driver = {
+       .driver = {
+               .name   = "rockchip-sfc",
+               .of_match_table = rockchip_sfc_dt_ids,
+       },
+       .probe  = rockchip_sfc_probe,
+       .remove = rockchip_sfc_remove,
+};
+module_platform_driver(rockchip_sfc_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Rockchip Serial Flash Controller Driver");
+MODULE_AUTHOR("Shawn Lin <shawn.lin@rock-chips.com>");
+MODULE_AUTHOR("Chris Morgan <macromorgan@hotmail.com>");
+MODULE_AUTHOR("Jon Lin <Jon.lin@rock-chips.com>");
index ab19068..1edbf44 100644 (file)
 
 /*
  * ADI slave devices include RTC, ADC, regulator, charger, thermal and so on.
- * The slave devices address offset is always 0x8000 and size is 4K.
+ * ADI supports 12/14bit address for r2p0, and additional 17bit for r3p0 or
+ * later versions. Since bit[1:0] are zero, the spec describes them as
+ * 10/12/15bit address mode.
+ * The 10bit mode supports a single slave, the 12/15bit modes support 3
+ * slaves, where the high two bits are the slave_id.
+ * The slave devices address offset is 0x8000 for 10/12bit address mode,
+ * and 0x20000 for 15bit mode.
  */
-#define ADI_SLAVE_ADDR_SIZE            SZ_4K
-#define ADI_SLAVE_OFFSET               0x8000
+#define ADI_10BIT_SLAVE_ADDR_SIZE      SZ_4K
+#define ADI_10BIT_SLAVE_OFFSET         0x8000
+#define ADI_12BIT_SLAVE_ADDR_SIZE      SZ_16K
+#define ADI_12BIT_SLAVE_OFFSET         0x8000
+#define ADI_15BIT_SLAVE_ADDR_SIZE      SZ_128K
+#define ADI_15BIT_SLAVE_OFFSET         0x20000
 
 /* Timeout (ms) for the trylock of hardware spinlocks */
 #define ADI_HWSPINLOCK_TIMEOUT         5000
 
 #define ADI_FIFO_DRAIN_TIMEOUT         1000
 #define ADI_READ_TIMEOUT               2000
-#define REG_ADDR_LOW_MASK              GENMASK(11, 0)
+
+/*
+ * Read back address from REG_ADI_RD_DATA bit[30:16] which maps to:
+ * REG_ADI_RD_CMD bit[14:0] for r2p0
+ * REG_ADI_RD_CMD bit[16:2] for r3p0
+ */
+#define RDBACK_ADDR_MASK_R2            GENMASK(14, 0)
+#define RDBACK_ADDR_MASK_R3            GENMASK(16, 2)
+#define RDBACK_ADDR_SHIFT_R3           2
 
 /* Registers definitions for PMIC watchdog controller */
-#define REG_WDG_LOAD_LOW               0x80
-#define REG_WDG_LOAD_HIGH              0x84
-#define REG_WDG_CTRL                   0x88
-#define REG_WDG_LOCK                   0xa0
+#define REG_WDG_LOAD_LOW               0x0
+#define REG_WDG_LOAD_HIGH              0x4
+#define REG_WDG_CTRL                   0x8
+#define REG_WDG_LOCK                   0x20
 
 /* Bits definitions for register REG_WDG_CTRL */
 #define BIT_WDG_RUN                    BIT(1)
 #define BIT_WDG_NEW                    BIT(2)
 #define BIT_WDG_RST                    BIT(3)
 
+/* Bits definitions for register REG_MODULE_EN */
+#define BIT_WDG_EN                     BIT(2)
+
 /* Registers definitions for PMIC */
 #define PMIC_RST_STATUS                        0xee8
 #define PMIC_MODULE_EN                 0xc08
 #define PMIC_CLK_EN                    0xc18
-#define BIT_WDG_EN                     BIT(2)
+#define PMIC_WDG_BASE                  0x80
 
 /* Definition of PMIC reset status register */
 #define HWRST_STATUS_SECURITY          0x02
 #define HWRST_STATUS_WATCHDOG          0xf0
 
 /* Use default timeout 50 ms that converts to watchdog values */
-#define WDG_LOAD_VAL                   ((50 * 1000) / 32768)
+#define WDG_LOAD_VAL                   ((50 * 32768) / 1000)
 #define WDG_LOAD_MASK                  GENMASK(15, 0)
 #define WDG_UNLOCK_KEY                 0xe551
 
+struct sprd_adi_wdg {
+       u32 base;
+       u32 rst_sts;
+       u32 wdg_en;
+       u32 wdg_clk;
+};
+
+struct sprd_adi_data {
+       u32 slave_offset;
+       u32 slave_addr_size;
+       int (*read_check)(u32 val, u32 reg);
+       int (*restart)(struct notifier_block *this,
+                      unsigned long mode, void *cmd);
+       void (*wdg_rst)(void *p);
+};
+
 struct sprd_adi {
        struct spi_controller   *ctlr;
        struct device           *dev;
@@ -115,26 +152,21 @@ struct sprd_adi {
        unsigned long           slave_vbase;
        unsigned long           slave_pbase;
        struct notifier_block   restart_handler;
+       const struct sprd_adi_data *data;
 };
 
-static int sprd_adi_check_paddr(struct sprd_adi *sadi, u32 paddr)
+static int sprd_adi_check_addr(struct sprd_adi *sadi, u32 reg)
 {
-       if (paddr < sadi->slave_pbase || paddr >
-           (sadi->slave_pbase + ADI_SLAVE_ADDR_SIZE)) {
+       if (reg >= sadi->data->slave_addr_size) {
                dev_err(sadi->dev,
-                       "slave physical address is incorrect, addr = 0x%x\n",
-                       paddr);
+                       "slave address offset is incorrect, reg = 0x%x\n",
+                       reg);
                return -EINVAL;
        }
 
        return 0;
 }
 
-static unsigned long sprd_adi_to_vaddr(struct sprd_adi *sadi, u32 paddr)
-{
-       return (paddr - sadi->slave_pbase + sadi->slave_vbase);
-}
-
 static int sprd_adi_drain_fifo(struct sprd_adi *sadi)
 {
        u32 timeout = ADI_FIFO_DRAIN_TIMEOUT;
@@ -161,11 +193,35 @@ static int sprd_adi_fifo_is_full(struct sprd_adi *sadi)
        return readl_relaxed(sadi->base + REG_ADI_ARM_FIFO_STS) & BIT_FIFO_FULL;
 }
 
-static int sprd_adi_read(struct sprd_adi *sadi, u32 reg_paddr, u32 *read_val)
+static int sprd_adi_read_check(u32 val, u32 addr)
+{
+       u32 rd_addr;
+
+       rd_addr = (val & RD_ADDR_MASK) >> RD_ADDR_SHIFT;
+
+       if (rd_addr != addr) {
+               pr_err("ADI read error, addr = 0x%x, val = 0x%x\n", addr, val);
+               return -EIO;
+       }
+
+       return 0;
+}
+
+static int sprd_adi_read_check_r2(u32 val, u32 reg)
+{
+       return sprd_adi_read_check(val, reg & RDBACK_ADDR_MASK_R2);
+}
+
+static int sprd_adi_read_check_r3(u32 val, u32 reg)
+{
+       return sprd_adi_read_check(val, (reg & RDBACK_ADDR_MASK_R3) >> RDBACK_ADDR_SHIFT_R3);
+}
+
+static int sprd_adi_read(struct sprd_adi *sadi, u32 reg, u32 *read_val)
 {
        int read_timeout = ADI_READ_TIMEOUT;
        unsigned long flags;
-       u32 val, rd_addr;
+       u32 val;
        int ret = 0;
 
        if (sadi->hwlock) {
@@ -178,11 +234,15 @@ static int sprd_adi_read(struct sprd_adi *sadi, u32 reg_paddr, u32 *read_val)
                }
        }
 
+       ret = sprd_adi_check_addr(sadi, reg);
+       if (ret)
+               goto out;
+
        /*
-        * Set the physical register address need to read into RD_CMD register,
+        * Set the slave address offset need to read into RD_CMD register,
         * then ADI controller will start to transfer automatically.
         */
-       writel_relaxed(reg_paddr, sadi->base + REG_ADI_RD_CMD);
+       writel_relaxed(reg, sadi->base + REG_ADI_RD_CMD);
 
        /*
         * Wait read operation complete, the BIT_RD_CMD_BUSY will be set
@@ -205,18 +265,15 @@ static int sprd_adi_read(struct sprd_adi *sadi, u32 reg_paddr, u32 *read_val)
        }
 
        /*
-        * The return value includes data and read register address, from bit 0
-        * to bit 15 are data, and from bit 16 to bit 30 are read register
-        * address. Then we can check the returned register address to validate
-        * data.
+        * The return value before adi r5p0 includes data and read register
+        * address, from bit 0 to bit 15 are data, and from bit 16 to bit 30
+        * are read register address. Then we can check the returned register
+        * address to validate data.
         */
-       rd_addr = (val & RD_ADDR_MASK) >> RD_ADDR_SHIFT;
-
-       if (rd_addr != (reg_paddr & REG_ADDR_LOW_MASK)) {
-               dev_err(sadi->dev, "read error, reg addr = 0x%x, val = 0x%x\n",
-                       reg_paddr, val);
-               ret = -EIO;
-               goto out;
+       if (sadi->data->read_check) {
+               ret = sadi->data->read_check(val, reg);
+               if (ret < 0)
+                       goto out;
        }
 
        *read_val = val & RD_VALUE_MASK;
@@ -227,9 +284,8 @@ out:
        return ret;
 }
 
-static int sprd_adi_write(struct sprd_adi *sadi, u32 reg_paddr, u32 val)
+static int sprd_adi_write(struct sprd_adi *sadi, u32 reg, u32 val)
 {
-       unsigned long reg = sprd_adi_to_vaddr(sadi, reg_paddr);
        u32 timeout = ADI_FIFO_DRAIN_TIMEOUT;
        unsigned long flags;
        int ret;
@@ -244,6 +300,10 @@ static int sprd_adi_write(struct sprd_adi *sadi, u32 reg_paddr, u32 val)
                }
        }
 
+       ret = sprd_adi_check_addr(sadi, reg);
+       if (ret)
+               goto out;
+
        ret = sprd_adi_drain_fifo(sadi);
        if (ret < 0)
                goto out;
@@ -254,7 +314,8 @@ static int sprd_adi_write(struct sprd_adi *sadi, u32 reg_paddr, u32 val)
         */
        do {
                if (!sprd_adi_fifo_is_full(sadi)) {
-                       writel_relaxed(val, (void __iomem *)reg);
+                       /* we need virtual register address to write. */
+                       writel_relaxed(val, (void __iomem *)(sadi->slave_vbase + reg));
                        break;
                }
 
@@ -277,60 +338,41 @@ static int sprd_adi_transfer_one(struct spi_controller *ctlr,
                                 struct spi_transfer *t)
 {
        struct sprd_adi *sadi = spi_controller_get_devdata(ctlr);
-       u32 phy_reg, val;
+       u32 reg, val;
        int ret;
 
        if (t->rx_buf) {
-               phy_reg = *(u32 *)t->rx_buf + sadi->slave_pbase;
-
-               ret = sprd_adi_check_paddr(sadi, phy_reg);
-               if (ret)
-                       return ret;
-
-               ret = sprd_adi_read(sadi, phy_reg, &val);
-               if (ret)
-                       return ret;
-
+               reg = *(u32 *)t->rx_buf;
+               ret = sprd_adi_read(sadi, reg, &val);
                *(u32 *)t->rx_buf = val;
        } else if (t->tx_buf) {
                u32 *p = (u32 *)t->tx_buf;
-
-               /*
-                * Get the physical register address need to write and convert
-                * the physical address to virtual address. Since we need
-                * virtual register address to write.
-                */
-               phy_reg = *p++ + sadi->slave_pbase;
-               ret = sprd_adi_check_paddr(sadi, phy_reg);
-               if (ret)
-                       return ret;
-
+               reg = *p++;
                val = *p;
-               ret = sprd_adi_write(sadi, phy_reg, val);
-               if (ret)
-                       return ret;
+               ret = sprd_adi_write(sadi, reg, val);
        } else {
                dev_err(sadi->dev, "no buffer for transfer\n");
-               return -EINVAL;
+               ret = -EINVAL;
        }
 
-       return 0;
+       return ret;
 }
 
-static void sprd_adi_set_wdt_rst_mode(struct sprd_adi *sadi)
+static void sprd_adi_set_wdt_rst_mode(void *p)
 {
 #if IS_ENABLED(CONFIG_SPRD_WATCHDOG)
        u32 val;
+       struct sprd_adi *sadi = (struct sprd_adi *)p;
 
-       /* Set default watchdog reboot mode */
-       sprd_adi_read(sadi, sadi->slave_pbase + PMIC_RST_STATUS, &val);
+       /* Init watchdog reset mode */
+       sprd_adi_read(sadi, PMIC_RST_STATUS, &val);
        val |= HWRST_STATUS_WATCHDOG;
-       sprd_adi_write(sadi, sadi->slave_pbase + PMIC_RST_STATUS, val);
+       sprd_adi_write(sadi, PMIC_RST_STATUS, val);
 #endif
 }
 
-static int sprd_adi_restart_handler(struct notifier_block *this,
-                                   unsigned long mode, void *cmd)
+static int sprd_adi_restart(struct notifier_block *this, unsigned long mode,
+                                 void *cmd, struct sprd_adi_wdg *wdg)
 {
        struct sprd_adi *sadi = container_of(this, struct sprd_adi,
                                             restart_handler);
@@ -366,40 +408,40 @@ static int sprd_adi_restart_handler(struct notifier_block *this,
                reboot_mode = HWRST_STATUS_NORMAL;
 
        /* Record the reboot mode */
-       sprd_adi_read(sadi, sadi->slave_pbase + PMIC_RST_STATUS, &val);
+       sprd_adi_read(sadi, wdg->rst_sts, &val);
        val &= ~HWRST_STATUS_WATCHDOG;
        val |= reboot_mode;
-       sprd_adi_write(sadi, sadi->slave_pbase + PMIC_RST_STATUS, val);
+       sprd_adi_write(sadi, wdg->rst_sts, val);
 
        /* Enable the interface clock of the watchdog */
-       sprd_adi_read(sadi, sadi->slave_pbase + PMIC_MODULE_EN, &val);
+       sprd_adi_read(sadi, wdg->wdg_en, &val);
        val |= BIT_WDG_EN;
-       sprd_adi_write(sadi, sadi->slave_pbase + PMIC_MODULE_EN, val);
+       sprd_adi_write(sadi, wdg->wdg_en, val);
 
        /* Enable the work clock of the watchdog */
-       sprd_adi_read(sadi, sadi->slave_pbase + PMIC_CLK_EN, &val);
+       sprd_adi_read(sadi, wdg->wdg_clk, &val);
        val |= BIT_WDG_EN;
-       sprd_adi_write(sadi, sadi->slave_pbase + PMIC_CLK_EN, val);
+       sprd_adi_write(sadi, wdg->wdg_clk, val);
 
        /* Unlock the watchdog */
-       sprd_adi_write(sadi, sadi->slave_pbase + REG_WDG_LOCK, WDG_UNLOCK_KEY);
+       sprd_adi_write(sadi, wdg->base + REG_WDG_LOCK, WDG_UNLOCK_KEY);
 
-       sprd_adi_read(sadi, sadi->slave_pbase + REG_WDG_CTRL, &val);
+       sprd_adi_read(sadi, wdg->base + REG_WDG_CTRL, &val);
        val |= BIT_WDG_NEW;
-       sprd_adi_write(sadi, sadi->slave_pbase + REG_WDG_CTRL, val);
+       sprd_adi_write(sadi, wdg->base + REG_WDG_CTRL, val);
 
        /* Load the watchdog timeout value, 50ms is always enough. */
-       sprd_adi_write(sadi, sadi->slave_pbase + REG_WDG_LOAD_HIGH, 0);
-       sprd_adi_write(sadi, sadi->slave_pbase + REG_WDG_LOAD_LOW,
+       sprd_adi_write(sadi, wdg->base + REG_WDG_LOAD_HIGH, 0);
+       sprd_adi_write(sadi, wdg->base + REG_WDG_LOAD_LOW,
                       WDG_LOAD_VAL & WDG_LOAD_MASK);
 
        /* Start the watchdog to reset system */
-       sprd_adi_read(sadi, sadi->slave_pbase + REG_WDG_CTRL, &val);
+       sprd_adi_read(sadi, wdg->base + REG_WDG_CTRL, &val);
        val |= BIT_WDG_RUN | BIT_WDG_RST;
-       sprd_adi_write(sadi, sadi->slave_pbase + REG_WDG_CTRL, val);
+       sprd_adi_write(sadi, wdg->base + REG_WDG_CTRL, val);
 
        /* Lock the watchdog */
-       sprd_adi_write(sadi, sadi->slave_pbase + REG_WDG_LOCK, ~WDG_UNLOCK_KEY);
+       sprd_adi_write(sadi, wdg->base + REG_WDG_LOCK, ~WDG_UNLOCK_KEY);
 
        mdelay(1000);
 
@@ -407,6 +449,19 @@ static int sprd_adi_restart_handler(struct notifier_block *this,
        return NOTIFY_DONE;
 }
 
+static int sprd_adi_restart_sc9860(struct notifier_block *this,
+                                          unsigned long mode, void *cmd)
+{
+       struct sprd_adi_wdg wdg = {
+               .base = PMIC_WDG_BASE,
+               .rst_sts = PMIC_RST_STATUS,
+               .wdg_en = PMIC_MODULE_EN,
+               .wdg_clk = PMIC_CLK_EN,
+       };
+
+       return sprd_adi_restart(this, mode, cmd, &wdg);
+}
+
 static void sprd_adi_hw_init(struct sprd_adi *sadi)
 {
        struct device_node *np = sadi->dev->of_node;
@@ -458,10 +513,11 @@ static void sprd_adi_hw_init(struct sprd_adi *sadi)
 static int sprd_adi_probe(struct platform_device *pdev)
 {
        struct device_node *np = pdev->dev.of_node;
+       const struct sprd_adi_data *data;
        struct spi_controller *ctlr;
        struct sprd_adi *sadi;
        struct resource *res;
-       u32 num_chipselect;
+       u16 num_chipselect;
        int ret;
 
        if (!np) {
@@ -469,6 +525,12 @@ static int sprd_adi_probe(struct platform_device *pdev)
                return -ENODEV;
        }
 
+       data = of_device_get_match_data(&pdev->dev);
+       if (!data) {
+               dev_err(&pdev->dev, "no matching driver data found\n");
+               return -EINVAL;
+       }
+
        pdev->id = of_alias_get_id(np, "spi");
        num_chipselect = of_get_child_count(np);
 
@@ -486,10 +548,12 @@ static int sprd_adi_probe(struct platform_device *pdev)
                goto put_ctlr;
        }
 
-       sadi->slave_vbase = (unsigned long)sadi->base + ADI_SLAVE_OFFSET;
-       sadi->slave_pbase = res->start + ADI_SLAVE_OFFSET;
+       sadi->slave_vbase = (unsigned long)sadi->base +
+                           data->slave_offset;
+       sadi->slave_pbase = res->start + data->slave_offset;
        sadi->ctlr = ctlr;
        sadi->dev = &pdev->dev;
+       sadi->data = data;
        ret = of_hwspin_lock_get_id(np, 0);
        if (ret > 0 || (IS_ENABLED(CONFIG_HWSPINLOCK) && ret == 0)) {
                sadi->hwlock =
@@ -510,7 +574,9 @@ static int sprd_adi_probe(struct platform_device *pdev)
        }
 
        sprd_adi_hw_init(sadi);
-       sprd_adi_set_wdt_rst_mode(sadi);
+
+       if (sadi->data->wdg_rst)
+               sadi->data->wdg_rst(sadi);
 
        ctlr->dev.of_node = pdev->dev.of_node;
        ctlr->bus_num = pdev->id;
@@ -525,12 +591,14 @@ static int sprd_adi_probe(struct platform_device *pdev)
                goto put_ctlr;
        }
 
-       sadi->restart_handler.notifier_call = sprd_adi_restart_handler;
-       sadi->restart_handler.priority = 128;
-       ret = register_restart_handler(&sadi->restart_handler);
-       if (ret) {
-               dev_err(&pdev->dev, "can not register restart handler\n");
-               goto put_ctlr;
+       if (sadi->data->restart) {
+               sadi->restart_handler.notifier_call = sadi->data->restart;
+               sadi->restart_handler.priority = 128;
+               ret = register_restart_handler(&sadi->restart_handler);
+               if (ret) {
+                       dev_err(&pdev->dev, "can not register restart handler\n");
+                       goto put_ctlr;
+               }
        }
 
        return 0;
@@ -549,9 +617,38 @@ static int sprd_adi_remove(struct platform_device *pdev)
        return 0;
 }
 
+static struct sprd_adi_data sc9860_data = {
+       .slave_offset = ADI_10BIT_SLAVE_OFFSET,
+       .slave_addr_size = ADI_10BIT_SLAVE_ADDR_SIZE,
+       .read_check = sprd_adi_read_check_r2,
+       .restart = sprd_adi_restart_sc9860,
+       .wdg_rst = sprd_adi_set_wdt_rst_mode,
+};
+
+static struct sprd_adi_data sc9863_data = {
+       .slave_offset = ADI_12BIT_SLAVE_OFFSET,
+       .slave_addr_size = ADI_12BIT_SLAVE_ADDR_SIZE,
+       .read_check = sprd_adi_read_check_r3,
+};
+
+static struct sprd_adi_data ums512_data = {
+       .slave_offset = ADI_15BIT_SLAVE_OFFSET,
+       .slave_addr_size = ADI_15BIT_SLAVE_ADDR_SIZE,
+       .read_check = sprd_adi_read_check_r3,
+};
+
 static const struct of_device_id sprd_adi_of_match[] = {
        {
                .compatible = "sprd,sc9860-adi",
+               .data = &sc9860_data,
+       },
+       {
+               .compatible = "sprd,sc9863-adi",
+               .data = &sc9863_data,
+       },
+       {
+               .compatible = "sprd,ums512-adi",
+               .data = &ums512_data,
        },
        { },
 };
index 05618a6..9bd3fd1 100644 (file)
 #define SPI_3WIRE_TX           3
 #define SPI_3WIRE_RX           4
 
+#define STM32_SPI_AUTOSUSPEND_DELAY            1       /* 1 ms */
+
 /*
  * use PIO for small transfers, avoiding DMA setup/teardown overhead for drivers
  * without fifo buffers.
@@ -568,29 +570,30 @@ static void stm32f4_spi_read_rx(struct stm32_spi *spi)
 /**
  * stm32h7_spi_read_rxfifo - Read bytes in Receive Data Register
  * @spi: pointer to the spi controller data structure
- * @flush: boolean indicating that FIFO should be flushed
  *
  * Write in rx_buf depends on remaining bytes to avoid to write beyond
  * rx_buf end.
  */
-static void stm32h7_spi_read_rxfifo(struct stm32_spi *spi, bool flush)
+static void stm32h7_spi_read_rxfifo(struct stm32_spi *spi)
 {
        u32 sr = readl_relaxed(spi->base + STM32H7_SPI_SR);
        u32 rxplvl = FIELD_GET(STM32H7_SPI_SR_RXPLVL, sr);
 
        while ((spi->rx_len > 0) &&
               ((sr & STM32H7_SPI_SR_RXP) ||
-               (flush && ((sr & STM32H7_SPI_SR_RXWNE) || (rxplvl > 0))))) {
+               ((sr & STM32H7_SPI_SR_EOT) &&
+                ((sr & STM32H7_SPI_SR_RXWNE) || (rxplvl > 0))))) {
                u32 offs = spi->cur_xferlen - spi->rx_len;
 
                if ((spi->rx_len >= sizeof(u32)) ||
-                   (flush && (sr & STM32H7_SPI_SR_RXWNE))) {
+                   (sr & STM32H7_SPI_SR_RXWNE)) {
                        u32 *rx_buf32 = (u32 *)(spi->rx_buf + offs);
 
                        *rx_buf32 = readl_relaxed(spi->base + STM32H7_SPI_RXDR);
                        spi->rx_len -= sizeof(u32);
                } else if ((spi->rx_len >= sizeof(u16)) ||
-                          (flush && (rxplvl >= 2 || spi->cur_bpw > 8))) {
+                          (!(sr & STM32H7_SPI_SR_RXWNE) &&
+                           (rxplvl >= 2 || spi->cur_bpw > 8))) {
                        u16 *rx_buf16 = (u16 *)(spi->rx_buf + offs);
 
                        *rx_buf16 = readw_relaxed(spi->base + STM32H7_SPI_RXDR);
@@ -606,8 +609,8 @@ static void stm32h7_spi_read_rxfifo(struct stm32_spi *spi, bool flush)
                rxplvl = FIELD_GET(STM32H7_SPI_SR_RXPLVL, sr);
        }
 
-       dev_dbg(spi->dev, "%s%s: %d bytes left\n", __func__,
-               flush ? "(flush)" : "", spi->rx_len);
+       dev_dbg(spi->dev, "%s: %d bytes left (sr=%08x)\n",
+               __func__, spi->rx_len, sr);
 }
 
 /**
@@ -674,18 +677,12 @@ static void stm32f4_spi_disable(struct stm32_spi *spi)
  * stm32h7_spi_disable - Disable SPI controller
  * @spi: pointer to the spi controller data structure
  *
- * RX-Fifo is flushed when SPI controller is disabled. To prevent any data
- * loss, use stm32h7_spi_read_rxfifo(flush) to read the remaining bytes in
- * RX-Fifo.
- * Normally, if TSIZE has been configured, we should relax the hardware at the
- * reception of the EOT interrupt. But in case of error, EOT will not be
- * raised. So the subsystem unprepare_message call allows us to properly
- * complete the transfer from an hardware point of view.
+ * RX-Fifo is flushed when SPI controller is disabled.
  */
 static void stm32h7_spi_disable(struct stm32_spi *spi)
 {
        unsigned long flags;
-       u32 cr1, sr;
+       u32 cr1;
 
        dev_dbg(spi->dev, "disable controller\n");
 
@@ -698,25 +695,6 @@ static void stm32h7_spi_disable(struct stm32_spi *spi)
                return;
        }
 
-       /* Wait on EOT or suspend the flow */
-       if (readl_relaxed_poll_timeout_atomic(spi->base + STM32H7_SPI_SR,
-                                             sr, !(sr & STM32H7_SPI_SR_EOT),
-                                             10, 100000) < 0) {
-               if (cr1 & STM32H7_SPI_CR1_CSTART) {
-                       writel_relaxed(cr1 | STM32H7_SPI_CR1_CSUSP,
-                                      spi->base + STM32H7_SPI_CR1);
-                       if (readl_relaxed_poll_timeout_atomic(
-                                               spi->base + STM32H7_SPI_SR,
-                                               sr, !(sr & STM32H7_SPI_SR_SUSP),
-                                               10, 100000) < 0)
-                               dev_warn(spi->dev,
-                                        "Suspend request timeout\n");
-               }
-       }
-
-       if (!spi->cur_usedma && spi->rx_buf && (spi->rx_len > 0))
-               stm32h7_spi_read_rxfifo(spi, true);
-
        if (spi->cur_usedma && spi->dma_tx)
                dmaengine_terminate_all(spi->dma_tx);
        if (spi->cur_usedma && spi->dma_rx)
@@ -911,7 +889,7 @@ static irqreturn_t stm32h7_spi_irq_thread(int irq, void *dev_id)
                if (__ratelimit(&rs))
                        dev_dbg_ratelimited(spi->dev, "Communication suspended\n");
                if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0)))
-                       stm32h7_spi_read_rxfifo(spi, false);
+                       stm32h7_spi_read_rxfifo(spi);
                /*
                 * If communication is suspended while using DMA, it means
                 * that something went wrong, so stop the current transfer
@@ -932,8 +910,10 @@ static irqreturn_t stm32h7_spi_irq_thread(int irq, void *dev_id)
 
        if (sr & STM32H7_SPI_SR_EOT) {
                if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0)))
-                       stm32h7_spi_read_rxfifo(spi, true);
-               end = true;
+                       stm32h7_spi_read_rxfifo(spi);
+               if (!spi->cur_usedma ||
+                   (spi->cur_comm == SPI_SIMPLEX_TX || spi->cur_comm == SPI_3WIRE_TX))
+                       end = true;
        }
 
        if (sr & STM32H7_SPI_SR_TXP)
@@ -942,7 +922,7 @@ static irqreturn_t stm32h7_spi_irq_thread(int irq, void *dev_id)
 
        if (sr & STM32H7_SPI_SR_RXP)
                if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0)))
-                       stm32h7_spi_read_rxfifo(spi, false);
+                       stm32h7_spi_read_rxfifo(spi);
 
        writel_relaxed(sr & mask, spi->base + STM32H7_SPI_IFCR);
 
@@ -1041,42 +1021,17 @@ static void stm32f4_spi_dma_tx_cb(void *data)
 }
 
 /**
- * stm32f4_spi_dma_rx_cb - dma callback
+ * stm32_spi_dma_rx_cb - dma callback
  * @data: pointer to the spi controller data structure
  *
  * DMA callback is called when the transfer is complete for DMA RX channel.
  */
-static void stm32f4_spi_dma_rx_cb(void *data)
+static void stm32_spi_dma_rx_cb(void *data)
 {
        struct stm32_spi *spi = data;
 
        spi_finalize_current_transfer(spi->master);
-       stm32f4_spi_disable(spi);
-}
-
-/**
- * stm32h7_spi_dma_cb - dma callback
- * @data: pointer to the spi controller data structure
- *
- * DMA callback is called when the transfer is complete or when an error
- * occurs. If the transfer is complete, EOT flag is raised.
- */
-static void stm32h7_spi_dma_cb(void *data)
-{
-       struct stm32_spi *spi = data;
-       unsigned long flags;
-       u32 sr;
-
-       spin_lock_irqsave(&spi->lock, flags);
-
-       sr = readl_relaxed(spi->base + STM32H7_SPI_SR);
-
-       spin_unlock_irqrestore(&spi->lock, flags);
-
-       if (!(sr & STM32H7_SPI_SR_EOT))
-               dev_warn(spi->dev, "DMA error (sr=0x%08x)\n", sr);
-
-       /* Now wait for EOT, or SUSP or OVR in case of error */
+       spi->cfg->disable(spi);
 }
 
 /**
@@ -1242,11 +1197,13 @@ static void stm32f4_spi_transfer_one_dma_start(struct stm32_spi *spi)
  */
 static void stm32h7_spi_transfer_one_dma_start(struct stm32_spi *spi)
 {
-       /* Enable the interrupts relative to the end of transfer */
-       stm32_spi_set_bits(spi, STM32H7_SPI_IER, STM32H7_SPI_IER_EOTIE |
-                                                STM32H7_SPI_IER_TXTFIE |
-                                                STM32H7_SPI_IER_OVRIE |
-                                                STM32H7_SPI_IER_MODFIE);
+       uint32_t ier = STM32H7_SPI_IER_OVRIE | STM32H7_SPI_IER_MODFIE;
+
+       /* Enable the interrupts */
+       if (spi->cur_comm == SPI_SIMPLEX_TX || spi->cur_comm == SPI_3WIRE_TX)
+               ier |= STM32H7_SPI_IER_EOTIE | STM32H7_SPI_IER_TXTFIE;
+
+       stm32_spi_set_bits(spi, STM32H7_SPI_IER, ier);
 
        stm32_spi_enable(spi);
 
@@ -1645,10 +1602,6 @@ static int stm32_spi_transfer_one(struct spi_master *master,
        struct stm32_spi *spi = spi_master_get_devdata(master);
        int ret;
 
-       /* Don't do anything on 0 bytes transfers */
-       if (transfer->len == 0)
-               return 0;
-
        spi->tx_buf = transfer->tx_buf;
        spi->rx_buf = transfer->rx_buf;
        spi->tx_len = spi->tx_buf ? transfer->len : 0;
@@ -1762,7 +1715,7 @@ static const struct stm32_spi_cfg stm32f4_spi_cfg = {
        .set_mode = stm32f4_spi_set_mode,
        .transfer_one_dma_start = stm32f4_spi_transfer_one_dma_start,
        .dma_tx_cb = stm32f4_spi_dma_tx_cb,
-       .dma_rx_cb = stm32f4_spi_dma_rx_cb,
+       .dma_rx_cb = stm32_spi_dma_rx_cb,
        .transfer_one_irq = stm32f4_spi_transfer_one_irq,
        .irq_handler_event = stm32f4_spi_irq_event,
        .irq_handler_thread = stm32f4_spi_irq_thread,
@@ -1782,8 +1735,11 @@ static const struct stm32_spi_cfg stm32h7_spi_cfg = {
        .set_data_idleness = stm32h7_spi_data_idleness,
        .set_number_of_data = stm32h7_spi_number_of_data,
        .transfer_one_dma_start = stm32h7_spi_transfer_one_dma_start,
-       .dma_rx_cb = stm32h7_spi_dma_cb,
-       .dma_tx_cb = stm32h7_spi_dma_cb,
+       .dma_rx_cb = stm32_spi_dma_rx_cb,
+       /*
+        * dma_tx_cb is not necessary since in case of TX, dma is followed by
+        * SPI access hence handling is performed within the SPI interrupt
+        */
        .transfer_one_irq = stm32h7_spi_transfer_one_irq,
        .irq_handler_thread = stm32h7_spi_irq_thread,
        .baud_rate_div_min = STM32H7_SPI_MBR_DIV_MIN,
@@ -1927,6 +1883,9 @@ static int stm32_spi_probe(struct platform_device *pdev)
        if (spi->dma_tx || spi->dma_rx)
                master->can_dma = stm32_spi_can_dma;
 
+       pm_runtime_set_autosuspend_delay(&pdev->dev,
+                                        STM32_SPI_AUTOSUSPEND_DELAY);
+       pm_runtime_use_autosuspend(&pdev->dev);
        pm_runtime_set_active(&pdev->dev);
        pm_runtime_get_noresume(&pdev->dev);
        pm_runtime_enable(&pdev->dev);
@@ -1938,6 +1897,9 @@ static int stm32_spi_probe(struct platform_device *pdev)
                goto err_pm_disable;
        }
 
+       pm_runtime_mark_last_busy(&pdev->dev);
+       pm_runtime_put_autosuspend(&pdev->dev);
+
        dev_info(&pdev->dev, "driver initialized\n");
 
        return 0;
@@ -1946,6 +1908,7 @@ err_pm_disable:
        pm_runtime_disable(&pdev->dev);
        pm_runtime_put_noidle(&pdev->dev);
        pm_runtime_set_suspended(&pdev->dev);
+       pm_runtime_dont_use_autosuspend(&pdev->dev);
 err_dma_release:
        if (spi->dma_tx)
                dma_release_channel(spi->dma_tx);
@@ -1970,6 +1933,8 @@ static int stm32_spi_remove(struct platform_device *pdev)
        pm_runtime_disable(&pdev->dev);
        pm_runtime_put_noidle(&pdev->dev);
        pm_runtime_set_suspended(&pdev->dev);
+       pm_runtime_dont_use_autosuspend(&pdev->dev);
+
        if (master->dma_tx)
                dma_release_channel(master->dma_tx);
        if (master->dma_rx)
index 5131141..e9de1d9 100644 (file)
@@ -717,12 +717,12 @@ static void tegra_spi_deinit_dma_param(struct tegra_spi_data *tspi,
        dma_release_channel(dma_chan);
 }
 
-static int tegra_spi_set_hw_cs_timing(struct spi_device *spi,
-                                     struct spi_delay *setup,
-                                     struct spi_delay *hold,
-                                     struct spi_delay *inactive)
+static int tegra_spi_set_hw_cs_timing(struct spi_device *spi)
 {
        struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master);
+       struct spi_delay *setup = &spi->cs_setup;
+       struct spi_delay *hold = &spi->cs_hold;
+       struct spi_delay *inactive = &spi->cs_inactive;
        u8 setup_dly, hold_dly, inactive_dly;
        u32 setup_hold;
        u32 spi_cs_timing;
index 6a726c9..ebd27f8 100644 (file)
@@ -1061,33 +1061,12 @@ static int tegra_slink_probe(struct platform_device *pdev)
                dev_err(&pdev->dev, "Can not get clock %d\n", ret);
                goto exit_free_master;
        }
-       ret = clk_prepare(tspi->clk);
-       if (ret < 0) {
-               dev_err(&pdev->dev, "Clock prepare failed %d\n", ret);
-               goto exit_free_master;
-       }
-       ret = clk_enable(tspi->clk);
-       if (ret < 0) {
-               dev_err(&pdev->dev, "Clock enable failed %d\n", ret);
-               goto exit_clk_unprepare;
-       }
-
-       spi_irq = platform_get_irq(pdev, 0);
-       tspi->irq = spi_irq;
-       ret = request_threaded_irq(tspi->irq, tegra_slink_isr,
-                       tegra_slink_isr_thread, IRQF_ONESHOT,
-                       dev_name(&pdev->dev), tspi);
-       if (ret < 0) {
-               dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n",
-                                       tspi->irq);
-               goto exit_clk_disable;
-       }
 
        tspi->rst = devm_reset_control_get_exclusive(&pdev->dev, "spi");
        if (IS_ERR(tspi->rst)) {
                dev_err(&pdev->dev, "can not get reset\n");
                ret = PTR_ERR(tspi->rst);
-               goto exit_free_irq;
+               goto exit_free_master;
        }
 
        tspi->max_buf_size = SLINK_FIFO_DEPTH << 2;
@@ -1095,7 +1074,7 @@ static int tegra_slink_probe(struct platform_device *pdev)
 
        ret = tegra_slink_init_dma_param(tspi, true);
        if (ret < 0)
-               goto exit_free_irq;
+               goto exit_free_master;
        ret = tegra_slink_init_dma_param(tspi, false);
        if (ret < 0)
                goto exit_rx_dma_free;
@@ -1106,16 +1085,9 @@ static int tegra_slink_probe(struct platform_device *pdev)
        init_completion(&tspi->xfer_completion);
 
        pm_runtime_enable(&pdev->dev);
-       if (!pm_runtime_enabled(&pdev->dev)) {
-               ret = tegra_slink_runtime_resume(&pdev->dev);
-               if (ret)
-                       goto exit_pm_disable;
-       }
-
-       ret = pm_runtime_get_sync(&pdev->dev);
-       if (ret < 0) {
+       ret = pm_runtime_resume_and_get(&pdev->dev);
+       if (ret) {
                dev_err(&pdev->dev, "pm runtime get failed, e = %d\n", ret);
-               pm_runtime_put_noidle(&pdev->dev);
                goto exit_pm_disable;
        }
 
@@ -1123,33 +1095,43 @@ static int tegra_slink_probe(struct platform_device *pdev)
        udelay(2);
        reset_control_deassert(tspi->rst);
 
+       spi_irq = platform_get_irq(pdev, 0);
+       tspi->irq = spi_irq;
+       ret = request_threaded_irq(tspi->irq, tegra_slink_isr,
+                                  tegra_slink_isr_thread, IRQF_ONESHOT,
+                                  dev_name(&pdev->dev), tspi);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n",
+                       tspi->irq);
+               goto exit_pm_put;
+       }
+
        tspi->def_command_reg  = SLINK_M_S;
        tspi->def_command2_reg = SLINK_CS_ACTIVE_BETWEEN;
        tegra_slink_writel(tspi, tspi->def_command_reg, SLINK_COMMAND);
        tegra_slink_writel(tspi, tspi->def_command2_reg, SLINK_COMMAND2);
-       pm_runtime_put(&pdev->dev);
 
        master->dev.of_node = pdev->dev.of_node;
-       ret = devm_spi_register_master(&pdev->dev, master);
+       ret = spi_register_master(master);
        if (ret < 0) {
                dev_err(&pdev->dev, "can not register to master err %d\n", ret);
-               goto exit_pm_disable;
+               goto exit_free_irq;
        }
+
+       pm_runtime_put(&pdev->dev);
+
        return ret;
 
+exit_free_irq:
+       free_irq(spi_irq, tspi);
+exit_pm_put:
+       pm_runtime_put(&pdev->dev);
 exit_pm_disable:
        pm_runtime_disable(&pdev->dev);
-       if (!pm_runtime_status_suspended(&pdev->dev))
-               tegra_slink_runtime_suspend(&pdev->dev);
+
        tegra_slink_deinit_dma_param(tspi, false);
 exit_rx_dma_free:
        tegra_slink_deinit_dma_param(tspi, true);
-exit_free_irq:
-       free_irq(spi_irq, tspi);
-exit_clk_disable:
-       clk_disable(tspi->clk);
-exit_clk_unprepare:
-       clk_unprepare(tspi->clk);
 exit_free_master:
        spi_master_put(master);
        return ret;
@@ -1160,10 +1142,11 @@ static int tegra_slink_remove(struct platform_device *pdev)
        struct spi_master *master = platform_get_drvdata(pdev);
        struct tegra_slink_data *tspi = spi_master_get_devdata(master);
 
+       spi_unregister_master(master);
+
        free_irq(tspi->irq, tspi);
 
-       clk_disable(tspi->clk);
-       clk_unprepare(tspi->clk);
+       pm_runtime_disable(&pdev->dev);
 
        if (tspi->tx_dma_chan)
                tegra_slink_deinit_dma_param(tspi, false);
@@ -1171,10 +1154,6 @@ static int tegra_slink_remove(struct platform_device *pdev)
        if (tspi->rx_dma_chan)
                tegra_slink_deinit_dma_param(tspi, true);
 
-       pm_runtime_disable(&pdev->dev);
-       if (!pm_runtime_status_suspended(&pdev->dev))
-               tegra_slink_runtime_suspend(&pdev->dev);
-
        return 0;
 }
 
index 9262c64..cfa222c 100644 (file)
@@ -545,7 +545,7 @@ static int zynq_qspi_exec_mem_op(struct spi_mem *mem,
                zynq_qspi_write_op(xqspi, ZYNQ_QSPI_FIFO_DEPTH, true);
                zynq_qspi_write(xqspi, ZYNQ_QSPI_IEN_OFFSET,
                                ZYNQ_QSPI_IXR_RXTX_MASK);
-               if (!wait_for_completion_interruptible_timeout(&xqspi->data_completion,
+               if (!wait_for_completion_timeout(&xqspi->data_completion,
                                                               msecs_to_jiffies(1000)))
                        err = -ETIMEDOUT;
        }
@@ -563,7 +563,7 @@ static int zynq_qspi_exec_mem_op(struct spi_mem *mem,
                zynq_qspi_write_op(xqspi, ZYNQ_QSPI_FIFO_DEPTH, true);
                zynq_qspi_write(xqspi, ZYNQ_QSPI_IEN_OFFSET,
                                ZYNQ_QSPI_IXR_RXTX_MASK);
-               if (!wait_for_completion_interruptible_timeout(&xqspi->data_completion,
+               if (!wait_for_completion_timeout(&xqspi->data_completion,
                                                               msecs_to_jiffies(1000)))
                        err = -ETIMEDOUT;
        }
@@ -579,7 +579,7 @@ static int zynq_qspi_exec_mem_op(struct spi_mem *mem,
                zynq_qspi_write_op(xqspi, ZYNQ_QSPI_FIFO_DEPTH, true);
                zynq_qspi_write(xqspi, ZYNQ_QSPI_IEN_OFFSET,
                                ZYNQ_QSPI_IXR_RXTX_MASK);
-               if (!wait_for_completion_interruptible_timeout(&xqspi->data_completion,
+               if (!wait_for_completion_timeout(&xqspi->data_completion,
                                                               msecs_to_jiffies(1000)))
                        err = -ETIMEDOUT;
 
@@ -603,7 +603,7 @@ static int zynq_qspi_exec_mem_op(struct spi_mem *mem,
                zynq_qspi_write_op(xqspi, ZYNQ_QSPI_FIFO_DEPTH, true);
                zynq_qspi_write(xqspi, ZYNQ_QSPI_IEN_OFFSET,
                                ZYNQ_QSPI_IXR_RXTX_MASK);
-               if (!wait_for_completion_interruptible_timeout(&xqspi->data_completion,
+               if (!wait_for_completion_timeout(&xqspi->data_completion,
                                                               msecs_to_jiffies(1000)))
                        err = -ETIMEDOUT;
        }
index c991811..65d14af 100644 (file)
@@ -58,6 +58,10 @@ modalias_show(struct device *dev, struct device_attribute *a, char *buf)
        const struct spi_device *spi = to_spi_device(dev);
        int len;
 
+       len = of_device_modalias(dev, buf, PAGE_SIZE);
+       if (len != -ENODEV)
+               return len;
+
        len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
        if (len != -ENODEV)
                return len;
@@ -842,9 +846,9 @@ static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
        if (spi->cs_gpiod || gpio_is_valid(spi->cs_gpio) ||
            !spi->controller->set_cs_timing) {
                if (activate)
-                       spi_delay_exec(&spi->controller->cs_setup, NULL);
+                       spi_delay_exec(&spi->cs_setup, NULL);
                else
-                       spi_delay_exec(&spi->controller->cs_hold, NULL);
+                       spi_delay_exec(&spi->cs_hold, NULL);
        }
 
        if (spi->mode & SPI_CS_HIGH)
@@ -887,7 +891,7 @@ static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
        if (spi->cs_gpiod || gpio_is_valid(spi->cs_gpio) ||
            !spi->controller->set_cs_timing) {
                if (!activate)
-                       spi_delay_exec(&spi->controller->cs_inactive, NULL);
+                       spi_delay_exec(&spi->cs_inactive, NULL);
        }
 }
 
index 6f5fe50..c8a6256 100644 (file)
@@ -1904,8 +1904,8 @@ int __atomisp_streamoff(struct file *file, void *fh, enum v4l2_buf_type type)
        dev_dbg(isp->dev, "Stop stream on pad %d for asd%d\n",
                atomisp_subdev_source_pad(vdev), asd->index);
 
-       BUG_ON(!rt_mutex_is_locked(&isp->mutex));
-       BUG_ON(!mutex_is_locked(&isp->streamoff_mutex));
+       lockdep_assert_held(&isp->mutex);
+       lockdep_assert_held(&isp->streamoff_mutex);
 
        if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
                dev_dbg(isp->dev, "unsupported v4l2 buf type\n");
diff --git a/drivers/staging/media/av7110/audio.h b/drivers/staging/media/av7110/audio.h
deleted file mode 100644 (file)
index 2f869da..0000000
+++ /dev/null
@@ -1,101 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1+ WITH Linux-syscall-note */
-/*
- * audio.h - DEPRECATED MPEG-TS audio decoder API
- *
- * NOTE: should not be used on future drivers
- *
- * Copyright (C) 2000 Ralph  Metzler <ralph@convergence.de>
- *                  & Marcus Metzler <marcus@convergence.de>
- *                    for convergence integrated media GmbH
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Lesser Public License
- * as published by the Free Software Foundation; either version 2.1
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
- *
- */
-
-#ifndef _DVBAUDIO_H_
-#define _DVBAUDIO_H_
-
-#include <linux/types.h>
-
-typedef enum {
-       AUDIO_SOURCE_DEMUX, /* Select the demux as the main source */
-       AUDIO_SOURCE_MEMORY /* Select internal memory as the main source */
-} audio_stream_source_t;
-
-
-typedef enum {
-       AUDIO_STOPPED,      /* Device is stopped */
-       AUDIO_PLAYING,      /* Device is currently playing */
-       AUDIO_PAUSED        /* Device is paused */
-} audio_play_state_t;
-
-
-typedef enum {
-       AUDIO_STEREO,
-       AUDIO_MONO_LEFT,
-       AUDIO_MONO_RIGHT,
-       AUDIO_MONO,
-       AUDIO_STEREO_SWAPPED
-} audio_channel_select_t;
-
-
-typedef struct audio_mixer {
-       unsigned int volume_left;
-       unsigned int volume_right;
-  /* what else do we need? bass, pass-through, ... */
-} audio_mixer_t;
-
-
-typedef struct audio_status {
-       int                    AV_sync_state;  /* sync audio and video? */
-       int                    mute_state;     /* audio is muted */
-       audio_play_state_t     play_state;     /* current playback state */
-       audio_stream_source_t  stream_source;  /* current stream source */
-       audio_channel_select_t channel_select; /* currently selected channel */
-       int                    bypass_mode;    /* pass on audio data to */
-       audio_mixer_t          mixer_state;    /* current mixer state */
-} audio_status_t;                              /* separate decoder hardware */
-
-
-/* for GET_CAPABILITIES and SET_FORMAT, the latter should only set one bit */
-#define AUDIO_CAP_DTS    1
-#define AUDIO_CAP_LPCM   2
-#define AUDIO_CAP_MP1    4
-#define AUDIO_CAP_MP2    8
-#define AUDIO_CAP_MP3   16
-#define AUDIO_CAP_AAC   32
-#define AUDIO_CAP_OGG   64
-#define AUDIO_CAP_SDDS 128
-#define AUDIO_CAP_AC3  256
-
-#define AUDIO_STOP                 _IO('o', 1)
-#define AUDIO_PLAY                 _IO('o', 2)
-#define AUDIO_PAUSE                _IO('o', 3)
-#define AUDIO_CONTINUE             _IO('o', 4)
-#define AUDIO_SELECT_SOURCE        _IO('o', 5)
-#define AUDIO_SET_MUTE             _IO('o', 6)
-#define AUDIO_SET_AV_SYNC          _IO('o', 7)
-#define AUDIO_SET_BYPASS_MODE      _IO('o', 8)
-#define AUDIO_CHANNEL_SELECT       _IO('o', 9)
-#define AUDIO_GET_STATUS           _IOR('o', 10, audio_status_t)
-
-#define AUDIO_GET_CAPABILITIES     _IOR('o', 11, unsigned int)
-#define AUDIO_CLEAR_BUFFER         _IO('o',  12)
-#define AUDIO_SET_ID               _IO('o', 13)
-#define AUDIO_SET_MIXER            _IOW('o', 14, audio_mixer_t)
-#define AUDIO_SET_STREAMTYPE       _IO('o', 15)
-#define AUDIO_BILINGUAL_CHANNEL_SELECT _IO('o', 20)
-
-#endif /* _DVBAUDIO_H_ */
index b8e8fc8..809d938 100644 (file)
@@ -9,12 +9,11 @@
 #include <linux/input.h>
 #include <linux/time.h>
 
-#include "video.h"
-#include "audio.h"
-#include "osd.h"
-
+#include <linux/dvb/video.h>
+#include <linux/dvb/audio.h>
 #include <linux/dvb/dmx.h>
 #include <linux/dvb/ca.h>
+#include <linux/dvb/osd.h>
 #include <linux/dvb/net.h>
 #include <linux/mutex.h>
 
diff --git a/drivers/staging/media/av7110/osd.h b/drivers/staging/media/av7110/osd.h
deleted file mode 100644 (file)
index 858997c..0000000
+++ /dev/null
@@ -1,181 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1+ WITH Linux-syscall-note */
-/*
- * osd.h - DEPRECATED On Screen Display API
- *
- * NOTE: should not be used on future drivers
- *
- * Copyright (C) 2001 Ralph  Metzler <ralph@convergence.de>
- *                  & Marcus Metzler <marcus@convergence.de>
- *                    for convergence integrated media GmbH
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Lesser Public License
- * as published by the Free Software Foundation; either version 2.1
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
- *
- */
-
-#ifndef _DVBOSD_H_
-#define _DVBOSD_H_
-
-#include <linux/compiler.h>
-
-typedef enum {
-       /* All functions return -2 on "not open" */
-       OSD_Close = 1,  /* () */
-       /*
-        * Disables OSD and releases the buffers
-        * returns 0 on success
-        */
-       OSD_Open,       /* (x0,y0,x1,y1,BitPerPixel[2/4/8](color&0x0F),mix[0..15](color&0xF0)) */
-       /*
-        * Opens OSD with this size and bit depth
-        * returns 0 on success, -1 on DRAM allocation error, -2 on "already open"
-        */
-       OSD_Show,       /* () */
-       /*
-        * enables OSD mode
-        * returns 0 on success
-        */
-       OSD_Hide,       /* () */
-       /*
-        * disables OSD mode
-        * returns 0 on success
-        */
-       OSD_Clear,      /* () */
-       /*
-        * Sets all pixel to color 0
-        * returns 0 on success
-        */
-       OSD_Fill,       /* (color) */
-       /*
-        * Sets all pixel to color <col>
-        * returns 0 on success
-        */
-       OSD_SetColor,   /* (color,R{x0},G{y0},B{x1},opacity{y1}) */
-       /*
-        * set palette entry <num> to <r,g,b>, <mix> and <trans> apply
-        * R,G,B: 0..255
-        * R=Red, G=Green, B=Blue
-        * opacity=0:      pixel opacity 0% (only video pixel shows)
-        * opacity=1..254: pixel opacity as specified in header
-        * opacity=255:    pixel opacity 100% (only OSD pixel shows)
-        * returns 0 on success, -1 on error
-        */
-       OSD_SetPalette, /* (firstcolor{color},lastcolor{x0},data) */
-       /*
-        * Set a number of entries in the palette
-        * sets the entries "firstcolor" through "lastcolor" from the array "data"
-        * data has 4 byte for each color:
-        * R,G,B, and a opacity value: 0->transparent, 1..254->mix, 255->pixel
-        */
-       OSD_SetTrans,   /* (transparency{color}) */
-       /*
-        * Sets transparency of mixed pixel (0..15)
-        * returns 0 on success
-        */
-       OSD_SetPixel,   /* (x0,y0,color) */
-       /*
-        * sets pixel <x>,<y> to color number <col>
-        * returns 0 on success, -1 on error
-        */
-       OSD_GetPixel,   /* (x0,y0) */
-       /* returns color number of pixel <x>,<y>,  or -1 */
-       OSD_SetRow,     /* (x0,y0,x1,data) */
-       /*
-        * fills pixels x0,y through  x1,y with the content of data[]
-        * returns 0 on success, -1 on clipping all pixel (no pixel drawn)
-        */
-       OSD_SetBlock,   /* (x0,y0,x1,y1,increment{color},data) */
-       /*
-        * fills pixels x0,y0 through  x1,y1 with the content of data[]
-        * inc contains the width of one line in the data block,
-        * inc<=0 uses blockwidth as linewidth
-        * returns 0 on success, -1 on clipping all pixel
-        */
-       OSD_FillRow,    /* (x0,y0,x1,color) */
-       /*
-        * fills pixels x0,y through  x1,y with the color <col>
-        * returns 0 on success, -1 on clipping all pixel
-        */
-       OSD_FillBlock,  /* (x0,y0,x1,y1,color) */
-       /*
-        * fills pixels x0,y0 through  x1,y1 with the color <col>
-        * returns 0 on success, -1 on clipping all pixel
-        */
-       OSD_Line,       /* (x0,y0,x1,y1,color) */
-       /*
-        * draw a line from x0,y0 to x1,y1 with the color <col>
-        * returns 0 on success
-        */
-       OSD_Query,      /* (x0,y0,x1,y1,xasp{color}}), yasp=11 */
-       /*
-        * fills parameters with the picture dimensions and the pixel aspect ratio
-        * returns 0 on success
-        */
-       OSD_Test,       /* () */
-       /*
-        * draws a test picture. for debugging purposes only
-        * returns 0 on success
-        * TODO: remove "test" in final version
-        */
-       OSD_Text,       /* (x0,y0,size,color,text) */
-       OSD_SetWindow,  /* (x0) set window with number 0<x0<8 as current */
-       OSD_MoveWindow, /* move current window to (x0, y0) */
-       OSD_OpenRaw,    /* Open other types of OSD windows */
-} OSD_Command;
-
-typedef struct osd_cmd_s {
-       OSD_Command cmd;
-       int x0;
-       int y0;
-       int x1;
-       int y1;
-       int color;
-       void __user *data;
-} osd_cmd_t;
-
-/* OSD_OpenRaw: set 'color' to desired window type */
-typedef enum {
-       OSD_BITMAP1,           /* 1 bit bitmap */
-       OSD_BITMAP2,           /* 2 bit bitmap */
-       OSD_BITMAP4,           /* 4 bit bitmap */
-       OSD_BITMAP8,           /* 8 bit bitmap */
-       OSD_BITMAP1HR,         /* 1 Bit bitmap half resolution */
-       OSD_BITMAP2HR,         /* 2 bit bitmap half resolution */
-       OSD_BITMAP4HR,         /* 4 bit bitmap half resolution */
-       OSD_BITMAP8HR,         /* 8 bit bitmap half resolution */
-       OSD_YCRCB422,          /* 4:2:2 YCRCB Graphic Display */
-       OSD_YCRCB444,          /* 4:4:4 YCRCB Graphic Display */
-       OSD_YCRCB444HR,        /* 4:4:4 YCRCB graphic half resolution */
-       OSD_VIDEOTSIZE,        /* True Size Normal MPEG Video Display */
-       OSD_VIDEOHSIZE,        /* MPEG Video Display Half Resolution */
-       OSD_VIDEOQSIZE,        /* MPEG Video Display Quarter Resolution */
-       OSD_VIDEODSIZE,        /* MPEG Video Display Double Resolution */
-       OSD_VIDEOTHSIZE,       /* True Size MPEG Video Display Half Resolution */
-       OSD_VIDEOTQSIZE,       /* True Size MPEG Video Display Quarter Resolution*/
-       OSD_VIDEOTDSIZE,       /* True Size MPEG Video Display Double Resolution */
-       OSD_VIDEONSIZE,        /* Full Size MPEG Video Display */
-       OSD_CURSOR             /* Cursor */
-} osd_raw_window_t;
-
-typedef struct osd_cap_s {
-       int  cmd;
-#define OSD_CAP_MEMSIZE         1  /* memory size */
-       long val;
-} osd_cap_t;
-
-
-#define OSD_SEND_CMD            _IOW('o', 160, osd_cmd_t)
-#define OSD_GET_CAPABILITY      _IOR('o', 161, osd_cap_t)
-
-#endif
diff --git a/drivers/staging/media/av7110/video.h b/drivers/staging/media/av7110/video.h
deleted file mode 100644 (file)
index 179f1ec..0000000
+++ /dev/null
@@ -1,220 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1+ WITH Linux-syscall-note */
-/*
- * video.h - DEPRECATED MPEG-TS video decoder API
- *
- * NOTE: should not be used on future drivers
- *
- * Copyright (C) 2000 Marcus Metzler <marcus@convergence.de>
- *                  & Ralph  Metzler <ralph@convergence.de>
- *                    for convergence integrated media GmbH
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public License
- * as published by the Free Software Foundation; either version 2.1
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
- *
- */
-
-#ifndef _UAPI_DVBVIDEO_H_
-#define _UAPI_DVBVIDEO_H_
-
-#include <linux/types.h>
-#ifndef __KERNEL__
-#include <time.h>
-#endif
-
-typedef enum {
-       VIDEO_FORMAT_4_3,     /* Select 4:3 format */
-       VIDEO_FORMAT_16_9,    /* Select 16:9 format. */
-       VIDEO_FORMAT_221_1    /* 2.21:1 */
-} video_format_t;
-
-
-typedef enum {
-       VIDEO_PAN_SCAN,       /* use pan and scan format */
-       VIDEO_LETTER_BOX,     /* use letterbox format */
-       VIDEO_CENTER_CUT_OUT  /* use center cut out format */
-} video_displayformat_t;
-
-typedef struct {
-       int w;
-       int h;
-       video_format_t aspect_ratio;
-} video_size_t;
-
-typedef enum {
-       VIDEO_SOURCE_DEMUX, /* Select the demux as the main source */
-       VIDEO_SOURCE_MEMORY /* If this source is selected, the stream
-                              comes from the user through the write
-                              system call */
-} video_stream_source_t;
-
-
-typedef enum {
-       VIDEO_STOPPED, /* Video is stopped */
-       VIDEO_PLAYING, /* Video is currently playing */
-       VIDEO_FREEZED  /* Video is freezed */
-} video_play_state_t;
-
-
-/* Decoder commands */
-#define VIDEO_CMD_PLAY        (0)
-#define VIDEO_CMD_STOP        (1)
-#define VIDEO_CMD_FREEZE      (2)
-#define VIDEO_CMD_CONTINUE    (3)
-
-/* Flags for VIDEO_CMD_FREEZE */
-#define VIDEO_CMD_FREEZE_TO_BLACK      (1 << 0)
-
-/* Flags for VIDEO_CMD_STOP */
-#define VIDEO_CMD_STOP_TO_BLACK                (1 << 0)
-#define VIDEO_CMD_STOP_IMMEDIATELY     (1 << 1)
-
-/* Play input formats: */
-/* The decoder has no special format requirements */
-#define VIDEO_PLAY_FMT_NONE         (0)
-/* The decoder requires full GOPs */
-#define VIDEO_PLAY_FMT_GOP          (1)
-
-/* The structure must be zeroed before use by the application
-   This ensures it can be extended safely in the future. */
-struct video_command {
-       __u32 cmd;
-       __u32 flags;
-       union {
-               struct {
-                       __u64 pts;
-               } stop;
-
-               struct {
-                       /* 0 or 1000 specifies normal speed,
-                          1 specifies forward single stepping,
-                          -1 specifies backward single stepping,
-                          >1: playback at speed/1000 of the normal speed,
-                          <-1: reverse playback at (-speed/1000) of the normal speed. */
-                       __s32 speed;
-                       __u32 format;
-               } play;
-
-               struct {
-                       __u32 data[16];
-               } raw;
-       };
-};
-
-/* FIELD_UNKNOWN can be used if the hardware does not know whether
-   the Vsync is for an odd, even or progressive (i.e. non-interlaced)
-   field. */
-#define VIDEO_VSYNC_FIELD_UNKNOWN      (0)
-#define VIDEO_VSYNC_FIELD_ODD          (1)
-#define VIDEO_VSYNC_FIELD_EVEN         (2)
-#define VIDEO_VSYNC_FIELD_PROGRESSIVE  (3)
-
-struct video_event {
-       __s32 type;
-#define VIDEO_EVENT_SIZE_CHANGED       1
-#define VIDEO_EVENT_FRAME_RATE_CHANGED 2
-#define VIDEO_EVENT_DECODER_STOPPED    3
-#define VIDEO_EVENT_VSYNC              4
-       /* unused, make sure to use atomic time for y2038 if it ever gets used */
-       long timestamp;
-       union {
-               video_size_t size;
-               unsigned int frame_rate;        /* in frames per 1000sec */
-               unsigned char vsync_field;      /* unknown/odd/even/progressive */
-       } u;
-};
-
-
-struct video_status {
-       int                   video_blank;   /* blank video on freeze? */
-       video_play_state_t    play_state;    /* current state of playback */
-       video_stream_source_t stream_source; /* current source (demux/memory) */
-       video_format_t        video_format;  /* current aspect ratio of stream*/
-       video_displayformat_t display_format;/* selected cropping mode */
-};
-
-
-struct video_still_picture {
-       char __user *iFrame;        /* pointer to a single iframe in memory */
-       __s32 size;
-};
-
-
-typedef __u16 video_attributes_t;
-/*   bits: descr. */
-/*   15-14 Video compression mode (0=MPEG-1, 1=MPEG-2) */
-/*   13-12 TV system (0=525/60, 1=625/50) */
-/*   11-10 Aspect ratio (0=4:3, 3=16:9) */
-/*    9- 8 permitted display mode on 4:3 monitor (0=both, 1=only pan-sca */
-/*    7    line 21-1 data present in GOP (1=yes, 0=no) */
-/*    6    line 21-2 data present in GOP (1=yes, 0=no) */
-/*    5- 3 source resolution (0=720x480/576, 1=704x480/576, 2=352x480/57 */
-/*    2    source letterboxed (1=yes, 0=no) */
-/*    0    film/camera mode (0=
- *camera, 1=film (625/50 only)) */
-
-
-/* bit definitions for capabilities: */
-/* can the hardware decode MPEG1 and/or MPEG2? */
-#define VIDEO_CAP_MPEG1   1
-#define VIDEO_CAP_MPEG2   2
-/* can you send a system and/or program stream to video device?
-   (you still have to open the video and the audio device but only
-    send the stream to the video device) */
-#define VIDEO_CAP_SYS     4
-#define VIDEO_CAP_PROG    8
-/* can the driver also handle SPU, NAVI and CSS encoded data?
-   (CSS API is not present yet) */
-#define VIDEO_CAP_SPU    16
-#define VIDEO_CAP_NAVI   32
-#define VIDEO_CAP_CSS    64
-
-
-#define VIDEO_STOP                 _IO('o', 21)
-#define VIDEO_PLAY                 _IO('o', 22)
-#define VIDEO_FREEZE               _IO('o', 23)
-#define VIDEO_CONTINUE             _IO('o', 24)
-#define VIDEO_SELECT_SOURCE        _IO('o', 25)
-#define VIDEO_SET_BLANK            _IO('o', 26)
-#define VIDEO_GET_STATUS           _IOR('o', 27, struct video_status)
-#define VIDEO_GET_EVENT            _IOR('o', 28, struct video_event)
-#define VIDEO_SET_DISPLAY_FORMAT   _IO('o', 29)
-#define VIDEO_STILLPICTURE         _IOW('o', 30, struct video_still_picture)
-#define VIDEO_FAST_FORWARD         _IO('o', 31)
-#define VIDEO_SLOWMOTION           _IO('o', 32)
-#define VIDEO_GET_CAPABILITIES     _IOR('o', 33, unsigned int)
-#define VIDEO_CLEAR_BUFFER         _IO('o',  34)
-#define VIDEO_SET_STREAMTYPE       _IO('o', 36)
-#define VIDEO_SET_FORMAT           _IO('o', 37)
-#define VIDEO_GET_SIZE             _IOR('o', 55, video_size_t)
-
-/**
- * VIDEO_GET_PTS
- *
- * Read the 33 bit presentation time stamp as defined
- * in ITU T-REC-H.222.0 / ISO/IEC 13818-1.
- *
- * The PTS should belong to the currently played
- * frame if possible, but may also be a value close to it
- * like the PTS of the last decoded frame or the last PTS
- * extracted by the PES parser.
- */
-#define VIDEO_GET_PTS              _IOR('o', 57, __u64)
-
-/* Read the number of displayed frames since the decoder was started */
-#define VIDEO_GET_FRAME_COUNT     _IOR('o', 58, __u64)
-
-#define VIDEO_COMMAND             _IOWR('o', 59, struct video_command)
-#define VIDEO_TRY_COMMAND         _IOWR('o', 60, struct video_command)
-
-#endif /* _UAPI_DVBVIDEO_H_ */
index 691030e..f9bdf4e 100644 (file)
@@ -422,7 +422,6 @@ static void mt7621_pcie_init_ports(struct mt7621_pcie *pcie)
                        dev_err(dev, "pcie%d no card, disable it (RST & CLK)\n",
                                slot);
                        mt7621_control_assert(port);
-                       clk_disable_unprepare(port->clk);
                        port->enabled = false;
 
                        if (slot == 0) {
index 2297427..4eff3fd 100644 (file)
 #define FWBUFF_ALIGN_SZ 512
 #define MAX_DUMP_FWSZ (48 * 1024)
 
+static void rtl871x_load_fw_fail(struct _adapter *adapter)
+{
+       struct usb_device *udev = adapter->dvobjpriv.pusbdev;
+       struct device *dev = &udev->dev;
+       struct device *parent = dev->parent;
+
+       complete(&adapter->rtl8712_fw_ready);
+
+       dev_err(&udev->dev, "r8712u: Firmware request failed\n");
+
+       if (parent)
+               device_lock(parent);
+
+       device_release_driver(dev);
+
+       if (parent)
+               device_unlock(parent);
+}
+
 static void rtl871x_load_fw_cb(const struct firmware *firmware, void *context)
 {
        struct _adapter *adapter = context;
 
        if (!firmware) {
-               struct usb_device *udev = adapter->dvobjpriv.pusbdev;
-               struct usb_interface *usb_intf = adapter->pusb_intf;
-
-               dev_err(&udev->dev, "r8712u: Firmware request failed\n");
-               usb_put_dev(udev);
-               usb_set_intfdata(usb_intf, NULL);
-               r8712_free_drv_sw(adapter);
-               adapter->dvobj_deinit(adapter);
-               complete(&adapter->rtl8712_fw_ready);
-               free_netdev(adapter->pnetdev);
+               rtl871x_load_fw_fail(adapter);
                return;
        }
        adapter->fw = firmware;
index 5901026..d5fc902 100644 (file)
@@ -1820,3 +1820,11 @@ void LedControl871x(struct _adapter *padapter, enum LED_CTL_MODE LedAction)
                break;
        }
 }
+
+void r8712_flush_led_works(struct _adapter *padapter)
+{
+       struct led_priv *pledpriv = &padapter->ledpriv;
+
+       flush_work(&pledpriv->SwLed0.BlinkWorkItem);
+       flush_work(&pledpriv->SwLed1.BlinkWorkItem);
+}
index ee19c87..2f07681 100644 (file)
@@ -112,6 +112,7 @@ struct led_priv {
 void r8712_InitSwLeds(struct _adapter *padapter);
 void r8712_DeInitSwLeds(struct _adapter *padapter);
 void LedControl871x(struct _adapter *padapter, enum LED_CTL_MODE LedAction);
+void r8712_flush_led_works(struct _adapter *padapter);
 
 #endif
 
index 23cff43..cd6d9ff 100644 (file)
@@ -224,3 +224,11 @@ void r8712_unregister_cmd_alive(struct _adapter *padapter)
        }
        mutex_unlock(&pwrctrl->mutex_lock);
 }
+
+void r8712_flush_rwctrl_works(struct _adapter *padapter)
+{
+       struct pwrctrl_priv *pwrctrl = &padapter->pwrctrlpriv;
+
+       flush_work(&pwrctrl->SetPSModeWorkItem);
+       flush_work(&pwrctrl->rpwm_workitem);
+}
index bf6623c..b35b9c7 100644 (file)
@@ -108,5 +108,6 @@ void r8712_cpwm_int_hdl(struct _adapter *padapter,
 void r8712_set_ps_mode(struct _adapter *padapter, uint ps_mode,
                        uint smart_ps);
 void r8712_set_rpwm(struct _adapter *padapter, u8 val8);
+void r8712_flush_rwctrl_works(struct _adapter *padapter);
 
 #endif  /* __RTL871X_PWRCTRL_H_ */
index 2434b13..505ebeb 100644 (file)
@@ -591,35 +591,30 @@ static void r871xu_dev_remove(struct usb_interface *pusb_intf)
 {
        struct net_device *pnetdev = usb_get_intfdata(pusb_intf);
        struct usb_device *udev = interface_to_usbdev(pusb_intf);
+       struct _adapter *padapter = netdev_priv(pnetdev);
+
+       /* never exit with a firmware callback pending */
+       wait_for_completion(&padapter->rtl8712_fw_ready);
+       usb_set_intfdata(pusb_intf, NULL);
+       release_firmware(padapter->fw);
+       if (drvpriv.drv_registered)
+               padapter->surprise_removed = true;
+       if (pnetdev->reg_state != NETREG_UNINITIALIZED)
+               unregister_netdev(pnetdev); /* will call netdev_close() */
+       r8712_flush_rwctrl_works(padapter);
+       r8712_flush_led_works(padapter);
+       udelay(1);
+       /* Stop driver mlme relation timer */
+       r8712_stop_drv_timers(padapter);
+       r871x_dev_unload(padapter);
+       r8712_free_drv_sw(padapter);
+       free_netdev(pnetdev);
+
+       /* decrease the reference count of the usb device structure
+        * when disconnect
+        */
+       usb_put_dev(udev);
 
-       if (pnetdev) {
-               struct _adapter *padapter = netdev_priv(pnetdev);
-
-               /* never exit with a firmware callback pending */
-               wait_for_completion(&padapter->rtl8712_fw_ready);
-               pnetdev = usb_get_intfdata(pusb_intf);
-               usb_set_intfdata(pusb_intf, NULL);
-               if (!pnetdev)
-                       goto firmware_load_fail;
-               release_firmware(padapter->fw);
-               if (drvpriv.drv_registered)
-                       padapter->surprise_removed = true;
-               if (pnetdev->reg_state != NETREG_UNINITIALIZED)
-                       unregister_netdev(pnetdev); /* will call netdev_close() */
-               flush_scheduled_work();
-               udelay(1);
-               /* Stop driver mlme relation timer */
-               r8712_stop_drv_timers(padapter);
-               r871x_dev_unload(padapter);
-               r8712_free_drv_sw(padapter);
-               free_netdev(pnetdev);
-
-               /* decrease the reference count of the usb device structure
-                * when disconnect
-                */
-               usb_put_dev(udev);
-       }
-firmware_load_fail:
        /* If we didn't unplug usb dongle and remove/insert module, driver
         * fails on sitesurvey for the first time when device is up.
         * Reset usb port for sitesurvey fail issue.
index a884673..7eae820 100644 (file)
@@ -5,6 +5,7 @@ config RTL8723BS
        depends on m
        select WIRELESS_EXT
        select WEXT_PRIV
+       select CRYPTO_LIB_ARC4
        help
        This option enables support for RTL8723BS SDIO drivers, such as
        the wifi found on the 1st gen Intel Compute Stick, the CHIP
index 2dd251c..a545832 100644 (file)
@@ -909,6 +909,8 @@ void sd_int_dpc(struct adapter *adapter)
                                } else {
                                        rtw_c2h_wk_cmd(adapter, (u8 *)c2h_evt);
                                }
+                       } else {
+                               kfree(c2h_evt);
                        }
                } else {
                        /* Error handling for malloc fail */
index 6e6eb83..945f03d 100644 (file)
@@ -184,7 +184,7 @@ static struct tee_shm *get_msg_arg(struct tee_context *ctx, size_t num_params,
        struct optee_msg_arg *ma;
 
        shm = tee_shm_alloc(ctx, OPTEE_MSG_GET_ARG_SIZE(num_params),
-                           TEE_SHM_MAPPED);
+                           TEE_SHM_MAPPED | TEE_SHM_PRIV);
        if (IS_ERR(shm))
                return shm;
 
@@ -416,11 +416,13 @@ void optee_enable_shm_cache(struct optee *optee)
 }
 
 /**
- * optee_disable_shm_cache() - Disables caching of some shared memory allocation
- *                           in OP-TEE
+ * __optee_disable_shm_cache() - Disables caching of some shared memory
+ *                               allocation in OP-TEE
  * @optee:     main service struct
+ * @is_mapped: true if the cached shared memory addresses were mapped by this
+ *             kernel, are safe to dereference, and should be freed
  */
-void optee_disable_shm_cache(struct optee *optee)
+static void __optee_disable_shm_cache(struct optee *optee, bool is_mapped)
 {
        struct optee_call_waiter w;
 
@@ -439,6 +441,13 @@ void optee_disable_shm_cache(struct optee *optee)
                if (res.result.status == OPTEE_SMC_RETURN_OK) {
                        struct tee_shm *shm;
 
+                       /*
+                        * Shared memory references that were not mapped by
+                        * this kernel must be ignored to prevent a crash.
+                        */
+                       if (!is_mapped)
+                               continue;
+
                        shm = reg_pair_to_ptr(res.result.shm_upper32,
                                              res.result.shm_lower32);
                        tee_shm_free(shm);
@@ -449,6 +458,27 @@ void optee_disable_shm_cache(struct optee *optee)
        optee_cq_wait_final(&optee->call_queue, &w);
 }
 
+/**
+ * optee_disable_shm_cache() - Disables caching of mapped shared memory
+ *                             allocations in OP-TEE
+ * @optee:     main service struct
+ */
+void optee_disable_shm_cache(struct optee *optee)
+{
+       return __optee_disable_shm_cache(optee, true);
+}
+
+/**
+ * optee_disable_unmapped_shm_cache() - Disables caching of shared memory
+ *                                      allocations in OP-TEE which are not
+ *                                      currently mapped
+ * @optee:     main service struct
+ */
+void optee_disable_unmapped_shm_cache(struct optee *optee)
+{
+       return __optee_disable_shm_cache(optee, false);
+}
+
 #define PAGELIST_ENTRIES_PER_PAGE                              \
        ((OPTEE_MSG_NONCONTIG_PAGE_SIZE / sizeof(u64)) - 1)
 
index ddb8f9e..5ce13b0 100644 (file)
@@ -6,6 +6,7 @@
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #include <linux/arm-smccc.h>
+#include <linux/crash_dump.h>
 #include <linux/errno.h>
 #include <linux/io.h>
 #include <linux/module.h>
@@ -277,7 +278,8 @@ static void optee_release(struct tee_context *ctx)
        if (!ctxdata)
                return;
 
-       shm = tee_shm_alloc(ctx, sizeof(struct optee_msg_arg), TEE_SHM_MAPPED);
+       shm = tee_shm_alloc(ctx, sizeof(struct optee_msg_arg),
+                           TEE_SHM_MAPPED | TEE_SHM_PRIV);
        if (!IS_ERR(shm)) {
                arg = tee_shm_get_va(shm, 0);
                /*
@@ -572,6 +574,13 @@ static optee_invoke_fn *get_invoke_func(struct device *dev)
        return ERR_PTR(-EINVAL);
 }
 
+/* optee_remove - Device Removal Routine
+ * @pdev: platform device information struct
+ *
+ * optee_remove is called by platform subsystem to alert the driver
+ * that it should release the device
+ */
+
 static int optee_remove(struct platform_device *pdev)
 {
        struct optee *optee = platform_get_drvdata(pdev);
@@ -602,6 +611,18 @@ static int optee_remove(struct platform_device *pdev)
        return 0;
 }
 
+/* optee_shutdown - Device Removal Routine
+ * @pdev: platform device information struct
+ *
+ * platform_shutdown is called by the platform subsystem to alert
+ * the driver that a shutdown, reboot, or kexec is happening and
+ * device must be disabled.
+ */
+static void optee_shutdown(struct platform_device *pdev)
+{
+       optee_disable_shm_cache(platform_get_drvdata(pdev));
+}
+
 static int optee_probe(struct platform_device *pdev)
 {
        optee_invoke_fn *invoke_fn;
@@ -612,6 +633,16 @@ static int optee_probe(struct platform_device *pdev)
        u32 sec_caps;
        int rc;
 
+       /*
+        * The kernel may have crashed at the same time that all available
+        * secure world threads were suspended and we cannot reschedule the
+        * suspended threads without access to the crashed kernel's wait_queue.
+        * Therefore, we cannot reliably initialize the OP-TEE driver in the
+        * kdump kernel.
+        */
+       if (is_kdump_kernel())
+               return -ENODEV;
+
        invoke_fn = get_invoke_func(&pdev->dev);
        if (IS_ERR(invoke_fn))
                return PTR_ERR(invoke_fn);
@@ -686,6 +717,15 @@ static int optee_probe(struct platform_device *pdev)
        optee->memremaped_shm = memremaped_shm;
        optee->pool = pool;
 
+       /*
+        * Ensure that there are no pre-existing shm objects before enabling
+        * the shm cache so that there's no chance of receiving an invalid
+        * address during shutdown. This could occur, for example, if we're
+        * kexec booting from an older kernel that did not properly cleanup the
+        * shm cache.
+        */
+       optee_disable_unmapped_shm_cache(optee);
+
        optee_enable_shm_cache(optee);
 
        if (optee->sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
@@ -728,6 +768,7 @@ MODULE_DEVICE_TABLE(of, optee_dt_match);
 static struct platform_driver optee_driver = {
        .probe  = optee_probe,
        .remove = optee_remove,
+       .shutdown = optee_shutdown,
        .driver = {
                .name = "optee",
                .of_match_table = optee_dt_match,
index e25b216..dbdd367 100644 (file)
@@ -159,6 +159,7 @@ int optee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session);
 
 void optee_enable_shm_cache(struct optee *optee);
 void optee_disable_shm_cache(struct optee *optee);
+void optee_disable_unmapped_shm_cache(struct optee *optee);
 
 int optee_shm_register(struct tee_context *ctx, struct tee_shm *shm,
                       struct page **pages, size_t num_pages,
index 1849180..efbaff7 100644 (file)
@@ -314,7 +314,7 @@ static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
                shm = cmd_alloc_suppl(ctx, sz);
                break;
        case OPTEE_RPC_SHM_TYPE_KERNEL:
-               shm = tee_shm_alloc(ctx, sz, TEE_SHM_MAPPED);
+               shm = tee_shm_alloc(ctx, sz, TEE_SHM_MAPPED | TEE_SHM_PRIV);
                break;
        default:
                arg->ret = TEEC_ERROR_BAD_PARAMETERS;
@@ -502,7 +502,8 @@ void optee_handle_rpc(struct tee_context *ctx, struct optee_rpc_param *param,
 
        switch (OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0)) {
        case OPTEE_SMC_RPC_FUNC_ALLOC:
-               shm = tee_shm_alloc(ctx, param->a1, TEE_SHM_MAPPED);
+               shm = tee_shm_alloc(ctx, param->a1,
+                                   TEE_SHM_MAPPED | TEE_SHM_PRIV);
                if (!IS_ERR(shm) && !tee_shm_get_pa(shm, 0, &pa)) {
                        reg_pair_from_64(&param->a1, &param->a2, pa);
                        reg_pair_from_64(&param->a4, &param->a5,
index d767eeb..c41a9a5 100644 (file)
@@ -27,13 +27,19 @@ static int pool_op_alloc(struct tee_shm_pool_mgr *poolm,
        shm->paddr = page_to_phys(page);
        shm->size = PAGE_SIZE << order;
 
-       if (shm->flags & TEE_SHM_DMA_BUF) {
+       /*
+        * Shared memory private to the OP-TEE driver doesn't need
+        * to be registered with OP-TEE.
+        */
+       if (!(shm->flags & TEE_SHM_PRIV)) {
                unsigned int nr_pages = 1 << order, i;
                struct page **pages;
 
                pages = kcalloc(nr_pages, sizeof(pages), GFP_KERNEL);
-               if (!pages)
-                       return -ENOMEM;
+               if (!pages) {
+                       rc = -ENOMEM;
+                       goto err;
+               }
 
                for (i = 0; i < nr_pages; i++) {
                        pages[i] = page;
@@ -44,15 +50,21 @@ static int pool_op_alloc(struct tee_shm_pool_mgr *poolm,
                rc = optee_shm_register(shm->ctx, shm, pages, nr_pages,
                                        (unsigned long)shm->kaddr);
                kfree(pages);
+               if (rc)
+                       goto err;
        }
 
+       return 0;
+
+err:
+       __free_pages(page, order);
        return rc;
 }
 
 static void pool_op_free(struct tee_shm_pool_mgr *poolm,
                         struct tee_shm *shm)
 {
-       if (shm->flags & TEE_SHM_DMA_BUF)
+       if (!(shm->flags & TEE_SHM_PRIV))
                optee_shm_unregister(shm->ctx, shm);
 
        free_pages((unsigned long)shm->kaddr, get_order(shm->size));
index 00472f5..8a9384a 100644 (file)
@@ -117,7 +117,7 @@ struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
                return ERR_PTR(-EINVAL);
        }
 
-       if ((flags & ~(TEE_SHM_MAPPED | TEE_SHM_DMA_BUF))) {
+       if ((flags & ~(TEE_SHM_MAPPED | TEE_SHM_DMA_BUF | TEE_SHM_PRIV))) {
                dev_err(teedev->dev.parent, "invalid shm flags 0x%x", flags);
                return ERR_PTR(-EINVAL);
        }
@@ -193,6 +193,24 @@ err_dev_put:
 }
 EXPORT_SYMBOL_GPL(tee_shm_alloc);
 
+/**
+ * tee_shm_alloc_kernel_buf() - Allocate shared memory for kernel buffer
+ * @ctx:       Context that allocates the shared memory
+ * @size:      Requested size of shared memory
+ *
+ * The returned memory registered in secure world and is suitable to be
+ * passed as a memory buffer in parameter argument to
+ * tee_client_invoke_func(). The memory allocated is later freed with a
+ * call to tee_shm_free().
+ *
+ * @returns a pointer to 'struct tee_shm'
+ */
+struct tee_shm *tee_shm_alloc_kernel_buf(struct tee_context *ctx, size_t size)
+{
+       return tee_shm_alloc(ctx, size, TEE_SHM_MAPPED);
+}
+EXPORT_SYMBOL_GPL(tee_shm_alloc_kernel_buf);
+
 struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
                                 size_t length, u32 flags)
 {
index 83b1ef3..10d6b22 100644 (file)
@@ -1875,18 +1875,6 @@ static struct attribute *switch_attrs[] = {
        NULL,
 };
 
-static bool has_port(const struct tb_switch *sw, enum tb_port_type type)
-{
-       const struct tb_port *port;
-
-       tb_switch_for_each_port(sw, port) {
-               if (!port->disabled && port->config.type == type)
-                       return true;
-       }
-
-       return false;
-}
-
 static umode_t switch_attr_is_visible(struct kobject *kobj,
                                      struct attribute *attr, int n)
 {
@@ -1895,8 +1883,7 @@ static umode_t switch_attr_is_visible(struct kobject *kobj,
 
        if (attr == &dev_attr_authorized.attr) {
                if (sw->tb->security_level == TB_SECURITY_NOPCIE ||
-                   sw->tb->security_level == TB_SECURITY_DPONLY ||
-                   !has_port(sw, TB_TYPE_PCIE_UP))
+                   sw->tb->security_level == TB_SECURITY_DPONLY)
                        return 0;
        } else if (attr == &dev_attr_device.attr) {
                if (!sw->device)
index 4caab87..2350fb3 100644 (file)
@@ -329,6 +329,7 @@ static int aspeed_vuart_handle_irq(struct uart_port *port)
 {
        struct uart_8250_port *up = up_to_u8250p(port);
        unsigned int iir, lsr;
+       unsigned long flags;
        unsigned int space, count;
 
        iir = serial_port_in(port, UART_IIR);
@@ -336,7 +337,7 @@ static int aspeed_vuart_handle_irq(struct uart_port *port)
        if (iir & UART_IIR_NO_INT)
                return 0;
 
-       spin_lock(&port->lock);
+       spin_lock_irqsave(&port->lock, flags);
 
        lsr = serial_port_in(port, UART_LSR);
 
@@ -370,7 +371,7 @@ static int aspeed_vuart_handle_irq(struct uart_port *port)
        if (lsr & UART_LSR_THRE)
                serial8250_tx_chars(up);
 
-       uart_unlock_and_check_sysrq(port);
+       uart_unlock_and_check_sysrq_irqrestore(port, flags);
 
        return 1;
 }
index 4e75d2e..fc65a22 100644 (file)
@@ -30,10 +30,11 @@ struct fsl8250_data {
 int fsl8250_handle_irq(struct uart_port *port)
 {
        unsigned char lsr, orig_lsr;
+       unsigned long flags;
        unsigned int iir;
        struct uart_8250_port *up = up_to_u8250p(port);
 
-       spin_lock(&up->port.lock);
+       spin_lock_irqsave(&up->port.lock, flags);
 
        iir = port->serial_in(port, UART_IIR);
        if (iir & UART_IIR_NO_INT) {
@@ -82,7 +83,7 @@ int fsl8250_handle_irq(struct uart_port *port)
 
        up->lsr_saved_flags = orig_lsr;
 
-       uart_unlock_and_check_sysrq(&up->port);
+       uart_unlock_and_check_sysrq_irqrestore(&up->port, flags);
 
        return 1;
 }
index f7d3023..fb65dc6 100644 (file)
@@ -93,10 +93,13 @@ static void mtk8250_dma_rx_complete(void *param)
        struct dma_tx_state state;
        int copied, total, cnt;
        unsigned char *ptr;
+       unsigned long flags;
 
        if (data->rx_status == DMA_RX_SHUTDOWN)
                return;
 
+       spin_lock_irqsave(&up->port.lock, flags);
+
        dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state);
        total = dma->rx_size - state.residue;
        cnt = total;
@@ -120,6 +123,8 @@ static void mtk8250_dma_rx_complete(void *param)
        tty_flip_buffer_push(tty_port);
 
        mtk8250_rx_dma(up);
+
+       spin_unlock_irqrestore(&up->port.lock, flags);
 }
 
 static void mtk8250_rx_dma(struct uart_8250_port *up)
index 75827b6..a808c28 100644 (file)
@@ -3836,6 +3836,12 @@ static const struct pci_device_id blacklist[] = {
        { PCI_VDEVICE(INTEL, 0x0f0c), },
        { PCI_VDEVICE(INTEL, 0x228a), },
        { PCI_VDEVICE(INTEL, 0x228c), },
+       { PCI_VDEVICE(INTEL, 0x4b96), },
+       { PCI_VDEVICE(INTEL, 0x4b97), },
+       { PCI_VDEVICE(INTEL, 0x4b98), },
+       { PCI_VDEVICE(INTEL, 0x4b99), },
+       { PCI_VDEVICE(INTEL, 0x4b9a), },
+       { PCI_VDEVICE(INTEL, 0x4b9b), },
        { PCI_VDEVICE(INTEL, 0x9ce3), },
        { PCI_VDEVICE(INTEL, 0x9ce4), },
 
@@ -3996,6 +4002,7 @@ pciserial_init_ports(struct pci_dev *dev, const struct pciserial_board *board)
                if (pci_match_id(pci_use_msi, dev)) {
                        dev_dbg(&dev->dev, "Using MSI(-X) interrupts\n");
                        pci_set_master(dev);
+                       uart.port.flags &= ~UPF_SHARE_IRQ;
                        rc = pci_alloc_irq_vectors(dev, 1, 1, PCI_IRQ_ALL_TYPES);
                } else {
                        dev_dbg(&dev->dev, "Using legacy interrupts\n");
index 2164290..1da29a2 100644 (file)
@@ -311,7 +311,11 @@ static const struct serial8250_config uart_config[] = {
 /* Uart divisor latch read */
 static int default_serial_dl_read(struct uart_8250_port *up)
 {
-       return serial_in(up, UART_DLL) | serial_in(up, UART_DLM) << 8;
+       /* Assign these in pieces to truncate any bits above 7.  */
+       unsigned char dll = serial_in(up, UART_DLL);
+       unsigned char dlm = serial_in(up, UART_DLM);
+
+       return dll | dlm << 8;
 }
 
 /* Uart divisor latch write */
@@ -1297,9 +1301,11 @@ static void autoconfig(struct uart_8250_port *up)
        serial_out(up, UART_LCR, 0);
 
        serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO);
-       scratch = serial_in(up, UART_IIR) >> 6;
 
-       switch (scratch) {
+       /* Assign this as it is to truncate any bits above 7.  */
+       scratch = serial_in(up, UART_IIR);
+
+       switch (scratch >> 6) {
        case 0:
                autoconfig_8250(up);
                break;
@@ -1893,11 +1899,12 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
        unsigned char status;
        struct uart_8250_port *up = up_to_u8250p(port);
        bool skip_rx = false;
+       unsigned long flags;
 
        if (iir & UART_IIR_NO_INT)
                return 0;
 
-       spin_lock(&port->lock);
+       spin_lock_irqsave(&port->lock, flags);
 
        status = serial_port_in(port, UART_LSR);
 
@@ -1923,7 +1930,7 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
                (up->ier & UART_IER_THRI))
                serial8250_tx_chars(up);
 
-       uart_unlock_and_check_sysrq(port);
+       uart_unlock_and_check_sysrq_irqrestore(port, flags);
 
        return 1;
 }
index 508128d..f0e5da7 100644 (file)
@@ -1415,7 +1415,7 @@ static unsigned int lpuart_get_mctrl(struct uart_port *port)
 
 static unsigned int lpuart32_get_mctrl(struct uart_port *port)
 {
-       unsigned int mctrl = 0;
+       unsigned int mctrl = TIOCM_CAR | TIOCM_DSR | TIOCM_CTS;
        u32 reg;
 
        reg = lpuart32_read(port, UARTCTRL);
index 0c1e4df..ef11860 100644 (file)
@@ -1293,7 +1293,8 @@ static int max310x_probe(struct device *dev, const struct max310x_devtype *devty
                freq = uartclk;
        if (freq == 0) {
                dev_err(dev, "Cannot get clock rate\n");
-               return -EINVAL;
+               ret = -EINVAL;
+               goto out_clk;
        }
 
        if (xtal) {
index 2220327..eba5b9e 100644 (file)
@@ -1045,9 +1045,11 @@ static int tegra_uart_hw_init(struct tegra_uart_port *tup)
 
        if (tup->cdata->fifo_mode_enable_status) {
                ret = tegra_uart_wait_fifo_mode_enabled(tup);
-               dev_err(tup->uport.dev, "FIFO mode not enabled\n");
-               if (ret < 0)
+               if (ret < 0) {
+                       dev_err(tup->uport.dev,
+                               "Failed to enable FIFO mode: %d\n", ret);
                        return ret;
+               }
        } else {
                /*
                 * For all tegra devices (up to t210), there is a hardware
index ef981d3..cb72393 100644 (file)
@@ -2059,7 +2059,7 @@ static void restore_cur(struct vc_data *vc)
 
 enum { ESnormal, ESesc, ESsquare, ESgetpars, ESfunckey,
        EShash, ESsetG0, ESsetG1, ESpercent, EScsiignore, ESnonstd,
-       ESpalette, ESosc };
+       ESpalette, ESosc, ESapc, ESpm, ESdcs };
 
 /* console_lock is held (except via vc_init()) */
 static void reset_terminal(struct vc_data *vc, int do_clear)
@@ -2133,20 +2133,28 @@ static void vc_setGx(struct vc_data *vc, unsigned int which, int c)
                vc->vc_translate = set_translate(*charset, vc);
 }
 
+/* is this state an ANSI control string? */
+static bool ansi_control_string(unsigned int state)
+{
+       if (state == ESosc || state == ESapc || state == ESpm || state == ESdcs)
+               return true;
+       return false;
+}
+
 /* console_lock is held */
 static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c)
 {
        /*
         *  Control characters can be used in the _middle_
-        *  of an escape sequence.
+        *  of an escape sequence, aside from ANSI control strings.
         */
-       if (vc->vc_state == ESosc && c>=8 && c<=13) /* ... except for OSC */
+       if (ansi_control_string(vc->vc_state) && c >= 8 && c <= 13)
                return;
        switch (c) {
        case 0:
                return;
        case 7:
-               if (vc->vc_state == ESosc)
+               if (ansi_control_string(vc->vc_state))
                        vc->vc_state = ESnormal;
                else if (vc->vc_bell_duration)
                        kd_mksound(vc->vc_bell_pitch, vc->vc_bell_duration);
@@ -2207,6 +2215,12 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c)
                case ']':
                        vc->vc_state = ESnonstd;
                        return;
+               case '_':
+                       vc->vc_state = ESapc;
+                       return;
+               case '^':
+                       vc->vc_state = ESpm;
+                       return;
                case '%':
                        vc->vc_state = ESpercent;
                        return;
@@ -2224,6 +2238,9 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c)
                        if (vc->state.x < VC_TABSTOPS_COUNT)
                                set_bit(vc->state.x, vc->vc_tab_stop);
                        return;
+               case 'P':
+                       vc->vc_state = ESdcs;
+                       return;
                case 'Z':
                        respond_ID(tty);
                        return;
@@ -2520,8 +2537,14 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c)
                vc_setGx(vc, 1, c);
                vc->vc_state = ESnormal;
                return;
+       case ESapc:
+               return;
        case ESosc:
                return;
+       case ESpm:
+               return;
+       case ESdcs:
+               return;
        default:
                vc->vc_state = ESnormal;
        }
index 0e0cd9e..3639bb6 100644 (file)
@@ -246,6 +246,8 @@ int vt_waitactive(int n)
  *
  * XXX It should at least call into the driver, fbdev's definitely need to
  * restore their engine state. --BenH
+ *
+ * Called with the console lock held.
  */
 static int vt_kdsetmode(struct vc_data *vc, unsigned long mode)
 {
@@ -262,7 +264,6 @@ static int vt_kdsetmode(struct vc_data *vc, unsigned long mode)
                return -EINVAL;
        }
 
-       /* FIXME: this needs the console lock extending */
        if (vc->vc_mode == mode)
                return 0;
 
@@ -271,12 +272,10 @@ static int vt_kdsetmode(struct vc_data *vc, unsigned long mode)
                return 0;
 
        /* explicitly blank/unblank the screen if switching modes */
-       console_lock();
        if (mode == KD_TEXT)
                do_unblank_screen(1);
        else
                do_blank_screen(1);
-       console_unlock();
 
        return 0;
 }
@@ -378,7 +377,10 @@ static int vt_k_ioctl(struct tty_struct *tty, unsigned int cmd,
                if (!perm)
                        return -EPERM;
 
-               return vt_kdsetmode(vc, arg);
+               console_lock();
+               ret = vt_kdsetmode(vc, arg);
+               console_unlock();
+               return ret;
 
        case KDGETMODE:
                return put_user(vc->vc_mode, (int __user *)arg);
index 02ec7ab..e29989d 100644 (file)
@@ -731,6 +731,7 @@ static int cdns3_gadget_ep0_queue(struct usb_ep *ep,
                request->actual = 0;
                priv_dev->status_completion_no_call = true;
                priv_dev->pending_status_request = request;
+               usb_gadget_set_state(&priv_dev->gadget, USB_STATE_CONFIGURED);
                spin_unlock_irqrestore(&priv_dev->lock, flags);
 
                /*
index c23f53e..27df0c6 100644 (file)
@@ -1882,7 +1882,7 @@ static int __cdnsp_gadget_init(struct cdns *cdns)
        pdev->gadget.name = "cdnsp-gadget";
        pdev->gadget.speed = USB_SPEED_UNKNOWN;
        pdev->gadget.sg_supported = 1;
-       pdev->gadget.max_speed = USB_SPEED_SUPER_PLUS;
+       pdev->gadget.max_speed = max_speed;
        pdev->gadget.lpm_capable = 1;
 
        pdev->setup_buf = kzalloc(CDNSP_EP0_SETUP_SIZE, GFP_KERNEL);
index 783ca8f..f740fa6 100644 (file)
@@ -383,8 +383,8 @@ struct cdnsp_intr_reg {
 #define IMAN_IE                        BIT(1)
 #define IMAN_IP                        BIT(0)
 /* bits 2:31 need to be preserved */
-#define IMAN_IE_SET(p)         (((p) & IMAN_IE) | 0x2)
-#define IMAN_IE_CLEAR(p)       (((p) & IMAN_IE) & ~(0x2))
+#define IMAN_IE_SET(p)         ((p) | IMAN_IE)
+#define IMAN_IE_CLEAR(p)       ((p) & ~IMAN_IE)
 
 /* IMOD - Interrupter Moderation Register - irq_control bitmasks. */
 /*
index 6897274..1b14384 100644 (file)
@@ -1932,15 +1932,13 @@ int cdnsp_queue_bulk_tx(struct cdnsp_device *pdev, struct cdnsp_request *preq)
                }
 
                if (enqd_len + trb_buff_len >= full_len) {
-                       if (need_zero_pkt && zero_len_trb) {
-                               zero_len_trb = true;
-                       } else {
-                               field &= ~TRB_CHAIN;
-                               field |= TRB_IOC;
-                               more_trbs_coming = false;
-                               need_zero_pkt = false;
-                               preq->td.last_trb = ring->enqueue;
-                       }
+                       if (need_zero_pkt)
+                               zero_len_trb = !zero_len_trb;
+
+                       field &= ~TRB_CHAIN;
+                       field |= TRB_IOC;
+                       more_trbs_coming = false;
+                       preq->td.last_trb = ring->enqueue;
                }
 
                /* Only set interrupt on short packet for OUT endpoints. */
@@ -1955,7 +1953,7 @@ int cdnsp_queue_bulk_tx(struct cdnsp_device *pdev, struct cdnsp_request *preq)
                length_field = TRB_LEN(trb_buff_len) | TRB_TD_SIZE(remainder) |
                        TRB_INTR_TARGET(0);
 
-               cdnsp_queue_trb(pdev, ring, more_trbs_coming | need_zero_pkt,
+               cdnsp_queue_trb(pdev, ring, more_trbs_coming | zero_len_trb,
                                lower_32_bits(send_addr),
                                upper_32_bits(send_addr),
                                length_field,
index 74d5a9c..73f419a 100644 (file)
@@ -2324,17 +2324,10 @@ static void usbtmc_interrupt(struct urb *urb)
                dev_err(dev, "overflow with length %d, actual length is %d\n",
                        data->iin_wMaxPacketSize, urb->actual_length);
                fallthrough;
-       case -ECONNRESET:
-       case -ENOENT:
-       case -ESHUTDOWN:
-       case -EILSEQ:
-       case -ETIME:
-       case -EPIPE:
+       default:
                /* urb terminated, clean up */
                dev_dbg(dev, "urb terminated, status: %d\n", status);
                return;
-       default:
-               dev_err(dev, "unknown status received: %d\n", status);
        }
 exit:
        rv = usb_submit_urb(urb, GFP_ATOMIC);
index 3740cf9..0697fde 100644 (file)
@@ -193,7 +193,11 @@ static void otg_start_hnp_polling(struct otg_fsm *fsm)
        if (!fsm->host_req_flag)
                return;
 
-       INIT_DELAYED_WORK(&fsm->hnp_polling_work, otg_hnp_polling_work);
+       if (!fsm->hnp_work_inited) {
+               INIT_DELAYED_WORK(&fsm->hnp_polling_work, otg_hnp_polling_work);
+               fsm->hnp_work_inited = true;
+       }
+
        schedule_delayed_work(&fsm->hnp_polling_work,
                                        msecs_to_jiffies(T_HOST_REQ_POLL));
 }
index 45f2bc0..ccb68fe 100644 (file)
@@ -940,19 +940,19 @@ static struct dwc3_trb *dwc3_ep_prev_trb(struct dwc3_ep *dep, u8 index)
 
 static u32 dwc3_calc_trbs_left(struct dwc3_ep *dep)
 {
-       struct dwc3_trb         *tmp;
        u8                      trbs_left;
 
        /*
-        * If enqueue & dequeue are equal than it is either full or empty.
-        *
-        * One way to know for sure is if the TRB right before us has HWO bit
-        * set or not. If it has, then we're definitely full and can't fit any
-        * more transfers in our ring.
+        * If the enqueue & dequeue are equal then the TRB ring is either full
+        * or empty. It's considered full when there are DWC3_TRB_NUM-1 of TRBs
+        * pending to be processed by the driver.
         */
        if (dep->trb_enqueue == dep->trb_dequeue) {
-               tmp = dwc3_ep_prev_trb(dep, dep->trb_enqueue);
-               if (tmp->ctrl & DWC3_TRB_CTRL_HWO)
+               /*
+                * If there is any request remained in the started_list at
+                * this point, that means there is no TRB available.
+                */
+               if (!list_empty(&dep->started_list))
                        return 0;
 
                return DWC3_TRB_NUM - 1;
@@ -2243,10 +2243,19 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
 
                ret = wait_for_completion_timeout(&dwc->ep0_in_setup,
                                msecs_to_jiffies(DWC3_PULL_UP_TIMEOUT));
-               if (ret == 0) {
-                       dev_err(dwc->dev, "timed out waiting for SETUP phase\n");
-                       return -ETIMEDOUT;
-               }
+               if (ret == 0)
+                       dev_warn(dwc->dev, "timed out waiting for SETUP phase\n");
+       }
+
+       /*
+        * Avoid issuing a runtime resume if the device is already in the
+        * suspended state during gadget disconnect.  DWC3 gadget was already
+        * halted/stopped during runtime suspend.
+        */
+       if (!is_on) {
+               pm_runtime_barrier(dwc->dev);
+               if (pm_runtime_suspended(dwc->dev))
+                       return 0;
        }
 
        /*
@@ -2447,6 +2456,7 @@ static int __dwc3_gadget_start(struct dwc3 *dwc)
        /* begin to receive SETUP packets */
        dwc->ep0state = EP0_SETUP_PHASE;
        dwc->link_state = DWC3_LINK_STATE_SS_DIS;
+       dwc->delayed_status = false;
        dwc3_ep0_out_start(dwc);
 
        dwc3_gadget_enable_irq(dwc);
index 02683ac..bb476e1 100644 (file)
@@ -41,6 +41,7 @@ struct f_hidg {
        unsigned char                   bInterfaceSubClass;
        unsigned char                   bInterfaceProtocol;
        unsigned char                   protocol;
+       unsigned char                   idle;
        unsigned short                  report_desc_length;
        char                            *report_desc;
        unsigned short                  report_length;
@@ -338,6 +339,11 @@ static ssize_t f_hidg_write(struct file *file, const char __user *buffer,
 
        spin_lock_irqsave(&hidg->write_spinlock, flags);
 
+       if (!hidg->req) {
+               spin_unlock_irqrestore(&hidg->write_spinlock, flags);
+               return -ESHUTDOWN;
+       }
+
 #define WRITE_COND (!hidg->write_pending)
 try_again:
        /* write queue */
@@ -358,8 +364,14 @@ try_again:
        count  = min_t(unsigned, count, hidg->report_length);
 
        spin_unlock_irqrestore(&hidg->write_spinlock, flags);
-       status = copy_from_user(req->buf, buffer, count);
 
+       if (!req) {
+               ERROR(hidg->func.config->cdev, "hidg->req is NULL\n");
+               status = -ESHUTDOWN;
+               goto release_write_pending;
+       }
+
+       status = copy_from_user(req->buf, buffer, count);
        if (status != 0) {
                ERROR(hidg->func.config->cdev,
                        "copy_from_user error\n");
@@ -387,14 +399,17 @@ try_again:
 
        spin_unlock_irqrestore(&hidg->write_spinlock, flags);
 
+       if (!hidg->in_ep->enabled) {
+               ERROR(hidg->func.config->cdev, "in_ep is disabled\n");
+               status = -ESHUTDOWN;
+               goto release_write_pending;
+       }
+
        status = usb_ep_queue(hidg->in_ep, req, GFP_ATOMIC);
-       if (status < 0) {
-               ERROR(hidg->func.config->cdev,
-                       "usb_ep_queue error on int endpoint %zd\n", status);
+       if (status < 0)
                goto release_write_pending;
-       } else {
+       else
                status = count;
-       }
 
        return status;
 release_write_pending:
@@ -523,6 +538,14 @@ static int hidg_setup(struct usb_function *f,
                goto respond;
                break;
 
+       case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8
+                 | HID_REQ_GET_IDLE):
+               VDBG(cdev, "get_idle\n");
+               length = min_t(unsigned int, length, 1);
+               ((u8 *) req->buf)[0] = hidg->idle;
+               goto respond;
+               break;
+
        case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8
                  | HID_REQ_SET_REPORT):
                VDBG(cdev, "set_report | wLength=%d\n", ctrl->wLength);
@@ -546,6 +569,14 @@ static int hidg_setup(struct usb_function *f,
                goto stall;
                break;
 
+       case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8
+                 | HID_REQ_SET_IDLE):
+               VDBG(cdev, "set_idle\n");
+               length = 0;
+               hidg->idle = value >> 8;
+               goto respond;
+               break;
+
        case ((USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_INTERFACE) << 8
                  | USB_REQ_GET_DESCRIPTOR):
                switch (value >> 8) {
@@ -773,6 +804,7 @@ static int hidg_bind(struct usb_configuration *c, struct usb_function *f)
        hidg_interface_desc.bInterfaceSubClass = hidg->bInterfaceSubClass;
        hidg_interface_desc.bInterfaceProtocol = hidg->bInterfaceProtocol;
        hidg->protocol = HID_REPORT_PROTOCOL;
+       hidg->idle = 1;
        hidg_ss_in_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length);
        hidg_ss_in_comp_desc.wBytesPerInterval =
                                cpu_to_le16(hidg->report_length);
index 018dd09..9e5c950 100644 (file)
@@ -230,7 +230,13 @@ static void u_audio_iso_fback_complete(struct usb_ep *ep,
        int status = req->status;
 
        /* i/f shutting down */
-       if (!prm->fb_ep_enabled || req->status == -ESHUTDOWN)
+       if (!prm->fb_ep_enabled) {
+               kfree(req->buf);
+               usb_ep_free_request(ep, req);
+               return;
+       }
+
+       if (req->status == -ESHUTDOWN)
                return;
 
        /*
@@ -388,8 +394,6 @@ static inline void free_ep(struct uac_rtd_params *prm, struct usb_ep *ep)
        if (!prm->ep_enabled)
                return;
 
-       prm->ep_enabled = false;
-
        audio_dev = uac->audio_dev;
        params = &audio_dev->params;
 
@@ -407,6 +411,8 @@ static inline void free_ep(struct uac_rtd_params *prm, struct usb_ep *ep)
                }
        }
 
+       prm->ep_enabled = false;
+
        if (usb_ep_disable(ep))
                dev_err(uac->card->dev, "%s:%d Error!\n", __func__, __LINE__);
 }
@@ -418,15 +424,16 @@ static inline void free_ep_fback(struct uac_rtd_params *prm, struct usb_ep *ep)
        if (!prm->fb_ep_enabled)
                return;
 
-       prm->fb_ep_enabled = false;
-
        if (prm->req_fback) {
-               usb_ep_dequeue(ep, prm->req_fback);
-               kfree(prm->req_fback->buf);
-               usb_ep_free_request(ep, prm->req_fback);
+               if (usb_ep_dequeue(ep, prm->req_fback)) {
+                       kfree(prm->req_fback->buf);
+                       usb_ep_free_request(ep, prm->req_fback);
+               }
                prm->req_fback = NULL;
        }
 
+       prm->fb_ep_enabled = false;
+
        if (usb_ep_disable(ep))
                dev_err(uac->card->dev, "%s:%d Error!\n", __func__, __LINE__);
 }
index 34f4db5..d2a2b20 100644 (file)
@@ -1255,12 +1255,14 @@ static int max3420_probe(struct spi_device *spi)
        err = devm_request_irq(&spi->dev, irq, max3420_irq_handler, 0,
                               "max3420", udc);
        if (err < 0)
-               return err;
+               goto del_gadget;
 
        udc->thread_task = kthread_create(max3420_thread, udc,
                                          "max3420-thread");
-       if (IS_ERR(udc->thread_task))
-               return PTR_ERR(udc->thread_task);
+       if (IS_ERR(udc->thread_task)) {
+               err = PTR_ERR(udc->thread_task);
+               goto del_gadget;
+       }
 
        irq = of_irq_get_byname(spi->dev.of_node, "vbus");
        if (irq <= 0) { /* no vbus irq implies self-powered design */
@@ -1280,10 +1282,14 @@ static int max3420_probe(struct spi_device *spi)
                err = devm_request_irq(&spi->dev, irq,
                                       max3420_vbus_handler, 0, "vbus", udc);
                if (err < 0)
-                       return err;
+                       goto del_gadget;
        }
 
        return 0;
+
+del_gadget:
+       usb_del_gadget_udc(&udc->gadget);
+       return err;
 }
 
 static int max3420_remove(struct spi_device *spi)
index 9bbd7dd..a24aea3 100644 (file)
@@ -611,8 +611,6 @@ ohci_hcd_at91_drv_suspend(struct device *dev)
        if (ohci_at91->wakeup)
                enable_irq_wake(hcd->irq);
 
-       ohci_at91_port_suspend(ohci_at91->sfr_regmap, 1);
-
        ret = ohci_suspend(hcd, ohci_at91->wakeup);
        if (ret) {
                if (ohci_at91->wakeup)
@@ -632,7 +630,10 @@ ohci_hcd_at91_drv_suspend(struct device *dev)
                /* flush the writes */
                (void) ohci_readl (ohci, &ohci->regs->control);
                msleep(1);
+               ohci_at91_port_suspend(ohci_at91->sfr_regmap, 1);
                at91_stop_clock(ohci_at91);
+       } else {
+               ohci_at91_port_suspend(ohci_at91->sfr_regmap, 1);
        }
 
        return ret;
@@ -644,6 +645,8 @@ ohci_hcd_at91_drv_resume(struct device *dev)
        struct usb_hcd  *hcd = dev_get_drvdata(dev);
        struct ohci_at91_priv *ohci_at91 = hcd_to_ohci_at91_priv(hcd);
 
+       ohci_at91_port_suspend(ohci_at91->sfr_regmap, 0);
+
        if (ohci_at91->wakeup)
                disable_irq_wake(hcd->irq);
        else
@@ -651,8 +654,6 @@ ohci_hcd_at91_drv_resume(struct device *dev)
 
        ohci_resume(hcd, false);
 
-       ohci_at91_port_suspend(ohci_at91->sfr_regmap, 0);
-
        return 0;
 }
 
index 5923844..ef5e91a 100644 (file)
@@ -207,7 +207,8 @@ static int renesas_check_rom_state(struct pci_dev *pdev)
                        return 0;
 
                case RENESAS_ROM_STATUS_NO_RESULT: /* No result yet */
-                       return 0;
+                       dev_dbg(&pdev->dev, "Unknown ROM status ...\n");
+                       return -ENOENT;
 
                case RENESAS_ROM_STATUS_ERROR: /* Error State */
                default: /* All other states are marked as "Reserved states" */
@@ -224,14 +225,6 @@ static int renesas_fw_check_running(struct pci_dev *pdev)
        u8 fw_state;
        int err;
 
-       /* Check if device has ROM and loaded, if so skip everything */
-       err = renesas_check_rom(pdev);
-       if (err) { /* we have rom */
-               err = renesas_check_rom_state(pdev);
-               if (!err)
-                       return err;
-       }
-
        /*
         * Test if the device is actually needing the firmware. As most
         * BIOSes will initialize the device for us. If the device is
@@ -591,21 +584,39 @@ int renesas_xhci_check_request_fw(struct pci_dev *pdev,
                        (struct xhci_driver_data *)id->driver_data;
        const char *fw_name = driver_data->firmware;
        const struct firmware *fw;
+       bool has_rom;
        int err;
 
+       /* Check if device has ROM and loaded, if so skip everything */
+       has_rom = renesas_check_rom(pdev);
+       if (has_rom) {
+               err = renesas_check_rom_state(pdev);
+               if (!err)
+                       return 0;
+               else if (err != -ENOENT)
+                       has_rom = false;
+       }
+
        err = renesas_fw_check_running(pdev);
        /* Continue ahead, if the firmware is already running. */
        if (err == 0)
                return 0;
 
+       /* no firmware interface available */
        if (err != 1)
-               return err;
+               return has_rom ? 0 : err;
 
        pci_dev_get(pdev);
-       err = request_firmware(&fw, fw_name, &pdev->dev);
+       err = firmware_request_nowarn(&fw, fw_name, &pdev->dev);
        pci_dev_put(pdev);
        if (err) {
-               dev_err(&pdev->dev, "request_firmware failed: %d\n", err);
+               if (has_rom) {
+                       dev_info(&pdev->dev, "failed to load firmware %s, fallback to ROM\n",
+                                fw_name);
+                       return 0;
+               }
+               dev_err(&pdev->dev, "failed to load firmware %s: %d\n",
+                       fw_name, err);
                return err;
        }
 
index 640a46f..f086960 100644 (file)
@@ -35,6 +35,7 @@ struct omap2430_glue {
        struct device           *control_otghs;
        unsigned int            is_runtime_suspended:1;
        unsigned int            needs_resume:1;
+       unsigned int            phy_suspended:1;
 };
 #define glue_to_musb(g)                platform_get_drvdata(g->musb)
 
@@ -458,8 +459,10 @@ static int omap2430_runtime_suspend(struct device *dev)
 
        omap2430_low_level_exit(musb);
 
-       phy_power_off(musb->phy);
-       phy_exit(musb->phy);
+       if (!glue->phy_suspended) {
+               phy_power_off(musb->phy);
+               phy_exit(musb->phy);
+       }
 
        glue->is_runtime_suspended = 1;
 
@@ -474,8 +477,10 @@ static int omap2430_runtime_resume(struct device *dev)
        if (!musb)
                return 0;
 
-       phy_init(musb->phy);
-       phy_power_on(musb->phy);
+       if (!glue->phy_suspended) {
+               phy_init(musb->phy);
+               phy_power_on(musb->phy);
+       }
 
        omap2430_low_level_init(musb);
        musb_writel(musb->mregs, OTG_INTERFSEL,
@@ -489,7 +494,21 @@ static int omap2430_runtime_resume(struct device *dev)
        return 0;
 }
 
+/* I2C and SPI PHYs need to be suspended before the glue layer */
 static int omap2430_suspend(struct device *dev)
+{
+       struct omap2430_glue *glue = dev_get_drvdata(dev);
+       struct musb *musb = glue_to_musb(glue);
+
+       phy_power_off(musb->phy);
+       phy_exit(musb->phy);
+       glue->phy_suspended = 1;
+
+       return 0;
+}
+
+/* Glue layer needs to be suspended after musb_suspend() */
+static int omap2430_suspend_late(struct device *dev)
 {
        struct omap2430_glue *glue = dev_get_drvdata(dev);
 
@@ -501,7 +520,7 @@ static int omap2430_suspend(struct device *dev)
        return omap2430_runtime_suspend(dev);
 }
 
-static int omap2430_resume(struct device *dev)
+static int omap2430_resume_early(struct device *dev)
 {
        struct omap2430_glue *glue = dev_get_drvdata(dev);
 
@@ -513,10 +532,24 @@ static int omap2430_resume(struct device *dev)
        return omap2430_runtime_resume(dev);
 }
 
+static int omap2430_resume(struct device *dev)
+{
+       struct omap2430_glue *glue = dev_get_drvdata(dev);
+       struct musb *musb = glue_to_musb(glue);
+
+       phy_init(musb->phy);
+       phy_power_on(musb->phy);
+       glue->phy_suspended = 0;
+
+       return 0;
+}
+
 static const struct dev_pm_ops omap2430_pm_ops = {
        .runtime_suspend = omap2430_runtime_suspend,
        .runtime_resume = omap2430_runtime_resume,
        .suspend = omap2430_suspend,
+       .suspend_late = omap2430_suspend_late,
+       .resume_early = omap2430_resume_early,
        .resume = omap2430_resume,
 };
 
index 4a1f3a9..33bbb34 100644 (file)
@@ -219,6 +219,7 @@ static const struct usb_device_id id_table_combined[] = {
        { USB_DEVICE(FTDI_VID, FTDI_MTXORB_6_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_R2000KU_TRUE_RNG) },
        { USB_DEVICE(FTDI_VID, FTDI_VARDAAN_PID) },
+       { USB_DEVICE(FTDI_VID, FTDI_AUTO_M3_OP_COM_V2_PID) },
        { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0100_PID) },
        { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0101_PID) },
        { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0102_PID) },
index add602b..755858c 100644 (file)
 /* Vardaan Enterprises Serial Interface VEUSB422R3 */
 #define FTDI_VARDAAN_PID       0xF070
 
+/* Auto-M3 Ltd. - OP-COM USB V2 - OBD interface Adapter */
+#define FTDI_AUTO_M3_OP_COM_V2_PID     0x4f50
+
 /*
  * Xsens Technologies BV products (http://www.xsens.com).
  */
index 0fbe253..29c765c 100644 (file)
@@ -1203,6 +1203,8 @@ static const struct usb_device_id option_ids[] = {
          .driver_info = NCTRL(2) | RSVD(3) },
        { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1055, 0xff),    /* Telit FN980 (PCIe) */
          .driver_info = NCTRL(0) | RSVD(1) },
+       { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1056, 0xff),    /* Telit FD980 */
+         .driver_info = NCTRL(2) | RSVD(3) },
        { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
          .driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
        { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
@@ -2072,6 +2074,8 @@ static const struct usb_device_id option_ids[] = {
          .driver_info = RSVD(4) | RSVD(5) },
        { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0105, 0xff),                     /* Fibocom NL678 series */
          .driver_info = RSVD(6) },
+       { USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0xff, 0x30) },    /* Fibocom FG150 Diag */
+       { USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0, 0) },          /* Fibocom FG150 AT */
        { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a0, 0xff) },                   /* Fibocom NL668-AM/NL652-EU (laptop MBIM) */
        { USB_DEVICE_INTERFACE_CLASS(0x2df3, 0x9d03, 0xff) },                   /* LongSung M5710 */
        { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1404, 0xff) },                   /* GosunCn GM500 RNDIS */
index 2f2f504..930b3d5 100644 (file)
@@ -418,24 +418,34 @@ static int pl2303_detect_type(struct usb_serial *serial)
        bcdDevice = le16_to_cpu(desc->bcdDevice);
        bcdUSB = le16_to_cpu(desc->bcdUSB);
 
-       switch (bcdDevice) {
-       case 0x100:
-               /*
-                * Assume it's an HXN-type if the device doesn't support the old read
-                * request value.
-                */
-               if (bcdUSB == 0x200 && !pl2303_supports_hx_status(serial))
-                       return TYPE_HXN;
+       switch (bcdUSB) {
+       case 0x110:
+               switch (bcdDevice) {
+               case 0x300:
+                       return TYPE_HX;
+               case 0x400:
+                       return TYPE_HXD;
+               default:
+                       return TYPE_HX;
+               }
                break;
-       case 0x300:
-               if (bcdUSB == 0x200)
+       case 0x200:
+               switch (bcdDevice) {
+               case 0x100:
+               case 0x305:
+                       /*
+                        * Assume it's an HXN-type if the device doesn't
+                        * support the old read request value.
+                        */
+                       if (!pl2303_supports_hx_status(serial))
+                               return TYPE_HXN;
+                       break;
+               case 0x300:
                        return TYPE_TA;
-
-               return TYPE_HX;
-       case 0x400:
-               return TYPE_HXD;
-       case 0x500:
-               return TYPE_TB;
+               case 0x500:
+                       return TYPE_TB;
+               }
+               break;
        }
 
        dev_err(&serial->interface->dev,
index 5b22a1c..5d05de6 100644 (file)
@@ -341,6 +341,7 @@ struct tcpm_port {
        bool vbus_source;
        bool vbus_charge;
 
+       /* Set to true when Discover_Identity Command is expected to be sent in Ready states. */
        bool send_discover;
        bool op_vsafe5v;
 
@@ -370,6 +371,7 @@ struct tcpm_port {
        struct hrtimer send_discover_timer;
        struct kthread_work send_discover_work;
        bool state_machine_running;
+       /* Set to true when VDM State Machine has following actions. */
        bool vdm_sm_running;
 
        struct completion tx_complete;
@@ -1431,6 +1433,7 @@ static void tcpm_queue_vdm(struct tcpm_port *port, const u32 header,
        /* Set ready, vdm state machine will actually send */
        port->vdm_retries = 0;
        port->vdm_state = VDM_STATE_READY;
+       port->vdm_sm_running = true;
 
        mod_vdm_delayed_work(port, 0);
 }
@@ -1673,7 +1676,6 @@ static int tcpm_pd_svdm(struct tcpm_port *port, struct typec_altmode *adev,
                                rlen = 1;
                        } else {
                                tcpm_register_partner_altmodes(port);
-                               port->vdm_sm_running = false;
                        }
                        break;
                case CMD_ENTER_MODE:
@@ -1721,14 +1723,12 @@ static int tcpm_pd_svdm(struct tcpm_port *port, struct typec_altmode *adev,
                                      (VDO_SVDM_VERS(svdm_version));
                        break;
                }
-               port->vdm_sm_running = false;
                break;
        default:
                response[0] = p[0] | VDO_CMDT(CMDT_RSP_NAK);
                rlen = 1;
                response[0] = (response[0] & ~VDO_SVDM_VERS_MASK) |
                              (VDO_SVDM_VERS(svdm_version));
-               port->vdm_sm_running = false;
                break;
        }
 
@@ -1737,6 +1737,10 @@ static int tcpm_pd_svdm(struct tcpm_port *port, struct typec_altmode *adev,
        return rlen;
 }
 
+static void tcpm_pd_handle_msg(struct tcpm_port *port,
+                              enum pd_msg_request message,
+                              enum tcpm_ams ams);
+
 static void tcpm_handle_vdm_request(struct tcpm_port *port,
                                    const __le32 *payload, int cnt)
 {
@@ -1764,11 +1768,25 @@ static void tcpm_handle_vdm_request(struct tcpm_port *port,
                port->vdm_state = VDM_STATE_DONE;
        }
 
-       if (PD_VDO_SVDM(p[0])) {
+       if (PD_VDO_SVDM(p[0]) && (adev || tcpm_vdm_ams(port) || port->nr_snk_vdo)) {
+               /*
+                * Here a SVDM is received (INIT or RSP or unknown). Set the vdm_sm_running in
+                * advance because we are dropping the lock but may send VDMs soon.
+                * For the cases of INIT received:
+                *  - If no response to send, it will be cleared later in this function.
+                *  - If there are responses to send, it will be cleared in the state machine.
+                * For the cases of RSP received:
+                *  - If no further INIT to send, it will be cleared later in this function.
+                *  - Otherwise, it will be cleared in the state machine if timeout or it will go
+                *    back here until no further INIT to send.
+                * For the cases of unknown type received:
+                *  - We will send NAK and the flag will be cleared in the state machine.
+                */
+               port->vdm_sm_running = true;
                rlen = tcpm_pd_svdm(port, adev, p, cnt, response, &adev_action);
        } else {
                if (port->negotiated_rev >= PD_REV30)
-                       tcpm_queue_message(port, PD_MSG_CTRL_NOT_SUPP);
+                       tcpm_pd_handle_msg(port, PD_MSG_CTRL_NOT_SUPP, NONE_AMS);
        }
 
        /*
@@ -1833,6 +1851,8 @@ static void tcpm_handle_vdm_request(struct tcpm_port *port,
 
        if (rlen > 0)
                tcpm_queue_vdm(port, response[0], &response[1], rlen - 1);
+       else
+               port->vdm_sm_running = false;
 }
 
 static void tcpm_send_vdm(struct tcpm_port *port, u32 vid, int cmd,
@@ -1898,8 +1918,10 @@ static void vdm_run_state_machine(struct tcpm_port *port)
                 * if there's traffic or we're not in PDO ready state don't send
                 * a VDM.
                 */
-               if (port->state != SRC_READY && port->state != SNK_READY)
+               if (port->state != SRC_READY && port->state != SNK_READY) {
+                       port->vdm_sm_running = false;
                        break;
+               }
 
                /* TODO: AMS operation for Unstructured VDM */
                if (PD_VDO_SVDM(vdo_hdr) && PD_VDO_CMDT(vdo_hdr) == CMDT_INIT) {
@@ -2471,10 +2493,7 @@ static void tcpm_pd_data_request(struct tcpm_port *port,
                                           NONE_AMS);
                break;
        case PD_DATA_VENDOR_DEF:
-               if (tcpm_vdm_ams(port) || port->nr_snk_vdo)
-                       tcpm_handle_vdm_request(port, msg->payload, cnt);
-               else if (port->negotiated_rev > PD_REV20)
-                       tcpm_pd_handle_msg(port, PD_MSG_CTRL_NOT_SUPP, NONE_AMS);
+               tcpm_handle_vdm_request(port, msg->payload, cnt);
                break;
        case PD_DATA_BIST:
                port->bist_request = le32_to_cpu(msg->payload[0]);
@@ -2555,10 +2574,6 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
                                                                       TYPEC_PWR_MODE_PD,
                                                                       port->pps_data.active,
                                                                       port->supply_voltage);
-                               /* Set VDM running flag ASAP */
-                               if (port->data_role == TYPEC_HOST &&
-                                   port->send_discover)
-                                       port->vdm_sm_running = true;
                                tcpm_set_state(port, SNK_READY, 0);
                        } else {
                                /*
@@ -2596,14 +2611,10 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
                switch (port->state) {
                case SNK_NEGOTIATE_CAPABILITIES:
                        /* USB PD specification, Figure 8-43 */
-                       if (port->explicit_contract) {
+                       if (port->explicit_contract)
                                next_state = SNK_READY;
-                               if (port->data_role == TYPEC_HOST &&
-                                   port->send_discover)
-                                       port->vdm_sm_running = true;
-                       } else {
+                       else
                                next_state = SNK_WAIT_CAPABILITIES;
-                       }
 
                        /* Threshold was relaxed before sending Request. Restore it back. */
                        tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_PD,
@@ -2618,10 +2629,6 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
                        port->pps_status = (type == PD_CTRL_WAIT ?
                                            -EAGAIN : -EOPNOTSUPP);
 
-                       if (port->data_role == TYPEC_HOST &&
-                           port->send_discover)
-                               port->vdm_sm_running = true;
-
                        /* Threshold was relaxed before sending Request. Restore it back. */
                        tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_PD,
                                                               port->pps_data.active,
@@ -2697,10 +2704,6 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
                        }
                        break;
                case DR_SWAP_SEND:
-                       if (port->data_role == TYPEC_DEVICE &&
-                           port->send_discover)
-                               port->vdm_sm_running = true;
-
                        tcpm_set_state(port, DR_SWAP_CHANGE_DR, 0);
                        break;
                case PR_SWAP_SEND:
@@ -2738,7 +2741,7 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
                                           PD_MSG_CTRL_NOT_SUPP,
                                           NONE_AMS);
                } else {
-                       if (port->vdm_sm_running) {
+                       if (port->send_discover) {
                                tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
                                break;
                        }
@@ -2754,7 +2757,7 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
                                           PD_MSG_CTRL_NOT_SUPP,
                                           NONE_AMS);
                } else {
-                       if (port->vdm_sm_running) {
+                       if (port->send_discover) {
                                tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
                                break;
                        }
@@ -2763,7 +2766,7 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
                }
                break;
        case PD_CTRL_VCONN_SWAP:
-               if (port->vdm_sm_running) {
+               if (port->send_discover) {
                        tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
                        break;
                }
@@ -4479,18 +4482,20 @@ static void run_state_machine(struct tcpm_port *port)
        /* DR_Swap states */
        case DR_SWAP_SEND:
                tcpm_pd_send_control(port, PD_CTRL_DR_SWAP);
+               if (port->data_role == TYPEC_DEVICE || port->negotiated_rev > PD_REV20)
+                       port->send_discover = true;
                tcpm_set_state_cond(port, DR_SWAP_SEND_TIMEOUT,
                                    PD_T_SENDER_RESPONSE);
                break;
        case DR_SWAP_ACCEPT:
                tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
-               /* Set VDM state machine running flag ASAP */
-               if (port->data_role == TYPEC_DEVICE && port->send_discover)
-                       port->vdm_sm_running = true;
+               if (port->data_role == TYPEC_DEVICE || port->negotiated_rev > PD_REV20)
+                       port->send_discover = true;
                tcpm_set_state_cond(port, DR_SWAP_CHANGE_DR, 0);
                break;
        case DR_SWAP_SEND_TIMEOUT:
                tcpm_swap_complete(port, -ETIMEDOUT);
+               port->send_discover = false;
                tcpm_ams_finish(port);
                tcpm_set_state(port, ready_state(port), 0);
                break;
@@ -4502,7 +4507,6 @@ static void run_state_machine(struct tcpm_port *port)
                } else {
                        tcpm_set_roles(port, true, port->pwr_role,
                                       TYPEC_HOST);
-                       port->send_discover = true;
                }
                tcpm_ams_finish(port);
                tcpm_set_state(port, ready_state(port), 0);
@@ -4645,8 +4649,6 @@ static void run_state_machine(struct tcpm_port *port)
                break;
        case VCONN_SWAP_SEND_TIMEOUT:
                tcpm_swap_complete(port, -ETIMEDOUT);
-               if (port->data_role == TYPEC_HOST && port->send_discover)
-                       port->vdm_sm_running = true;
                tcpm_set_state(port, ready_state(port), 0);
                break;
        case VCONN_SWAP_START:
@@ -4662,14 +4664,10 @@ static void run_state_machine(struct tcpm_port *port)
        case VCONN_SWAP_TURN_ON_VCONN:
                tcpm_set_vconn(port, true);
                tcpm_pd_send_control(port, PD_CTRL_PS_RDY);
-               if (port->data_role == TYPEC_HOST && port->send_discover)
-                       port->vdm_sm_running = true;
                tcpm_set_state(port, ready_state(port), 0);
                break;
        case VCONN_SWAP_TURN_OFF_VCONN:
                tcpm_set_vconn(port, false);
-               if (port->data_role == TYPEC_HOST && port->send_discover)
-                       port->vdm_sm_running = true;
                tcpm_set_state(port, ready_state(port), 0);
                break;
 
@@ -4677,8 +4675,6 @@ static void run_state_machine(struct tcpm_port *port)
        case PR_SWAP_CANCEL:
        case VCONN_SWAP_CANCEL:
                tcpm_swap_complete(port, port->swap_status);
-               if (port->data_role == TYPEC_HOST && port->send_discover)
-                       port->vdm_sm_running = true;
                if (port->pwr_role == TYPEC_SOURCE)
                        tcpm_set_state(port, SRC_READY, 0);
                else
@@ -5028,9 +5024,6 @@ static void _tcpm_pd_vbus_on(struct tcpm_port *port)
        switch (port->state) {
        case SNK_TRANSITION_SINK_VBUS:
                port->explicit_contract = true;
-               /* Set the VDM flag ASAP */
-               if (port->data_role == TYPEC_HOST && port->send_discover)
-                       port->vdm_sm_running = true;
                tcpm_set_state(port, SNK_READY, 0);
                break;
        case SNK_DISCOVERY:
@@ -5369,7 +5362,7 @@ EXPORT_SYMBOL_GPL(tcpm_pd_hard_reset);
 void tcpm_sink_frs(struct tcpm_port *port)
 {
        spin_lock(&port->pd_event_lock);
-       port->pd_events = TCPM_FRS_EVENT;
+       port->pd_events |= TCPM_FRS_EVENT;
        spin_unlock(&port->pd_event_lock);
        kthread_queue_work(port->wq, &port->event_work);
 }
@@ -5378,7 +5371,7 @@ EXPORT_SYMBOL_GPL(tcpm_sink_frs);
 void tcpm_sourcing_vbus(struct tcpm_port *port)
 {
        spin_lock(&port->pd_event_lock);
-       port->pd_events = TCPM_SOURCING_VBUS;
+       port->pd_events |= TCPM_SOURCING_VBUS;
        spin_unlock(&port->pd_event_lock);
        kthread_queue_work(port->wq, &port->event_work);
 }
@@ -5425,15 +5418,18 @@ static void tcpm_send_discover_work(struct kthread_work *work)
        if (!port->send_discover)
                goto unlock;
 
+       if (port->data_role == TYPEC_DEVICE && port->negotiated_rev < PD_REV30) {
+               port->send_discover = false;
+               goto unlock;
+       }
+
        /* Retry if the port is not idle */
        if ((port->state != SRC_READY && port->state != SNK_READY) || port->vdm_sm_running) {
                mod_send_discover_delayed_work(port, SEND_DISCOVER_RETRY_MS);
                goto unlock;
        }
 
-       /* Only send the Message if the port is host for PD rev2.0 */
-       if (port->data_role == TYPEC_HOST || port->negotiated_rev > PD_REV20)
-               tcpm_send_vdm(port, USB_SID_PD, CMD_DISCOVER_IDENT, NULL, 0);
+       tcpm_send_vdm(port, USB_SID_PD, CMD_DISCOVER_IDENT, NULL, 0);
 
 unlock:
        mutex_unlock(&port->lock);
index 21b78f1..351c6cf 100644 (file)
@@ -493,9 +493,9 @@ static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
        adapter = vdpa_alloc_device(struct ifcvf_adapter, vdpa,
                                    dev, &ifc_vdpa_ops, NULL);
-       if (adapter == NULL) {
+       if (IS_ERR(adapter)) {
                IFCVF_ERR(pdev, "Failed to allocate vDPA structure");
-               return -ENOMEM;
+               return PTR_ERR(adapter);
        }
 
        pci_set_master(pdev);
index dcee603..e59135f 100644 (file)
@@ -512,11 +512,6 @@ out:
        mutex_unlock(&mr->mkey_mtx);
 }
 
-static bool map_empty(struct vhost_iotlb *iotlb)
-{
-       return !vhost_iotlb_itree_first(iotlb, 0, U64_MAX);
-}
-
 int mlx5_vdpa_handle_set_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
                             bool *change_map)
 {
@@ -524,10 +519,6 @@ int mlx5_vdpa_handle_set_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *io
        int err = 0;
 
        *change_map = false;
-       if (map_empty(iotlb)) {
-               mlx5_vdpa_destroy_mr(mvdev);
-               return 0;
-       }
        mutex_lock(&mr->mkey_mtx);
        if (mr->initialized) {
                mlx5_vdpa_info(mvdev, "memory map update\n");
index 2a31467..3cc12fc 100644 (file)
@@ -526,7 +526,6 @@ static int cq_create(struct mlx5_vdpa_net *ndev, u16 idx, u32 num_ent)
        void __iomem *uar_page = ndev->mvdev.res.uar->map;
        u32 out[MLX5_ST_SZ_DW(create_cq_out)];
        struct mlx5_vdpa_cq *vcq = &mvq->cq;
-       unsigned int irqn;
        __be64 *pas;
        int inlen;
        void *cqc;
@@ -566,7 +565,7 @@ static int cq_create(struct mlx5_vdpa_net *ndev, u16 idx, u32 num_ent)
        /* Use vector 0 by default. Consider adding code to choose least used
         * vector.
         */
-       err = mlx5_vector2eqn(mdev, 0, &eqn, &irqn);
+       err = mlx5_vector2eqn(mdev, 0, &eqn);
        if (err)
                goto err_vec;
 
@@ -753,12 +752,12 @@ static int get_queue_type(struct mlx5_vdpa_net *ndev)
        type_mask = MLX5_CAP_DEV_VDPA_EMULATION(ndev->mvdev.mdev, virtio_queue_type);
 
        /* prefer split queue */
-       if (type_mask & MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_PACKED)
-               return MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_PACKED;
+       if (type_mask & MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_SPLIT)
+               return MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_SPLIT;
 
-       WARN_ON(!(type_mask & MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_SPLIT));
+       WARN_ON(!(type_mask & MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_PACKED));
 
-       return MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_SPLIT;
+       return MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_PACKED;
 }
 
 static bool vq_is_tx(u16 idx)
@@ -2030,6 +2029,12 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name)
                return -ENOSPC;
 
        mdev = mgtdev->madev->mdev;
+       if (!(MLX5_CAP_DEV_VDPA_EMULATION(mdev, virtio_queue_type) &
+           MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_SPLIT)) {
+               dev_warn(mdev->device, "missing support for split virtqueues\n");
+               return -EOPNOTSUPP;
+       }
+
        /* we save one virtqueue for control virtqueue should we require it */
        max_vqs = MLX5_CAP_DEV_VDPA_EMULATION(mdev, max_num_virtio_queues);
        max_vqs = min_t(u32, max_vqs, MLX5_MAX_SUPPORTED_VQS);
index 14e024d..c621cf7 100644 (file)
@@ -251,8 +251,10 @@ struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr)
 
        vdpasim = vdpa_alloc_device(struct vdpasim, vdpa, NULL, ops,
                                    dev_attr->name);
-       if (!vdpasim)
+       if (IS_ERR(vdpasim)) {
+               ret = PTR_ERR(vdpasim);
                goto err_alloc;
+       }
 
        vdpasim->dev_attr = *dev_attr;
        INIT_WORK(&vdpasim->work, dev_attr->work_fn);
index 7b4a639..fe05273 100644 (file)
@@ -436,9 +436,9 @@ static int vp_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
        vp_vdpa = vdpa_alloc_device(struct vp_vdpa, vdpa,
                                    dev, &vp_vdpa_ops, NULL);
-       if (vp_vdpa == NULL) {
+       if (IS_ERR(vp_vdpa)) {
                dev_err(dev, "vp_vdpa: Failed to allocate vDPA structure\n");
-               return -ENOMEM;
+               return PTR_ERR(vp_vdpa);
        }
 
        mdev = &vp_vdpa->mdev;
index 210ab35..9479f7f 100644 (file)
@@ -614,7 +614,8 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
        long pinned;
        int ret = 0;
 
-       if (msg->iova < v->range.first ||
+       if (msg->iova < v->range.first || !msg->size ||
+           msg->iova > U64_MAX - msg->size + 1 ||
            msg->iova + msg->size - 1 > v->range.last)
                return -EINVAL;
 
index b9e853e..59edb5a 100644 (file)
@@ -735,10 +735,16 @@ static bool log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
                         (sz + VHOST_PAGE_SIZE * 8 - 1) / VHOST_PAGE_SIZE / 8);
 }
 
+/* Make sure 64 bit math will not overflow. */
 static bool vhost_overflow(u64 uaddr, u64 size)
 {
-       /* Make sure 64 bit math will not overflow. */
-       return uaddr > ULONG_MAX || size > ULONG_MAX || uaddr > ULONG_MAX - size;
+       if (uaddr > ULONG_MAX || size > ULONG_MAX)
+               return true;
+
+       if (!size)
+               return false;
+
+       return uaddr > ULONG_MAX - size + 1;
 }
 
 /* Caller should have vq mutex and device mutex. */
index 4af8fa2..14e2043 100644 (file)
@@ -359,7 +359,7 @@ __vringh_iov(struct vringh *vrh, u16 i,
                        iov = wiov;
                else {
                        iov = riov;
-                       if (unlikely(wiov && wiov->i)) {
+                       if (unlikely(wiov && wiov->used)) {
                                vringh_bad("Readable desc %p after writable",
                                           &descs[i]);
                                err = -EINVAL;
index 0d002a3..fbc9f10 100644 (file)
@@ -64,6 +64,14 @@ int acrn_vm_destroy(struct acrn_vm *vm)
            test_and_set_bit(ACRN_VM_FLAG_DESTROYED, &vm->flags))
                return 0;
 
+       ret = hcall_destroy_vm(vm->vmid);
+       if (ret < 0) {
+               dev_err(acrn_dev.this_device,
+                       "Failed to destroy VM %u\n", vm->vmid);
+               clear_bit(ACRN_VM_FLAG_DESTROYED, &vm->flags);
+               return ret;
+       }
+
        /* Remove from global VM list */
        write_lock_bh(&acrn_vm_list_lock);
        list_del_init(&vm->list);
@@ -78,14 +86,6 @@ int acrn_vm_destroy(struct acrn_vm *vm)
                vm->monitor_page = NULL;
        }
 
-       ret = hcall_destroy_vm(vm->vmid);
-       if (ret < 0) {
-               dev_err(acrn_dev.this_device,
-                       "Failed to destroy VM %u\n", vm->vmid);
-               clear_bit(ACRN_VM_FLAG_DESTROYED, &vm->flags);
-               return ret;
-       }
-
        acrn_vm_all_ram_unmap(vm);
 
        dev_dbg(acrn_dev.this_device, "VM %u destroyed.\n", vm->vmid);
index 4b15c00..49984d2 100644 (file)
@@ -355,6 +355,7 @@ int register_virtio_device(struct virtio_device *dev)
        virtio_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE);
 
        INIT_LIST_HEAD(&dev->vqs);
+       spin_lock_init(&dev->vqs_list_lock);
 
        /*
         * device_add() causes the bus infrastructure to look for a matching
index 09ed55d..b91bc81 100644 (file)
@@ -1242,12 +1242,19 @@ static void virtio_mem_online_page_cb(struct page *page, unsigned int order)
                        do_online = virtio_mem_bbm_get_bb_state(vm, id) !=
                                    VIRTIO_MEM_BBM_BB_FAKE_OFFLINE;
                }
+
+               /*
+                * virtio_mem_set_fake_offline() might sleep, we don't need
+                * the device anymore. See virtio_mem_remove() how races
+                * between memory onlining and device removal are handled.
+                */
+               rcu_read_unlock();
+
                if (do_online)
                        generic_online_page(page, order);
                else
                        virtio_mem_set_fake_offline(PFN_DOWN(addr), 1 << order,
                                                    false);
-               rcu_read_unlock();
                return;
        }
        rcu_read_unlock();
index 222d630..b35bb2d 100644 (file)
@@ -576,6 +576,13 @@ static void virtio_pci_remove(struct pci_dev *pci_dev)
        struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
        struct device *dev = get_device(&vp_dev->vdev.dev);
 
+       /*
+        * Device is marked broken on surprise removal so that virtio upper
+        * layers can abort any ongoing operation.
+        */
+       if (!pci_device_is_present(pci_dev))
+               virtio_break_device(&vp_dev->vdev);
+
        pci_disable_sriov(pci_dev);
 
        unregister_virtio_device(&vp_dev->vdev);
index 89bfe46..dd95dfd 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/module.h>
 #include <linux/hrtimer.h>
 #include <linux/dma-mapping.h>
+#include <linux/spinlock.h>
 #include <xen/xen.h>
 
 #ifdef DEBUG
@@ -1755,7 +1756,9 @@ static struct virtqueue *vring_create_virtqueue_packed(
                        cpu_to_le16(vq->packed.event_flags_shadow);
        }
 
+       spin_lock(&vdev->vqs_list_lock);
        list_add_tail(&vq->vq.list, &vdev->vqs);
+       spin_unlock(&vdev->vqs_list_lock);
        return &vq->vq;
 
 err_desc_extra:
@@ -2229,7 +2232,9 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index,
        memset(vq->split.desc_state, 0, vring.num *
                        sizeof(struct vring_desc_state_split));
 
+       spin_lock(&vdev->vqs_list_lock);
        list_add_tail(&vq->vq.list, &vdev->vqs);
+       spin_unlock(&vdev->vqs_list_lock);
        return &vq->vq;
 
 err_extra:
@@ -2291,6 +2296,10 @@ void vring_del_virtqueue(struct virtqueue *_vq)
 {
        struct vring_virtqueue *vq = to_vvq(_vq);
 
+       spin_lock(&vq->vq.vdev->vqs_list_lock);
+       list_del(&_vq->list);
+       spin_unlock(&vq->vq.vdev->vqs_list_lock);
+
        if (vq->we_own_ring) {
                if (vq->packed_ring) {
                        vring_free_queue(vq->vq.vdev,
@@ -2321,7 +2330,6 @@ void vring_del_virtqueue(struct virtqueue *_vq)
                kfree(vq->split.desc_state);
                kfree(vq->split.desc_extra);
        }
-       list_del(&_vq->list);
        kfree(vq);
 }
 EXPORT_SYMBOL_GPL(vring_del_virtqueue);
@@ -2373,7 +2381,7 @@ bool virtqueue_is_broken(struct virtqueue *_vq)
 {
        struct vring_virtqueue *vq = to_vvq(_vq);
 
-       return vq->broken;
+       return READ_ONCE(vq->broken);
 }
 EXPORT_SYMBOL_GPL(virtqueue_is_broken);
 
@@ -2385,10 +2393,14 @@ void virtio_break_device(struct virtio_device *dev)
 {
        struct virtqueue *_vq;
 
+       spin_lock(&dev->vqs_list_lock);
        list_for_each_entry(_vq, &dev->vqs, list) {
                struct vring_virtqueue *vq = to_vvq(_vq);
-               vq->broken = true;
+
+               /* Pairs with READ_ONCE() in virtqueue_is_broken(). */
+               WRITE_ONCE(vq->broken, true);
        }
+       spin_unlock(&dev->vqs_list_lock);
 }
 EXPORT_SYMBOL_GPL(virtio_break_device);
 
index e1a1411..72eaef2 100644 (file)
@@ -151,6 +151,9 @@ virtio_vdpa_setup_vq(struct virtio_device *vdev, unsigned int index,
        if (!name)
                return NULL;
 
+       if (index >= vdpa->nvqs)
+               return ERR_PTR(-ENOENT);
+
        /* Queue shouldn't already be set up. */
        if (ops->get_vq_ready(vdpa, index))
                return ERR_PTR(-ENOENT);
index d7e361f..a78704a 100644 (file)
@@ -198,12 +198,12 @@ static void disable_dynirq(struct irq_data *data);
 
 static DEFINE_PER_CPU(unsigned int, irq_epoch);
 
-static void clear_evtchn_to_irq_row(unsigned row)
+static void clear_evtchn_to_irq_row(int *evtchn_row)
 {
        unsigned col;
 
        for (col = 0; col < EVTCHN_PER_ROW; col++)
-               WRITE_ONCE(evtchn_to_irq[row][col], -1);
+               WRITE_ONCE(evtchn_row[col], -1);
 }
 
 static void clear_evtchn_to_irq_all(void)
@@ -213,7 +213,7 @@ static void clear_evtchn_to_irq_all(void)
        for (row = 0; row < EVTCHN_ROW(xen_evtchn_max_channels()); row++) {
                if (evtchn_to_irq[row] == NULL)
                        continue;
-               clear_evtchn_to_irq_row(row);
+               clear_evtchn_to_irq_row(evtchn_to_irq[row]);
        }
 }
 
@@ -221,6 +221,7 @@ static int set_evtchn_to_irq(evtchn_port_t evtchn, unsigned int irq)
 {
        unsigned row;
        unsigned col;
+       int *evtchn_row;
 
        if (evtchn >= xen_evtchn_max_channels())
                return -EINVAL;
@@ -233,11 +234,18 @@ static int set_evtchn_to_irq(evtchn_port_t evtchn, unsigned int irq)
                if (irq == -1)
                        return 0;
 
-               evtchn_to_irq[row] = (int *)get_zeroed_page(GFP_KERNEL);
-               if (evtchn_to_irq[row] == NULL)
+               evtchn_row = (int *) __get_free_pages(GFP_KERNEL, 0);
+               if (evtchn_row == NULL)
                        return -ENOMEM;
 
-               clear_evtchn_to_irq_row(row);
+               clear_evtchn_to_irq_row(evtchn_row);
+
+               /*
+                * We've prepared an empty row for the mapping. If a different
+                * thread was faster inserting it, we can drop ours.
+                */
+               if (cmpxchg(&evtchn_to_irq[row], NULL, evtchn_row) != NULL)
+                       free_page((unsigned long) evtchn_row);
        }
 
        WRITE_ONCE(evtchn_to_irq[row][col], irq);
@@ -1009,7 +1017,7 @@ static void __unbind_from_irq(unsigned int irq)
 int xen_bind_pirq_gsi_to_irq(unsigned gsi,
                             unsigned pirq, int shareable, char *name)
 {
-       int irq = -1;
+       int irq;
        struct physdev_irq irq_op;
        int ret;
 
index 59c32c9..c4a2dc4 100644 (file)
@@ -121,10 +121,6 @@ static int v9fs_file_lock(struct file *filp, int cmd, struct file_lock *fl)
 
        p9_debug(P9_DEBUG_VFS, "filp: %p lock: %p\n", filp, fl);
 
-       /* No mandatory locks */
-       if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK)
-               return -ENOLCK;
-
        if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) {
                filemap_write_and_wait(inode->i_mapping);
                invalidate_mapping_pages(&inode->i_data, 0, -1);
@@ -312,10 +308,6 @@ static int v9fs_file_lock_dotl(struct file *filp, int cmd, struct file_lock *fl)
        p9_debug(P9_DEBUG_VFS, "filp: %p cmd:%d lock: %p name: %pD\n",
                 filp, cmd, fl, filp);
 
-       /* No mandatory locks */
-       if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK)
-               goto out_err;
-
        if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) {
                filemap_write_and_wait(inode->i_mapping);
                invalidate_mapping_pages(&inode->i_data, 0, -1);
@@ -327,7 +319,6 @@ static int v9fs_file_lock_dotl(struct file *filp, int cmd, struct file_lock *fl)
                ret = v9fs_file_getlock(filp, fl);
        else
                ret = -EINVAL;
-out_err:
        return ret;
 }
 
@@ -348,10 +339,6 @@ static int v9fs_file_flock_dotl(struct file *filp, int cmd,
        p9_debug(P9_DEBUG_VFS, "filp: %p cmd:%d lock: %p name: %pD\n",
                 filp, cmd, fl, filp);
 
-       /* No mandatory locks */
-       if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK)
-               goto out_err;
-
        if (!(fl->fl_flags & FL_FLOCK))
                goto out_err;
 
index a7749c1..949128b 100644 (file)
@@ -101,16 +101,6 @@ config FILE_LOCKING
           for filesystems like NFS and for the flock() system
           call. Disabling this option saves about 11k.
 
-config MANDATORY_FILE_LOCKING
-       bool "Enable Mandatory file locking"
-       depends on FILE_LOCKING
-       default y
-       help
-         This option enables files appropriately marked files on appropriely
-         mounted filesystems to support mandatory locking.
-
-         To the best of my knowledge this is dead code that no one cares about.
-
 source "fs/crypto/Kconfig"
 
 source "fs/verity/Kconfig"
index 06fb7a9..4d5ae61 100644 (file)
@@ -168,21 +168,6 @@ config OSF4_COMPAT
          with v4 shared libraries freely available from Compaq. If you're
          going to use shared libraries from Tru64 version 5.0 or later, say N.
 
-config BINFMT_EM86
-       tristate "Kernel support for Linux/Intel ELF binaries"
-       depends on ALPHA
-       help
-         Say Y here if you want to be able to execute Linux/Intel ELF
-         binaries just like native Alpha binaries on your Alpha machine. For
-         this to work, you need to have the emulator /usr/bin/em86 in place.
-
-         You can get the same functionality by saying N here and saying Y to
-         "Kernel support for MISC binaries".
-
-         You may answer M to compile the emulation support as a module and
-         later load the module when you want to use a Linux/Intel binary. The
-         module will be called binfmt_em86. If unsure, say Y.
-
 config BINFMT_MISC
        tristate "Kernel support for MISC binaries"
        help
index 9c708e1..f98f3e6 100644 (file)
@@ -39,7 +39,6 @@ obj-$(CONFIG_FS_ENCRYPTION)   += crypto/
 obj-$(CONFIG_FS_VERITY)                += verity/
 obj-$(CONFIG_FILE_LOCKING)      += locks.o
 obj-$(CONFIG_BINFMT_AOUT)      += binfmt_aout.o
-obj-$(CONFIG_BINFMT_EM86)      += binfmt_em86.o
 obj-$(CONFIG_BINFMT_MISC)      += binfmt_misc.o
 obj-$(CONFIG_BINFMT_SCRIPT)    += binfmt_script.o
 obj-$(CONFIG_BINFMT_ELF)       += binfmt_elf.o
index cb3054c..c4210a3 100644 (file)
@@ -772,10 +772,6 @@ int afs_lock(struct file *file, int cmd, struct file_lock *fl)
               fl->fl_type, fl->fl_flags,
               (long long) fl->fl_start, (long long) fl->fl_end);
 
-       /* AFS doesn't support mandatory locks */
-       if (__mandatory_lock(&vnode->vfs_inode) && fl->fl_type != F_UNLCK)
-               return -ENOLCK;
-
        if (IS_GETLK(cmd))
                return afs_do_getlk(file, fl);
 
index 76ce0cc..51b08ab 100644 (file)
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -1695,7 +1695,7 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
                list_del(&iocb->ki_list);
                iocb->ki_res.res = mangle_poll(mask);
                req->done = true;
-               if (iocb->ki_eventfd && eventfd_signal_count()) {
+               if (iocb->ki_eventfd && eventfd_signal_allowed()) {
                        iocb = NULL;
                        INIT_WORK(&req->work, aio_poll_put_work);
                        schedule_work(&req->work);
diff --git a/fs/binfmt_em86.c b/fs/binfmt_em86.c
deleted file mode 100644 (file)
index 06b9b9f..0000000
+++ /dev/null
@@ -1,110 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- *  linux/fs/binfmt_em86.c
- *
- *  Based on linux/fs/binfmt_script.c
- *  Copyright (C) 1996  Martin von Löwis
- *  original #!-checking implemented by tytso.
- *
- *  em86 changes Copyright (C) 1997  Jim Paradis
- */
-
-#include <linux/module.h>
-#include <linux/string.h>
-#include <linux/stat.h>
-#include <linux/binfmts.h>
-#include <linux/elf.h>
-#include <linux/init.h>
-#include <linux/fs.h>
-#include <linux/file.h>
-#include <linux/errno.h>
-
-
-#define EM86_INTERP    "/usr/bin/em86"
-#define EM86_I_NAME    "em86"
-
-static int load_em86(struct linux_binprm *bprm)
-{
-       const char *i_name, *i_arg;
-       char *interp;
-       struct file * file;
-       int retval;
-       struct elfhdr   elf_ex;
-
-       /* Make sure this is a Linux/Intel ELF executable... */
-       elf_ex = *((struct elfhdr *)bprm->buf);
-
-       if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
-               return  -ENOEXEC;
-
-       /* First of all, some simple consistency checks */
-       if ((elf_ex.e_type != ET_EXEC && elf_ex.e_type != ET_DYN) ||
-               (!((elf_ex.e_machine == EM_386) || (elf_ex.e_machine == EM_486))) ||
-               !bprm->file->f_op->mmap) {
-                       return -ENOEXEC;
-       }
-
-       /* Need to be able to load the file after exec */
-       if (bprm->interp_flags & BINPRM_FLAGS_PATH_INACCESSIBLE)
-               return -ENOENT;
-
-       /* Unlike in the script case, we don't have to do any hairy
-        * parsing to find our interpreter... it's hardcoded!
-        */
-       interp = EM86_INTERP;
-       i_name = EM86_I_NAME;
-       i_arg = NULL;           /* We reserve the right to add an arg later */
-
-       /*
-        * Splice in (1) the interpreter's name for argv[0]
-        *           (2) (optional) argument to interpreter
-        *           (3) filename of emulated file (replace argv[0])
-        *
-        * This is done in reverse order, because of how the
-        * user environment and arguments are stored.
-        */
-       remove_arg_zero(bprm);
-       retval = copy_string_kernel(bprm->filename, bprm);
-       if (retval < 0) return retval; 
-       bprm->argc++;
-       if (i_arg) {
-               retval = copy_string_kernel(i_arg, bprm);
-               if (retval < 0) return retval; 
-               bprm->argc++;
-       }
-       retval = copy_string_kernel(i_name, bprm);
-       if (retval < 0) return retval;
-       bprm->argc++;
-
-       /*
-        * OK, now restart the process with the interpreter's inode.
-        * Note that we use open_exec() as the name is now in kernel
-        * space, and we don't need to copy it.
-        */
-       file = open_exec(interp);
-       if (IS_ERR(file))
-               return PTR_ERR(file);
-
-       bprm->interpreter = file;
-       return 0;
-}
-
-static struct linux_binfmt em86_format = {
-       .module         = THIS_MODULE,
-       .load_binary    = load_em86,
-};
-
-static int __init init_em86_binfmt(void)
-{
-       register_binfmt(&em86_format);
-       return 0;
-}
-
-static void __exit exit_em86_binfmt(void)
-{
-       unregister_binfmt(&em86_format);
-}
-
-core_initcall(init_em86_binfmt);
-module_exit(exit_em86_binfmt);
-MODULE_LICENSE("GPL");
index 0c424a0..1f21ac9 100644 (file)
@@ -35,6 +35,7 @@
 #include <linux/uaccess.h>
 #include <linux/suspend.h>
 #include "internal.h"
+#include "../block/blk.h"
 
 struct bdev_inode {
        struct block_device bdev;
@@ -686,7 +687,8 @@ static loff_t block_llseek(struct file *file, loff_t offset, int whence)
        return retval;
 }
        
-int blkdev_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
+static int blkdev_fsync(struct file *filp, loff_t start, loff_t end,
+               int datasync)
 {
        struct inode *bd_inode = bdev_file_inode(filp);
        struct block_device *bdev = I_BDEV(bd_inode);
@@ -707,7 +709,6 @@ int blkdev_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
 
        return error;
 }
-EXPORT_SYMBOL(blkdev_fsync);
 
 /**
  * bdev_read_page() - Start reading a page from a block device
@@ -801,7 +802,6 @@ static struct inode *bdev_alloc_inode(struct super_block *sb)
        if (!ei)
                return NULL;
        memset(&ei->bdev, 0, sizeof(ei->bdev));
-       ei->bdev.bd_bdi = &noop_backing_dev_info;
        return &ei->vfs_inode;
 }
 
@@ -812,6 +812,15 @@ static void bdev_free_inode(struct inode *inode)
        free_percpu(bdev->bd_stats);
        kfree(bdev->bd_meta_info);
 
+       if (!bdev_is_partition(bdev)) {
+               if (bdev->bd_disk && bdev->bd_disk->bdi)
+                       bdi_put(bdev->bd_disk->bdi);
+               kfree(bdev->bd_disk);
+       }
+
+       if (MAJOR(bdev->bd_dev) == BLOCK_EXT_MAJOR)
+               blk_free_ext_minor(MINOR(bdev->bd_dev));
+
        kmem_cache_free(bdev_cachep, BDEV_I(inode));
 }
 
@@ -824,16 +833,9 @@ static void init_once(void *data)
 
 static void bdev_evict_inode(struct inode *inode)
 {
-       struct block_device *bdev = &BDEV_I(inode)->bdev;
        truncate_inode_pages_final(&inode->i_data);
        invalidate_inode_buffers(inode); /* is it needed here? */
        clear_inode(inode);
-       /* Detach inode from wb early as bdi_put() may free bdi->wb */
-       inode_detach_wb(inode);
-       if (bdev->bd_bdi != &noop_backing_dev_info) {
-               bdi_put(bdev->bd_bdi);
-               bdev->bd_bdi = &noop_backing_dev_info;
-       }
 }
 
 static const struct super_operations bdev_sops = {
@@ -900,9 +902,6 @@ struct block_device *bdev_alloc(struct gendisk *disk, u8 partno)
        bdev->bd_disk = disk;
        bdev->bd_partno = partno;
        bdev->bd_inode = inode;
-#ifdef CONFIG_SYSFS
-       INIT_LIST_HEAD(&bdev->bd_holder_disks);
-#endif
        bdev->bd_stats = alloc_percpu(struct disk_stats);
        if (!bdev->bd_stats) {
                iput(inode);
@@ -919,31 +918,6 @@ void bdev_add(struct block_device *bdev, dev_t dev)
        insert_inode_hash(bdev->bd_inode);
 }
 
-static struct block_device *bdget(dev_t dev)
-{
-       struct inode *inode;
-
-       inode = ilookup(blockdev_superblock, dev);
-       if (!inode)
-               return NULL;
-       return &BDEV_I(inode)->bdev;
-}
-
-/**
- * bdgrab -- Grab a reference to an already referenced block device
- * @bdev:      Block device to grab a reference to.
- *
- * Returns the block_device with an additional reference when successful,
- * or NULL if the inode is already beeing freed.
- */
-struct block_device *bdgrab(struct block_device *bdev)
-{
-       if (!igrab(bdev->bd_inode))
-               return NULL;
-       return bdev;
-}
-EXPORT_SYMBOL(bdgrab);
-
 long nr_blockdev_pages(void)
 {
        struct inode *inode;
@@ -957,12 +931,6 @@ long nr_blockdev_pages(void)
        return ret;
 }
 
-void bdput(struct block_device *bdev)
-{
-       iput(bdev->bd_inode);
-}
-EXPORT_SYMBOL(bdput);
 /**
  * bd_may_claim - test whether a block device can be claimed
  * @bdev: block device of interest
@@ -1092,148 +1060,6 @@ void bd_abort_claiming(struct block_device *bdev, void *holder)
 }
 EXPORT_SYMBOL(bd_abort_claiming);
 
-#ifdef CONFIG_SYSFS
-struct bd_holder_disk {
-       struct list_head        list;
-       struct gendisk          *disk;
-       int                     refcnt;
-};
-
-static struct bd_holder_disk *bd_find_holder_disk(struct block_device *bdev,
-                                                 struct gendisk *disk)
-{
-       struct bd_holder_disk *holder;
-
-       list_for_each_entry(holder, &bdev->bd_holder_disks, list)
-               if (holder->disk == disk)
-                       return holder;
-       return NULL;
-}
-
-static int add_symlink(struct kobject *from, struct kobject *to)
-{
-       return sysfs_create_link(from, to, kobject_name(to));
-}
-
-static void del_symlink(struct kobject *from, struct kobject *to)
-{
-       sysfs_remove_link(from, kobject_name(to));
-}
-
-/**
- * bd_link_disk_holder - create symlinks between holding disk and slave bdev
- * @bdev: the claimed slave bdev
- * @disk: the holding disk
- *
- * DON'T USE THIS UNLESS YOU'RE ALREADY USING IT.
- *
- * This functions creates the following sysfs symlinks.
- *
- * - from "slaves" directory of the holder @disk to the claimed @bdev
- * - from "holders" directory of the @bdev to the holder @disk
- *
- * For example, if /dev/dm-0 maps to /dev/sda and disk for dm-0 is
- * passed to bd_link_disk_holder(), then:
- *
- *   /sys/block/dm-0/slaves/sda --> /sys/block/sda
- *   /sys/block/sda/holders/dm-0 --> /sys/block/dm-0
- *
- * The caller must have claimed @bdev before calling this function and
- * ensure that both @bdev and @disk are valid during the creation and
- * lifetime of these symlinks.
- *
- * CONTEXT:
- * Might sleep.
- *
- * RETURNS:
- * 0 on success, -errno on failure.
- */
-int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk)
-{
-       struct bd_holder_disk *holder;
-       int ret = 0;
-
-       mutex_lock(&bdev->bd_disk->open_mutex);
-
-       WARN_ON_ONCE(!bdev->bd_holder);
-
-       /* FIXME: remove the following once add_disk() handles errors */
-       if (WARN_ON(!disk->slave_dir || !bdev->bd_holder_dir))
-               goto out_unlock;
-
-       holder = bd_find_holder_disk(bdev, disk);
-       if (holder) {
-               holder->refcnt++;
-               goto out_unlock;
-       }
-
-       holder = kzalloc(sizeof(*holder), GFP_KERNEL);
-       if (!holder) {
-               ret = -ENOMEM;
-               goto out_unlock;
-       }
-
-       INIT_LIST_HEAD(&holder->list);
-       holder->disk = disk;
-       holder->refcnt = 1;
-
-       ret = add_symlink(disk->slave_dir, bdev_kobj(bdev));
-       if (ret)
-               goto out_free;
-
-       ret = add_symlink(bdev->bd_holder_dir, &disk_to_dev(disk)->kobj);
-       if (ret)
-               goto out_del;
-       /*
-        * bdev could be deleted beneath us which would implicitly destroy
-        * the holder directory.  Hold on to it.
-        */
-       kobject_get(bdev->bd_holder_dir);
-
-       list_add(&holder->list, &bdev->bd_holder_disks);
-       goto out_unlock;
-
-out_del:
-       del_symlink(disk->slave_dir, bdev_kobj(bdev));
-out_free:
-       kfree(holder);
-out_unlock:
-       mutex_unlock(&bdev->bd_disk->open_mutex);
-       return ret;
-}
-EXPORT_SYMBOL_GPL(bd_link_disk_holder);
-
-/**
- * bd_unlink_disk_holder - destroy symlinks created by bd_link_disk_holder()
- * @bdev: the calimed slave bdev
- * @disk: the holding disk
- *
- * DON'T USE THIS UNLESS YOU'RE ALREADY USING IT.
- *
- * CONTEXT:
- * Might sleep.
- */
-void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk)
-{
-       struct bd_holder_disk *holder;
-
-       mutex_lock(&bdev->bd_disk->open_mutex);
-
-       holder = bd_find_holder_disk(bdev, disk);
-
-       if (!WARN_ON_ONCE(holder == NULL) && !--holder->refcnt) {
-               del_symlink(disk->slave_dir, bdev_kobj(bdev));
-               del_symlink(bdev->bd_holder_dir, &disk_to_dev(disk)->kobj);
-               kobject_put(bdev->bd_holder_dir);
-               list_del_init(&holder->list);
-               kfree(holder);
-       }
-
-       mutex_unlock(&bdev->bd_disk->open_mutex);
-}
-EXPORT_SYMBOL_GPL(bd_unlink_disk_holder);
-#endif
-
 static void blkdev_flush_mapping(struct block_device *bdev)
 {
        WARN_ON_ONCE(bdev->bd_holders);
@@ -1258,11 +1084,8 @@ static int blkdev_get_whole(struct block_device *bdev, fmode_t mode)
                }
        }
 
-       if (!bdev->bd_openers) {
+       if (!bdev->bd_openers)
                set_init_blocksize(bdev);
-               if (bdev->bd_bdi == &noop_backing_dev_info)
-                       bdev->bd_bdi = bdi_get(disk->queue->backing_dev_info);
-       }
        if (test_bit(GD_NEED_PART_SCAN, &disk->state))
                bdev_disk_changed(disk, false);
        bdev->bd_openers++;
@@ -1280,16 +1103,14 @@ static void blkdev_put_whole(struct block_device *bdev, fmode_t mode)
 static int blkdev_get_part(struct block_device *part, fmode_t mode)
 {
        struct gendisk *disk = part->bd_disk;
-       struct block_device *whole;
        int ret;
 
        if (part->bd_openers)
                goto done;
 
-       whole = bdgrab(disk->part0);
-       ret = blkdev_get_whole(whole, mode);
+       ret = blkdev_get_whole(bdev_whole(part), mode);
        if (ret)
-               goto out_put_whole;
+               return ret;
 
        ret = -ENXIO;
        if (!bdev_nr_sectors(part))
@@ -1297,16 +1118,12 @@ static int blkdev_get_part(struct block_device *part, fmode_t mode)
 
        disk->open_partitions++;
        set_init_blocksize(part);
-       if (part->bd_bdi == &noop_backing_dev_info)
-               part->bd_bdi = bdi_get(disk->queue->backing_dev_info);
 done:
        part->bd_openers++;
        return 0;
 
 out_blkdev_put:
-       blkdev_put_whole(whole, mode);
-out_put_whole:
-       bdput(whole);
+       blkdev_put_whole(bdev_whole(part), mode);
        return ret;
 }
 
@@ -1319,42 +1136,42 @@ static void blkdev_put_part(struct block_device *part, fmode_t mode)
        blkdev_flush_mapping(part);
        whole->bd_disk->open_partitions--;
        blkdev_put_whole(whole, mode);
-       bdput(whole);
 }
 
 struct block_device *blkdev_get_no_open(dev_t dev)
 {
        struct block_device *bdev;
-       struct gendisk *disk;
+       struct inode *inode;
 
-       bdev = bdget(dev);
-       if (!bdev) {
+       inode = ilookup(blockdev_superblock, dev);
+       if (!inode) {
                blk_request_module(dev);
-               bdev = bdget(dev);
-               if (!bdev)
+               inode = ilookup(blockdev_superblock, dev);
+               if (!inode)
                        return NULL;
        }
 
-       disk = bdev->bd_disk;
-       if (!kobject_get_unless_zero(&disk_to_dev(disk)->kobj))
-               goto bdput;
-       if ((disk->flags & (GENHD_FL_UP | GENHD_FL_HIDDEN)) != GENHD_FL_UP)
-               goto put_disk;
-       if (!try_module_get(bdev->bd_disk->fops->owner))
-               goto put_disk;
+       /* switch from the inode reference to a device mode one: */
+       bdev = &BDEV_I(inode)->bdev;
+       if (!kobject_get_unless_zero(&bdev->bd_device.kobj))
+               bdev = NULL;
+       iput(inode);
+
+       if (!bdev)
+               return NULL;
+       if ((bdev->bd_disk->flags & GENHD_FL_HIDDEN) ||
+           !try_module_get(bdev->bd_disk->fops->owner)) {
+               put_device(&bdev->bd_device);
+               return NULL;
+       }
+
        return bdev;
-put_disk:
-       put_disk(disk);
-bdput:
-       bdput(bdev);
-       return NULL;
 }
 
 void blkdev_put_no_open(struct block_device *bdev)
 {
        module_put(bdev->bd_disk->fops->owner);
-       put_disk(bdev->bd_disk);
-       bdput(bdev);
+       put_device(&bdev->bd_device);
 }
 
 /**
@@ -1407,7 +1224,7 @@ struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder)
 
        mutex_lock(&disk->open_mutex);
        ret = -ENXIO;
-       if (!(disk->flags & GENHD_FL_UP))
+       if (!disk_live(disk))
                goto abort_claiming;
        if (bdev_is_partition(bdev))
                ret = blkdev_get_part(bdev, mode);
index 9a023ae..30d82cd 100644 (file)
@@ -352,7 +352,7 @@ static void end_compressed_bio_write(struct bio *bio)
        btrfs_record_physical_zoned(inode, cb->start, bio);
        btrfs_writepage_endio_finish_ordered(BTRFS_I(inode), NULL,
                        cb->start, cb->start + cb->len - 1,
-                       bio->bi_status == BLK_STS_OK);
+                       !cb->errors);
 
        end_compressed_writeback(inode, cb);
        /* note, our inode could be gone now */
index b117dd3..a59ab7b 100644 (file)
@@ -209,7 +209,7 @@ void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb,
 static void csum_tree_block(struct extent_buffer *buf, u8 *result)
 {
        struct btrfs_fs_info *fs_info = buf->fs_info;
-       const int num_pages = fs_info->nodesize >> PAGE_SHIFT;
+       const int num_pages = num_extent_pages(buf);
        const int first_page_part = min_t(u32, PAGE_SIZE, fs_info->nodesize);
        SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
        char *kaddr;
index 0117d86..bd5689f 100644 (file)
@@ -629,7 +629,7 @@ again:
         * inode has not been flagged as nocompress.  This flag can
         * change at any time if we discover bad compression ratios.
         */
-       if (nr_pages > 1 && inode_need_compress(BTRFS_I(inode), start, end)) {
+       if (inode_need_compress(BTRFS_I(inode), start, end)) {
                WARN_ON(pages);
                pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
                if (!pages) {
@@ -9226,8 +9226,14 @@ static int btrfs_rename_exchange(struct inode *old_dir,
        bool dest_log_pinned = false;
        bool need_abort = false;
 
-       /* we only allow rename subvolume link between subvolumes */
-       if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
+       /*
+        * For non-subvolumes allow exchange only within one subvolume, in the
+        * same inode namespace. Two subvolumes (represented as directory) can
+        * be exchanged as they're a logical link and have a fixed inode number.
+        */
+       if (root != dest &&
+           (old_ino != BTRFS_FIRST_FREE_OBJECTID ||
+            new_ino != BTRFS_FIRST_FREE_OBJECTID))
                return -EXDEV;
 
        /* close the race window with snapshot create/destroy ioctl */
index 9fd0348..e6430ac 100644 (file)
@@ -6503,8 +6503,8 @@ void btrfs_log_new_name(struct btrfs_trans_handle *trans,
         * if this inode hasn't been logged and directory we're renaming it
         * from hasn't been logged, we don't need to log it
         */
-       if (inode->logged_trans < trans->transid &&
-           (!old_dir || old_dir->logged_trans < trans->transid))
+       if (!inode_logged(trans, inode) &&
+           (!old_dir || !inode_logged(trans, old_dir)))
                return;
 
        /*
index 1e4d43f..70f94b7 100644 (file)
@@ -1078,6 +1078,7 @@ static void __btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices,
                if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
                        list_del_init(&device->dev_alloc_list);
                        clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
+                       fs_devices->rw_devices--;
                }
                list_del_init(&device->dev_list);
                fs_devices->num_devices--;
index a1e2813..7e7a897 100644 (file)
@@ -1395,9 +1395,11 @@ static vm_fault_t ceph_filemap_fault(struct vm_fault *vmf)
                ret = VM_FAULT_SIGBUS;
        } else {
                struct address_space *mapping = inode->i_mapping;
-               struct page *page = find_or_create_page(mapping, 0,
-                                               mapping_gfp_constraint(mapping,
-                                               ~__GFP_FS));
+               struct page *page;
+
+               filemap_invalidate_lock_shared(mapping);
+               page = find_or_create_page(mapping, 0,
+                               mapping_gfp_constraint(mapping, ~__GFP_FS));
                if (!page) {
                        ret = VM_FAULT_OOM;
                        goto out_inline;
@@ -1418,6 +1420,7 @@ static vm_fault_t ceph_filemap_fault(struct vm_fault *vmf)
                vmf->page = page;
                ret = VM_FAULT_MAJOR | VM_FAULT_LOCKED;
 out_inline:
+               filemap_invalidate_unlock_shared(mapping);
                dout("filemap_fault %p %llu read inline data ret %x\n",
                     inode, off, ret);
        }
index 7bdefd0..39db97f 100644 (file)
@@ -1743,7 +1743,11 @@ int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask,
 
 struct ceph_cap_flush *ceph_alloc_cap_flush(void)
 {
-       return kmem_cache_alloc(ceph_cap_flush_cachep, GFP_KERNEL);
+       struct ceph_cap_flush *cf;
+
+       cf = kmem_cache_alloc(ceph_cap_flush_cachep, GFP_KERNEL);
+       cf->is_capsnap = false;
+       return cf;
 }
 
 void ceph_free_cap_flush(struct ceph_cap_flush *cf)
@@ -1778,7 +1782,7 @@ static bool __detach_cap_flush_from_mdsc(struct ceph_mds_client *mdsc,
                prev->wake = true;
                wake = false;
        }
-       list_del(&cf->g_list);
+       list_del_init(&cf->g_list);
        return wake;
 }
 
@@ -1793,7 +1797,7 @@ static bool __detach_cap_flush_from_ci(struct ceph_inode_info *ci,
                prev->wake = true;
                wake = false;
        }
-       list_del(&cf->i_list);
+       list_del_init(&cf->i_list);
        return wake;
 }
 
@@ -2352,7 +2356,7 @@ static void __kick_flushing_caps(struct ceph_mds_client *mdsc,
        ci->i_ceph_flags &= ~CEPH_I_KICK_FLUSH;
 
        list_for_each_entry_reverse(cf, &ci->i_cap_flush_list, i_list) {
-               if (!cf->caps) {
+               if (cf->is_capsnap) {
                        last_snap_flush = cf->tid;
                        break;
                }
@@ -2371,7 +2375,7 @@ static void __kick_flushing_caps(struct ceph_mds_client *mdsc,
 
                first_tid = cf->tid + 1;
 
-               if (cf->caps) {
+               if (!cf->is_capsnap) {
                        struct cap_msg_args arg;
 
                        dout("kick_flushing_caps %p cap %p tid %llu %s\n",
@@ -3516,7 +3520,7 @@ static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
                        cleaned = cf->caps;
 
                /* Is this a capsnap? */
-               if (cf->caps == 0)
+               if (cf->is_capsnap)
                        continue;
 
                if (cf->tid <= flush_tid) {
@@ -3589,8 +3593,9 @@ out:
        while (!list_empty(&to_remove)) {
                cf = list_first_entry(&to_remove,
                                      struct ceph_cap_flush, i_list);
-               list_del(&cf->i_list);
-               ceph_free_cap_flush(cf);
+               list_del_init(&cf->i_list);
+               if (!cf->is_capsnap)
+                       ceph_free_cap_flush(cf);
        }
 
        if (wake_ci)
@@ -4150,11 +4155,19 @@ bad:
 
 /*
  * Delayed work handler to process end of delayed cap release LRU list.
+ *
+ * If new caps are added to the list while processing it, these won't get
+ * processed in this run.  In this case, the ci->i_hold_caps_max will be
+ * returned so that the work can be scheduled accordingly.
  */
-void ceph_check_delayed_caps(struct ceph_mds_client *mdsc)
+unsigned long ceph_check_delayed_caps(struct ceph_mds_client *mdsc)
 {
        struct inode *inode;
        struct ceph_inode_info *ci;
+       struct ceph_mount_options *opt = mdsc->fsc->mount_options;
+       unsigned long delay_max = opt->caps_wanted_delay_max * HZ;
+       unsigned long loop_start = jiffies;
+       unsigned long delay = 0;
 
        dout("check_delayed_caps\n");
        spin_lock(&mdsc->cap_delay_lock);
@@ -4162,6 +4175,11 @@ void ceph_check_delayed_caps(struct ceph_mds_client *mdsc)
                ci = list_first_entry(&mdsc->cap_delay_list,
                                      struct ceph_inode_info,
                                      i_cap_delay_list);
+               if (time_before(loop_start, ci->i_hold_caps_max - delay_max)) {
+                       dout("%s caps added recently.  Exiting loop", __func__);
+                       delay = ci->i_hold_caps_max;
+                       break;
+               }
                if ((ci->i_ceph_flags & CEPH_I_FLUSH) == 0 &&
                    time_before(jiffies, ci->i_hold_caps_max))
                        break;
@@ -4177,6 +4195,8 @@ void ceph_check_delayed_caps(struct ceph_mds_client *mdsc)
                }
        }
        spin_unlock(&mdsc->cap_delay_lock);
+
+       return delay;
 }
 
 /*
index d1755ac..e1d605a 100644 (file)
@@ -2088,6 +2088,7 @@ static long ceph_fallocate(struct file *file, int mode,
        if (ret < 0)
                goto unlock;
 
+       filemap_invalidate_lock(inode->i_mapping);
        ceph_zero_pagecache_range(inode, offset, length);
        ret = ceph_zero_objects(inode, offset, length);
 
@@ -2100,6 +2101,7 @@ static long ceph_fallocate(struct file *file, int mode,
                if (dirty)
                        __mark_inode_dirty(inode, dirty);
        }
+       filemap_invalidate_unlock(inode->i_mapping);
 
        ceph_put_cap_refs(ci, got);
 unlock:
index fa8a847..bdeb271 100644 (file)
@@ -240,9 +240,6 @@ int ceph_lock(struct file *file, int cmd, struct file_lock *fl)
 
        if (!(fl->fl_flags & FL_POSIX))
                return -ENOLCK;
-       /* No mandatory locks */
-       if (__mandatory_lock(file->f_mapping->host) && fl->fl_type != F_UNLCK)
-               return -ENOLCK;
 
        dout("ceph_lock, fl_owner: %p\n", fl->fl_owner);
 
index 9db1b39..0b69aec 100644 (file)
@@ -1616,7 +1616,7 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
                spin_lock(&mdsc->cap_dirty_lock);
 
                list_for_each_entry(cf, &to_remove, i_list)
-                       list_del(&cf->g_list);
+                       list_del_init(&cf->g_list);
 
                if (!list_empty(&ci->i_dirty_item)) {
                        pr_warn_ratelimited(
@@ -1668,8 +1668,9 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
                struct ceph_cap_flush *cf;
                cf = list_first_entry(&to_remove,
                                      struct ceph_cap_flush, i_list);
-               list_del(&cf->i_list);
-               ceph_free_cap_flush(cf);
+               list_del_init(&cf->i_list);
+               if (!cf->is_capsnap)
+                       ceph_free_cap_flush(cf);
        }
 
        wake_up_all(&ci->i_cap_wq);
@@ -4490,22 +4491,29 @@ void inc_session_sequence(struct ceph_mds_session *s)
 }
 
 /*
- * delayed work -- periodically trim expired leases, renew caps with mds
+ * delayed work -- periodically trim expired leases, renew caps with mds.  If
+ * the @delay parameter is set to 0 or if it's more than 5 secs, the default
+ * workqueue delay value of 5 secs will be used.
  */
-static void schedule_delayed(struct ceph_mds_client *mdsc)
+static void schedule_delayed(struct ceph_mds_client *mdsc, unsigned long delay)
 {
-       int delay = 5;
-       unsigned hz = round_jiffies_relative(HZ * delay);
-       schedule_delayed_work(&mdsc->delayed_work, hz);
+       unsigned long max_delay = HZ * 5;
+
+       /* 5 secs default delay */
+       if (!delay || (delay > max_delay))
+               delay = max_delay;
+       schedule_delayed_work(&mdsc->delayed_work,
+                             round_jiffies_relative(delay));
 }
 
 static void delayed_work(struct work_struct *work)
 {
-       int i;
        struct ceph_mds_client *mdsc =
                container_of(work, struct ceph_mds_client, delayed_work.work);
+       unsigned long delay;
        int renew_interval;
        int renew_caps;
+       int i;
 
        dout("mdsc delayed_work\n");
 
@@ -4545,7 +4553,7 @@ static void delayed_work(struct work_struct *work)
        }
        mutex_unlock(&mdsc->mutex);
 
-       ceph_check_delayed_caps(mdsc);
+       delay = ceph_check_delayed_caps(mdsc);
 
        ceph_queue_cap_reclaim_work(mdsc);
 
@@ -4553,7 +4561,7 @@ static void delayed_work(struct work_struct *work)
 
        maybe_recover_session(mdsc);
 
-       schedule_delayed(mdsc);
+       schedule_delayed(mdsc, delay);
 }
 
 int ceph_mdsc_init(struct ceph_fs_client *fsc)
@@ -5030,7 +5038,7 @@ void ceph_mdsc_handle_mdsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
                          mdsc->mdsmap->m_epoch);
 
        mutex_unlock(&mdsc->mutex);
-       schedule_delayed(mdsc);
+       schedule_delayed(mdsc, 0);
        return;
 
 bad_unlock:
index abd9af7..3c444b9 100644 (file)
@@ -394,9 +394,11 @@ void ceph_mdsmap_destroy(struct ceph_mdsmap *m)
 {
        int i;
 
-       for (i = 0; i < m->possible_max_rank; i++)
-               kfree(m->m_info[i].export_targets);
-       kfree(m->m_info);
+       if (m->m_info) {
+               for (i = 0; i < m->possible_max_rank; i++)
+                       kfree(m->m_info[i].export_targets);
+               kfree(m->m_info);
+       }
        kfree(m->m_data_pg_pools);
        kfree(m);
 }
index 4ac0606..15105f9 100644 (file)
@@ -67,19 +67,19 @@ void ceph_get_snap_realm(struct ceph_mds_client *mdsc,
 {
        lockdep_assert_held(&mdsc->snap_rwsem);
 
-       dout("get_realm %p %d -> %d\n", realm,
-            atomic_read(&realm->nref), atomic_read(&realm->nref)+1);
        /*
-        * since we _only_ increment realm refs or empty the empty
-        * list with snap_rwsem held, adjusting the empty list here is
-        * safe.  we do need to protect against concurrent empty list
-        * additions, however.
+        * The 0->1 and 1->0 transitions must take the snap_empty_lock
+        * atomically with the refcount change. Go ahead and bump the
+        * nref here, unless it's 0, in which case we take the spinlock
+        * and then do the increment and remove it from the list.
         */
-       if (atomic_inc_return(&realm->nref) == 1) {
-               spin_lock(&mdsc->snap_empty_lock);
+       if (atomic_inc_not_zero(&realm->nref))
+               return;
+
+       spin_lock(&mdsc->snap_empty_lock);
+       if (atomic_inc_return(&realm->nref) == 1)
                list_del_init(&realm->empty_item);
-               spin_unlock(&mdsc->snap_empty_lock);
-       }
+       spin_unlock(&mdsc->snap_empty_lock);
 }
 
 static void __insert_snap_realm(struct rb_root *root,
@@ -208,28 +208,28 @@ static void __put_snap_realm(struct ceph_mds_client *mdsc,
 {
        lockdep_assert_held_write(&mdsc->snap_rwsem);
 
-       dout("__put_snap_realm %llx %p %d -> %d\n", realm->ino, realm,
-            atomic_read(&realm->nref), atomic_read(&realm->nref)-1);
+       /*
+        * We do not require the snap_empty_lock here, as any caller that
+        * increments the value must hold the snap_rwsem.
+        */
        if (atomic_dec_and_test(&realm->nref))
                __destroy_snap_realm(mdsc, realm);
 }
 
 /*
- * caller needn't hold any locks
+ * See comments in ceph_get_snap_realm. Caller needn't hold any locks.
  */
 void ceph_put_snap_realm(struct ceph_mds_client *mdsc,
                         struct ceph_snap_realm *realm)
 {
-       dout("put_snap_realm %llx %p %d -> %d\n", realm->ino, realm,
-            atomic_read(&realm->nref), atomic_read(&realm->nref)-1);
-       if (!atomic_dec_and_test(&realm->nref))
+       if (!atomic_dec_and_lock(&realm->nref, &mdsc->snap_empty_lock))
                return;
 
        if (down_write_trylock(&mdsc->snap_rwsem)) {
+               spin_unlock(&mdsc->snap_empty_lock);
                __destroy_snap_realm(mdsc, realm);
                up_write(&mdsc->snap_rwsem);
        } else {
-               spin_lock(&mdsc->snap_empty_lock);
                list_add(&realm->empty_item, &mdsc->snap_empty);
                spin_unlock(&mdsc->snap_empty_lock);
        }
@@ -487,6 +487,9 @@ static void ceph_queue_cap_snap(struct ceph_inode_info *ci)
                pr_err("ENOMEM allocating ceph_cap_snap on %p\n", inode);
                return;
        }
+       capsnap->cap_flush.is_capsnap = true;
+       INIT_LIST_HEAD(&capsnap->cap_flush.i_list);
+       INIT_LIST_HEAD(&capsnap->cap_flush.g_list);
 
        spin_lock(&ci->i_ceph_lock);
        used = __ceph_caps_used(ci);
index 6b6332a..b1a3636 100644 (file)
@@ -182,8 +182,9 @@ struct ceph_cap {
 
 struct ceph_cap_flush {
        u64 tid;
-       int caps; /* 0 means capsnap */
+       int caps;
        bool wake; /* wake up flush waiters when finish ? */
+       bool is_capsnap; /* true means capsnap */
        struct list_head g_list; // global
        struct list_head i_list; // per inode
 };
@@ -1167,7 +1168,7 @@ extern void ceph_flush_snaps(struct ceph_inode_info *ci,
 extern bool __ceph_should_report_size(struct ceph_inode_info *ci);
 extern void ceph_check_caps(struct ceph_inode_info *ci, int flags,
                            struct ceph_mds_session *session);
-extern void ceph_check_delayed_caps(struct ceph_mds_client *mdsc);
+extern unsigned long ceph_check_delayed_caps(struct ceph_mds_client *mdsc);
 extern void ceph_flush_dirty_caps(struct ceph_mds_client *mdsc);
 extern int  ceph_drop_caps_for_unlink(struct inode *inode);
 extern int ceph_encode_inode_release(void **p, struct inode *inode,
index c0bfc2f..c6a9542 100644 (file)
@@ -1611,6 +1611,11 @@ struct dfs_info3_param {
        int ttl;
 };
 
+struct file_list {
+       struct list_head list;
+       struct cifsFileInfo *cfile;
+};
+
 /*
  * common struct for holding inode info when searching for or updating an
  * inode with new info
index 79402ca..5f8a302 100644 (file)
@@ -100,7 +100,7 @@ build_path_from_dentry_optional_prefix(struct dentry *direntry, void *page,
        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
                pplen = cifs_sb->prepath ? strlen(cifs_sb->prepath) + 1 : 0;
 
-       s = dentry_path_raw(direntry, page, PAGE_SIZE);
+       s = dentry_path_raw(direntry, page, PATH_MAX);
        if (IS_ERR(s))
                return s;
        if (!s[1])      // for root we want "", not "/"
index cd10860..bb98fbd 100644 (file)
@@ -4619,7 +4619,7 @@ read_complete:
 
 static int cifs_readpage(struct file *file, struct page *page)
 {
-       loff_t offset = (loff_t)page->index << PAGE_SHIFT;
+       loff_t offset = page_file_offset(page);
        int rc = -EACCES;
        unsigned int xid;
 
@@ -4847,17 +4847,6 @@ void cifs_oplock_break(struct work_struct *work)
                cifs_dbg(VFS, "Push locks rc = %d\n", rc);
 
 oplock_break_ack:
-       /*
-        * releasing stale oplock after recent reconnect of smb session using
-        * a now incorrect file handle is not a data integrity issue but do
-        * not bother sending an oplock release if session to server still is
-        * disconnected since oplock already released by the server
-        */
-       if (!cfile->oplock_break_cancelled) {
-               rc = tcon->ses->server->ops->oplock_response(tcon, &cfile->fid,
-                                                            cinode);
-               cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
-       }
        /*
         * When oplock break is received and there are no active
         * file handles but cached, then schedule deferred close immediately.
@@ -4865,17 +4854,27 @@ oplock_break_ack:
         */
        spin_lock(&CIFS_I(inode)->deferred_lock);
        is_deferred = cifs_is_deferred_close(cfile, &dclose);
+       spin_unlock(&CIFS_I(inode)->deferred_lock);
        if (is_deferred &&
            cfile->deferred_close_scheduled &&
            delayed_work_pending(&cfile->deferred)) {
-               /*
-                * If there is no pending work, mod_delayed_work queues new work.
-                * So, Increase the ref count to avoid use-after-free.
-                */
-               if (!mod_delayed_work(deferredclose_wq, &cfile->deferred, 0))
-                       cifsFileInfo_get(cfile);
+               if (cancel_delayed_work(&cfile->deferred)) {
+                       _cifsFileInfo_put(cfile, false, false);
+                       goto oplock_break_done;
+               }
        }
-       spin_unlock(&CIFS_I(inode)->deferred_lock);
+       /*
+        * releasing stale oplock after recent reconnect of smb session using
+        * a now incorrect file handle is not a data integrity issue but do
+        * not bother sending an oplock release if session to server still is
+        * disconnected since oplock already released by the server
+        */
+       if (!cfile->oplock_break_cancelled) {
+               rc = tcon->ses->server->ops->oplock_response(tcon, &cfile->fid,
+                                                            cinode);
+               cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
+       }
+oplock_break_done:
        _cifsFileInfo_put(cfile, false /* do not wait for ourself */, false);
        cifs_done_oplock_break(cinode);
 }
index 9a59d7f..eed59bc 100644 (file)
@@ -925,6 +925,13 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
                ctx->cred_uid = uid;
                ctx->cruid_specified = true;
                break;
+       case Opt_backupuid:
+               uid = make_kuid(current_user_ns(), result.uint_32);
+               if (!uid_valid(uid))
+                       goto cifs_parse_mount_err;
+               ctx->backupuid = uid;
+               ctx->backupuid_specified = true;
+               break;
        case Opt_backupgid:
                gid = make_kgid(current_user_ns(), result.uint_32);
                if (!gid_valid(gid))
index b96b253..65f8a70 100644 (file)
@@ -1625,7 +1625,7 @@ int cifs_unlink(struct inode *dir, struct dentry *dentry)
                goto unlink_out;
        }
 
-       cifs_close_all_deferred_files(tcon);
+       cifs_close_deferred_file(CIFS_I(inode));
        if (cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
                                le64_to_cpu(tcon->fsUnixInfo.Capability))) {
                rc = CIFSPOSIXDelFile(xid, tcon, full_path,
@@ -2084,6 +2084,7 @@ cifs_rename2(struct user_namespace *mnt_userns, struct inode *source_dir,
        FILE_UNIX_BASIC_INFO *info_buf_target;
        unsigned int xid;
        int rc, tmprc;
+       int retry_count = 0;
 
        if (flags & ~RENAME_NOREPLACE)
                return -EINVAL;
@@ -2113,10 +2114,24 @@ cifs_rename2(struct user_namespace *mnt_userns, struct inode *source_dir,
                goto cifs_rename_exit;
        }
 
-       cifs_close_all_deferred_files(tcon);
+       cifs_close_deferred_file(CIFS_I(d_inode(source_dentry)));
+       if (d_inode(target_dentry) != NULL)
+               cifs_close_deferred_file(CIFS_I(d_inode(target_dentry)));
+
        rc = cifs_do_rename(xid, source_dentry, from_name, target_dentry,
                            to_name);
 
+       if (rc == -EACCES) {
+               while (retry_count < 3) {
+                       cifs_close_all_deferred_files(tcon);
+                       rc = cifs_do_rename(xid, source_dentry, from_name, target_dentry,
+                                           to_name);
+                       if (rc != -EACCES)
+                               break;
+                       retry_count++;
+               }
+       }
+
        /*
         * No-replace is the natural behavior for CIFS, so skip unlink hacks.
         */
index 844abeb..9469f1c 100644 (file)
@@ -723,13 +723,31 @@ void
 cifs_close_deferred_file(struct cifsInodeInfo *cifs_inode)
 {
        struct cifsFileInfo *cfile = NULL;
-       struct cifs_deferred_close *dclose;
+       struct file_list *tmp_list, *tmp_next_list;
+       struct list_head file_head;
+
+       if (cifs_inode == NULL)
+               return;
 
+       INIT_LIST_HEAD(&file_head);
+       spin_lock(&cifs_inode->open_file_lock);
        list_for_each_entry(cfile, &cifs_inode->openFileList, flist) {
-               spin_lock(&cifs_inode->deferred_lock);
-               if (cifs_is_deferred_close(cfile, &dclose))
-                       mod_delayed_work(deferredclose_wq, &cfile->deferred, 0);
-               spin_unlock(&cifs_inode->deferred_lock);
+               if (delayed_work_pending(&cfile->deferred)) {
+                       if (cancel_delayed_work(&cfile->deferred)) {
+                               tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
+                               if (tmp_list == NULL)
+                                       continue;
+                               tmp_list->cfile = cfile;
+                               list_add_tail(&tmp_list->list, &file_head);
+                       }
+               }
+       }
+       spin_unlock(&cifs_inode->open_file_lock);
+
+       list_for_each_entry_safe(tmp_list, tmp_next_list, &file_head, list) {
+               _cifsFileInfo_put(tmp_list->cfile, true, false);
+               list_del(&tmp_list->list);
+               kfree(tmp_list);
        }
 }
 
@@ -738,20 +756,30 @@ cifs_close_all_deferred_files(struct cifs_tcon *tcon)
 {
        struct cifsFileInfo *cfile;
        struct list_head *tmp;
+       struct file_list *tmp_list, *tmp_next_list;
+       struct list_head file_head;
 
+       INIT_LIST_HEAD(&file_head);
        spin_lock(&tcon->open_file_lock);
        list_for_each(tmp, &tcon->openFileList) {
                cfile = list_entry(tmp, struct cifsFileInfo, tlist);
                if (delayed_work_pending(&cfile->deferred)) {
-                       /*
-                        * If there is no pending work, mod_delayed_work queues new work.
-                        * So, Increase the ref count to avoid use-after-free.
-                        */
-                       if (!mod_delayed_work(deferredclose_wq, &cfile->deferred, 0))
-                               cifsFileInfo_get(cfile);
+                       if (cancel_delayed_work(&cfile->deferred)) {
+                               tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
+                               if (tmp_list == NULL)
+                                       continue;
+                               tmp_list->cfile = cfile;
+                               list_add_tail(&tmp_list->list, &file_head);
+                       }
                }
        }
        spin_unlock(&tcon->open_file_lock);
+
+       list_for_each_entry_safe(tmp_list, tmp_next_list, &file_head, list) {
+               _cifsFileInfo_put(tmp_list->cfile, true, false);
+               list_del(&tmp_list->list);
+               kfree(tmp_list);
+       }
 }
 
 /* parses DFS refferal V3 structure
index 23d6f4d..ddc0e8f 100644 (file)
@@ -3590,6 +3590,7 @@ static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon,
                return rc;
        }
 
+       filemap_invalidate_lock(inode->i_mapping);
        /*
         * We implement the punch hole through ioctl, so we need remove the page
         * caches first, otherwise the data may be inconsistent with the server.
@@ -3607,6 +3608,7 @@ static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon,
                        sizeof(struct file_zero_data_information),
                        CIFSMaxBufSize, NULL, NULL);
        free_xid(xid);
+       filemap_invalidate_unlock(inode->i_mapping);
        return rc;
 }
 
@@ -3617,7 +3619,8 @@ static int smb3_simple_fallocate_write_range(unsigned int xid,
                                             char *buf)
 {
        struct cifs_io_parms io_parms = {0};
-       int rc, nbytes;
+       int nbytes;
+       int rc = 0;
        struct kvec iov[2];
 
        io_parms.netfid = cfile->fid.netfid;
index 781d14e..b6d2e35 100644 (file)
@@ -2426,7 +2426,7 @@ create_sd_buf(umode_t mode, bool set_owner, unsigned int *len)
        memcpy(aclptr, &acl, sizeof(struct cifs_acl));
 
        buf->ccontext.DataLength = cpu_to_le32(ptr - (__u8 *)&buf->sd);
-       *len = ptr - (__u8 *)buf;
+       *len = roundup(ptr - (__u8 *)buf, 8);
 
        return buf;
 }
index 5a0be99..0ad3215 100644 (file)
@@ -177,28 +177,22 @@ out:
        return retval;
 }
 
-/* Fill [buffer, buffer + pos) with data coming from @from. */
-static int fill_write_buffer(struct configfs_buffer *buffer, loff_t pos,
+/* Fill @buffer with data coming from @from. */
+static int fill_write_buffer(struct configfs_buffer *buffer,
                             struct iov_iter *from)
 {
-       loff_t to_copy;
        int copied;
-       u8 *to;
 
        if (!buffer->page)
                buffer->page = (char *)__get_free_pages(GFP_KERNEL, 0);
        if (!buffer->page)
                return -ENOMEM;
 
-       to_copy = SIMPLE_ATTR_SIZE - 1 - pos;
-       if (to_copy <= 0)
-               return 0;
-       to = buffer->page + pos;
-       copied = copy_from_iter(to, to_copy, from);
+       copied = copy_from_iter(buffer->page, SIMPLE_ATTR_SIZE - 1, from);
        buffer->needs_read_fill = 1;
        /* if buf is assumed to contain a string, terminate it by \0,
         * so e.g. sscanf() can scan the string easily */
-       to[copied] = 0;
+       buffer->page[copied] = 0;
        return copied ? : -EFAULT;
 }
 
@@ -227,10 +221,10 @@ static ssize_t configfs_write_iter(struct kiocb *iocb, struct iov_iter *from)
 {
        struct file *file = iocb->ki_filp;
        struct configfs_buffer *buffer = file->private_data;
-       ssize_t len;
+       int len;
 
        mutex_lock(&buffer->mutex);
-       len = fill_write_buffer(buffer, iocb->ki_pos, from);
+       len = fill_write_buffer(buffer, from);
        if (len > 0)
                len = flush_write_buffer(file, buffer, len);
        if (len > 0)
index da41f93..99b4e78 100644 (file)
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -722,7 +722,7 @@ static int copy_cow_page_dax(struct block_device *bdev, struct dax_device *dax_d
                return rc;
 
        id = dax_read_lock();
-       rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(PAGE_SIZE), &kaddr, NULL);
+       rc = dax_direct_access(dax_dev, pgoff, 1, &kaddr, NULL);
        if (rc < 0) {
                dax_read_unlock(id);
                return rc;
index e265b6d..3627dd7 100644 (file)
@@ -25,8 +25,6 @@
 #include <linux/idr.h>
 #include <linux/uio.h>
 
-DEFINE_PER_CPU(int, eventfd_wake_count);
-
 static DEFINE_IDA(eventfd_ida);
 
 struct eventfd_ctx {
@@ -67,21 +65,21 @@ __u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n)
         * Deadlock or stack overflow issues can happen if we recurse here
         * through waitqueue wakeup handlers. If the caller users potentially
         * nested waitqueues with custom wakeup handlers, then it should
-        * check eventfd_signal_count() before calling this function. If
-        * it returns true, the eventfd_signal() call should be deferred to a
+        * check eventfd_signal_allowed() before calling this function. If
+        * it returns false, the eventfd_signal() call should be deferred to a
         * safe context.
         */
-       if (WARN_ON_ONCE(this_cpu_read(eventfd_wake_count)))
+       if (WARN_ON_ONCE(current->in_eventfd_signal))
                return 0;
 
        spin_lock_irqsave(&ctx->wqh.lock, flags);
-       this_cpu_inc(eventfd_wake_count);
+       current->in_eventfd_signal = 1;
        if (ULLONG_MAX - ctx->count < n)
                n = ULLONG_MAX - ctx->count;
        ctx->count += n;
        if (waitqueue_active(&ctx->wqh))
                wake_up_locked_poll(&ctx->wqh, EPOLLIN);
-       this_cpu_dec(eventfd_wake_count);
+       current->in_eventfd_signal = 0;
        spin_unlock_irqrestore(&ctx->wqh.lock, flags);
 
        return n;
index 54eec91..1248ff4 100644 (file)
@@ -1,6 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0-only
 config EXT2_FS
        tristate "Second extended fs support"
+       select FS_IOMAP
        help
          Ext2 is a standard Linux file system for hard disks.
 
index 14292db..2c2f179 100644 (file)
@@ -106,12 +106,11 @@ static int ext2_commit_chunk(struct page *page, loff_t pos, unsigned len)
        return err;
 }
 
-static bool ext2_check_page(struct page *page, int quiet)
+static bool ext2_check_page(struct page *page, int quiet, char *kaddr)
 {
        struct inode *dir = page->mapping->host;
        struct super_block *sb = dir->i_sb;
        unsigned chunk_size = ext2_chunk_size(dir);
-       char *kaddr = page_address(page);
        u32 max_inumber = le32_to_cpu(EXT2_SB(sb)->s_es->s_inodes_count);
        unsigned offs, rec_len;
        unsigned limit = PAGE_SIZE;
@@ -205,7 +204,8 @@ static struct page * ext2_get_page(struct inode *dir, unsigned long n,
        if (!IS_ERR(page)) {
                *page_addr = kmap_local_page(page);
                if (unlikely(!PageChecked(page))) {
-                       if (PageError(page) || !ext2_check_page(page, quiet))
+                       if (PageError(page) || !ext2_check_page(page, quiet,
+                                                               *page_addr))
                                goto fail;
                }
        }
@@ -584,10 +584,10 @@ out_unlock:
  * ext2_delete_entry deletes a directory entry by merging it with the
  * previous entry. Page is up-to-date.
  */
-int ext2_delete_entry (struct ext2_dir_entry_2 * dir, struct page * page )
+int ext2_delete_entry (struct ext2_dir_entry_2 *dir, struct page *page,
+                       char *kaddr)
 {
        struct inode *inode = page->mapping->host;
-       char *kaddr = page_address(page);
        unsigned from = ((char*)dir - kaddr) & ~(ext2_chunk_size(inode)-1);
        unsigned to = ((char *)dir - kaddr) +
                                ext2_rec_len_from_disk(dir->rec_len);
@@ -607,7 +607,7 @@ int ext2_delete_entry (struct ext2_dir_entry_2 * dir, struct page * page )
                de = ext2_next_entry(de);
        }
        if (pde)
-               from = (char*)pde - (char*)page_address(page);
+               from = (char *)pde - kaddr;
        pos = page_offset(page) + from;
        lock_page(page);
        err = ext2_prepare_chunk(page, pos, to - from);
index b0a6948..3be9dd6 100644 (file)
@@ -667,9 +667,6 @@ struct ext2_inode_info {
        struct rw_semaphore xattr_sem;
 #endif
        rwlock_t i_meta_lock;
-#ifdef CONFIG_FS_DAX
-       struct rw_semaphore dax_sem;
-#endif
 
        /*
         * truncate_mutex is for serialising ext2_truncate() against
@@ -685,14 +682,6 @@ struct ext2_inode_info {
 #endif
 };
 
-#ifdef CONFIG_FS_DAX
-#define dax_sem_down_write(ext2_inode) down_write(&(ext2_inode)->dax_sem)
-#define dax_sem_up_write(ext2_inode)   up_write(&(ext2_inode)->dax_sem)
-#else
-#define dax_sem_down_write(ext2_inode)
-#define dax_sem_up_write(ext2_inode)
-#endif
-
 /*
  * Inode dynamic state flags
  */
@@ -740,7 +729,8 @@ extern int ext2_inode_by_name(struct inode *dir,
 extern int ext2_make_empty(struct inode *, struct inode *);
 extern struct ext2_dir_entry_2 *ext2_find_entry(struct inode *, const struct qstr *,
                                                struct page **, void **res_page_addr);
-extern int ext2_delete_entry (struct ext2_dir_entry_2 *, struct page *);
+extern int ext2_delete_entry(struct ext2_dir_entry_2 *dir, struct page *page,
+                            char *kaddr);
 extern int ext2_empty_dir (struct inode *);
 extern struct ext2_dir_entry_2 *ext2_dotdot(struct inode *dir, struct page **p, void **pa);
 extern void ext2_set_link(struct inode *, struct ext2_dir_entry_2 *, struct page *, void *,
index f98466a..eb97aa3 100644 (file)
@@ -81,7 +81,7 @@ out_unlock:
  *
  * mmap_lock (MM)
  *   sb_start_pagefault (vfs, freeze)
- *     ext2_inode_info->dax_sem
+ *     address_space->invalidate_lock
  *       address_space->i_mmap_rwsem or page_lock (mutually exclusive in DAX)
  *         ext2_inode_info->truncate_mutex
  *
@@ -91,7 +91,6 @@ out_unlock:
 static vm_fault_t ext2_dax_fault(struct vm_fault *vmf)
 {
        struct inode *inode = file_inode(vmf->vma->vm_file);
-       struct ext2_inode_info *ei = EXT2_I(inode);
        vm_fault_t ret;
        bool write = (vmf->flags & FAULT_FLAG_WRITE) &&
                (vmf->vma->vm_flags & VM_SHARED);
@@ -100,11 +99,11 @@ static vm_fault_t ext2_dax_fault(struct vm_fault *vmf)
                sb_start_pagefault(inode->i_sb);
                file_update_time(vmf->vma->vm_file);
        }
-       down_read(&ei->dax_sem);
+       filemap_invalidate_lock_shared(inode->i_mapping);
 
        ret = dax_iomap_fault(vmf, PE_SIZE_PTE, NULL, NULL, &ext2_iomap_ops);
 
-       up_read(&ei->dax_sem);
+       filemap_invalidate_unlock_shared(inode->i_mapping);
        if (write)
                sb_end_pagefault(inode->i_sb);
        return ret;
index dadb121..333fa62 100644 (file)
@@ -799,7 +799,6 @@ int ext2_get_block(struct inode *inode, sector_t iblock,
 
 }
 
-#ifdef CONFIG_FS_DAX
 static int ext2_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
                unsigned flags, struct iomap *iomap, struct iomap *srcmap)
 {
@@ -852,16 +851,18 @@ const struct iomap_ops ext2_iomap_ops = {
        .iomap_begin            = ext2_iomap_begin,
        .iomap_end              = ext2_iomap_end,
 };
-#else
-/* Define empty ops for !CONFIG_FS_DAX case to avoid ugly ifdefs */
-const struct iomap_ops ext2_iomap_ops;
-#endif /* CONFIG_FS_DAX */
 
 int ext2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
                u64 start, u64 len)
 {
-       return generic_block_fiemap(inode, fieinfo, start, len,
-                                   ext2_get_block);
+       int ret;
+
+       inode_lock(inode);
+       len = min_t(u64, len, i_size_read(inode));
+       ret = iomap_fiemap(inode, fieinfo, start, len, &ext2_iomap_ops);
+       inode_unlock(inode);
+
+       return ret;
 }
 
 static int ext2_writepage(struct page *page, struct writeback_control *wbc)
@@ -1177,7 +1178,7 @@ static void ext2_free_branches(struct inode *inode, __le32 *p, __le32 *q, int de
                ext2_free_data(inode, p, q);
 }
 
-/* dax_sem must be held when calling this function */
+/* mapping->invalidate_lock must be held when calling this function */
 static void __ext2_truncate_blocks(struct inode *inode, loff_t offset)
 {
        __le32 *i_data = EXT2_I(inode)->i_data;
@@ -1194,7 +1195,7 @@ static void __ext2_truncate_blocks(struct inode *inode, loff_t offset)
        iblock = (offset + blocksize-1) >> EXT2_BLOCK_SIZE_BITS(inode->i_sb);
 
 #ifdef CONFIG_FS_DAX
-       WARN_ON(!rwsem_is_locked(&ei->dax_sem));
+       WARN_ON(!rwsem_is_locked(&inode->i_mapping->invalidate_lock));
 #endif
 
        n = ext2_block_to_path(inode, iblock, offsets, NULL);
@@ -1276,9 +1277,9 @@ static void ext2_truncate_blocks(struct inode *inode, loff_t offset)
        if (ext2_inode_is_fast_symlink(inode))
                return;
 
-       dax_sem_down_write(EXT2_I(inode));
+       filemap_invalidate_lock(inode->i_mapping);
        __ext2_truncate_blocks(inode, offset);
-       dax_sem_up_write(EXT2_I(inode));
+       filemap_invalidate_unlock(inode->i_mapping);
 }
 
 static int ext2_setsize(struct inode *inode, loff_t newsize)
@@ -1308,10 +1309,10 @@ static int ext2_setsize(struct inode *inode, loff_t newsize)
        if (error)
                return error;
 
-       dax_sem_down_write(EXT2_I(inode));
+       filemap_invalidate_lock(inode->i_mapping);
        truncate_setsize(inode, newsize);
        __ext2_truncate_blocks(inode, newsize);
-       dax_sem_up_write(EXT2_I(inode));
+       filemap_invalidate_unlock(inode->i_mapping);
 
        inode->i_mtime = inode->i_ctime = current_time(inode);
        if (inode_needs_sync(inode)) {
index 1f69b81..5f6b756 100644 (file)
@@ -293,7 +293,7 @@ static int ext2_unlink(struct inode * dir, struct dentry *dentry)
                goto out;
        }
 
-       err = ext2_delete_entry (de, page);
+       err = ext2_delete_entry (de, page, page_addr);
        ext2_put_page(page, page_addr);
        if (err)
                goto out;
@@ -397,7 +397,7 @@ static int ext2_rename (struct user_namespace * mnt_userns,
        old_inode->i_ctime = current_time(old_inode);
        mark_inode_dirty(old_inode);
 
-       ext2_delete_entry(old_de, old_page);
+       ext2_delete_entry(old_de, old_page, old_page_addr);
 
        if (dir_de) {
                if (old_dir != new_dir)
index 21e09fb..987bcf3 100644 (file)
@@ -206,9 +206,6 @@ static void init_once(void *foo)
        init_rwsem(&ei->xattr_sem);
 #endif
        mutex_init(&ei->truncate_mutex);
-#ifdef CONFIG_FS_DAX
-       init_rwsem(&ei->dax_sem);
-#endif
        inode_init_once(&ei->vfs_inode);
 }
 
index 3c51e24..7ebaf66 100644 (file)
@@ -1086,15 +1086,6 @@ struct ext4_inode_info {
         * by other means, so we have i_data_sem.
         */
        struct rw_semaphore i_data_sem;
-       /*
-        * i_mmap_sem is for serializing page faults with truncate / punch hole
-        * operations. We have to make sure that new page cannot be faulted in
-        * a section of the inode that is being punched. We cannot easily use
-        * i_data_sem for this since we need protection for the whole punch
-        * operation and i_data_sem ranks below transaction start so we have
-        * to occasionally drop it.
-        */
-       struct rw_semaphore i_mmap_sem;
        struct inode vfs_inode;
        struct jbd2_inode *jinode;
 
@@ -2972,7 +2963,6 @@ extern int ext4_chunk_trans_blocks(struct inode *, int nrblocks);
 extern int ext4_zero_partial_blocks(handle_t *handle, struct inode *inode,
                             loff_t lstart, loff_t lend);
 extern vm_fault_t ext4_page_mkwrite(struct vm_fault *vmf);
-extern vm_fault_t ext4_filemap_fault(struct vm_fault *vmf);
 extern qsize_t *ext4_get_reserved_space(struct inode *inode);
 extern int ext4_get_projid(struct inode *inode, kprojid_t *projid);
 extern void ext4_da_release_space(struct inode *inode, int to_free);
index b96ecba..b60f015 100644 (file)
@@ -244,9 +244,6 @@ int __ext4_journal_get_write_access(const char *where, unsigned int line,
  * "bh" may be NULL: a metadata block may have been freed from memory
  * but there may still be a record of it in the journal, and that record
  * still needs to be revoked.
- *
- * If the handle isn't valid we're not journaling, but we still need to
- * call into ext4_journal_revoke() to put the buffer head.
  */
 int __ext4_forget(const char *where, unsigned int line, handle_t *handle,
                  int is_metadata, struct inode *inode,
index 92ad64b..c33e0a2 100644 (file)
@@ -4474,6 +4474,7 @@ static long ext4_zero_range(struct file *file, loff_t offset,
                            loff_t len, int mode)
 {
        struct inode *inode = file_inode(file);
+       struct address_space *mapping = file->f_mapping;
        handle_t *handle = NULL;
        unsigned int max_blocks;
        loff_t new_size = 0;
@@ -4560,17 +4561,17 @@ static long ext4_zero_range(struct file *file, loff_t offset,
                 * Prevent page faults from reinstantiating pages we have
                 * released from page cache.
                 */
-               down_write(&EXT4_I(inode)->i_mmap_sem);
+               filemap_invalidate_lock(mapping);
 
                ret = ext4_break_layouts(inode);
                if (ret) {
-                       up_write(&EXT4_I(inode)->i_mmap_sem);
+                       filemap_invalidate_unlock(mapping);
                        goto out_mutex;
                }
 
                ret = ext4_update_disksize_before_punch(inode, offset, len);
                if (ret) {
-                       up_write(&EXT4_I(inode)->i_mmap_sem);
+                       filemap_invalidate_unlock(mapping);
                        goto out_mutex;
                }
                /* Now release the pages and zero block aligned part of pages */
@@ -4579,7 +4580,7 @@ static long ext4_zero_range(struct file *file, loff_t offset,
 
                ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size,
                                             flags);
-               up_write(&EXT4_I(inode)->i_mmap_sem);
+               filemap_invalidate_unlock(mapping);
                if (ret)
                        goto out_mutex;
        }
@@ -5221,6 +5222,7 @@ out:
 static int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
 {
        struct super_block *sb = inode->i_sb;
+       struct address_space *mapping = inode->i_mapping;
        ext4_lblk_t punch_start, punch_stop;
        handle_t *handle;
        unsigned int credits;
@@ -5274,7 +5276,7 @@ static int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
         * Prevent page faults from reinstantiating pages we have released from
         * page cache.
         */
-       down_write(&EXT4_I(inode)->i_mmap_sem);
+       filemap_invalidate_lock(mapping);
 
        ret = ext4_break_layouts(inode);
        if (ret)
@@ -5289,15 +5291,15 @@ static int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
         * Write tail of the last page before removed range since it will get
         * removed from the page cache below.
         */
-       ret = filemap_write_and_wait_range(inode->i_mapping, ioffset, offset);
+       ret = filemap_write_and_wait_range(mapping, ioffset, offset);
        if (ret)
                goto out_mmap;
        /*
         * Write data that will be shifted to preserve them when discarding
         * page cache below. We are also protected from pages becoming dirty
-        * by i_mmap_sem.
+        * by i_rwsem and invalidate_lock.
         */
-       ret = filemap_write_and_wait_range(inode->i_mapping, offset + len,
+       ret = filemap_write_and_wait_range(mapping, offset + len,
                                           LLONG_MAX);
        if (ret)
                goto out_mmap;
@@ -5350,7 +5352,7 @@ out_stop:
        ext4_journal_stop(handle);
        ext4_fc_stop_ineligible(sb);
 out_mmap:
-       up_write(&EXT4_I(inode)->i_mmap_sem);
+       filemap_invalidate_unlock(mapping);
 out_mutex:
        inode_unlock(inode);
        return ret;
@@ -5367,6 +5369,7 @@ out_mutex:
 static int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len)
 {
        struct super_block *sb = inode->i_sb;
+       struct address_space *mapping = inode->i_mapping;
        handle_t *handle;
        struct ext4_ext_path *path;
        struct ext4_extent *extent;
@@ -5425,7 +5428,7 @@ static int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len)
         * Prevent page faults from reinstantiating pages we have released from
         * page cache.
         */
-       down_write(&EXT4_I(inode)->i_mmap_sem);
+       filemap_invalidate_lock(mapping);
 
        ret = ext4_break_layouts(inode);
        if (ret)
@@ -5526,7 +5529,7 @@ out_stop:
        ext4_journal_stop(handle);
        ext4_fc_stop_ineligible(sb);
 out_mmap:
-       up_write(&EXT4_I(inode)->i_mmap_sem);
+       filemap_invalidate_unlock(mapping);
 out_mutex:
        inode_unlock(inode);
        return ret;
index 816dedc..d3b4ed9 100644 (file)
@@ -704,22 +704,23 @@ static vm_fault_t ext4_dax_huge_fault(struct vm_fault *vmf,
         */
        bool write = (vmf->flags & FAULT_FLAG_WRITE) &&
                (vmf->vma->vm_flags & VM_SHARED);
+       struct address_space *mapping = vmf->vma->vm_file->f_mapping;
        pfn_t pfn;
 
        if (write) {
                sb_start_pagefault(sb);
                file_update_time(vmf->vma->vm_file);
-               down_read(&EXT4_I(inode)->i_mmap_sem);
+               filemap_invalidate_lock_shared(mapping);
 retry:
                handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
                                               EXT4_DATA_TRANS_BLOCKS(sb));
                if (IS_ERR(handle)) {
-                       up_read(&EXT4_I(inode)->i_mmap_sem);
+                       filemap_invalidate_unlock_shared(mapping);
                        sb_end_pagefault(sb);
                        return VM_FAULT_SIGBUS;
                }
        } else {
-               down_read(&EXT4_I(inode)->i_mmap_sem);
+               filemap_invalidate_lock_shared(mapping);
        }
        result = dax_iomap_fault(vmf, pe_size, &pfn, &error, &ext4_iomap_ops);
        if (write) {
@@ -731,10 +732,10 @@ retry:
                /* Handling synchronous page fault? */
                if (result & VM_FAULT_NEEDDSYNC)
                        result = dax_finish_sync_fault(vmf, pe_size, pfn);
-               up_read(&EXT4_I(inode)->i_mmap_sem);
+               filemap_invalidate_unlock_shared(mapping);
                sb_end_pagefault(sb);
        } else {
-               up_read(&EXT4_I(inode)->i_mmap_sem);
+               filemap_invalidate_unlock_shared(mapping);
        }
 
        return result;
@@ -756,7 +757,7 @@ static const struct vm_operations_struct ext4_dax_vm_ops = {
 #endif
 
 static const struct vm_operations_struct ext4_file_vm_ops = {
-       .fault          = ext4_filemap_fault,
+       .fault          = filemap_fault,
        .map_pages      = filemap_map_pages,
        .page_mkwrite   = ext4_page_mkwrite,
 };
index d8de607..325c038 100644 (file)
@@ -3950,20 +3950,19 @@ int ext4_update_disksize_before_punch(struct inode *inode, loff_t offset,
        return ret;
 }
 
-static void ext4_wait_dax_page(struct ext4_inode_info *ei)
+static void ext4_wait_dax_page(struct inode *inode)
 {
-       up_write(&ei->i_mmap_sem);
+       filemap_invalidate_unlock(inode->i_mapping);
        schedule();
-       down_write(&ei->i_mmap_sem);
+       filemap_invalidate_lock(inode->i_mapping);
 }
 
 int ext4_break_layouts(struct inode *inode)
 {
-       struct ext4_inode_info *ei = EXT4_I(inode);
        struct page *page;
        int error;
 
-       if (WARN_ON_ONCE(!rwsem_is_locked(&ei->i_mmap_sem)))
+       if (WARN_ON_ONCE(!rwsem_is_locked(&inode->i_mapping->invalidate_lock)))
                return -EINVAL;
 
        do {
@@ -3974,7 +3973,7 @@ int ext4_break_layouts(struct inode *inode)
                error = ___wait_var_event(&page->_refcount,
                                atomic_read(&page->_refcount) == 1,
                                TASK_INTERRUPTIBLE, 0, 0,
-                               ext4_wait_dax_page(ei));
+                               ext4_wait_dax_page(inode));
        } while (error == 0);
 
        return error;
@@ -4005,9 +4004,9 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
 
        ext4_clear_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
        if (ext4_has_inline_data(inode)) {
-               down_write(&EXT4_I(inode)->i_mmap_sem);
+               filemap_invalidate_lock(mapping);
                ret = ext4_convert_inline_data(inode);
-               up_write(&EXT4_I(inode)->i_mmap_sem);
+               filemap_invalidate_unlock(mapping);
                if (ret)
                        return ret;
        }
@@ -4058,7 +4057,7 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
         * Prevent page faults from reinstantiating pages we have released from
         * page cache.
         */
-       down_write(&EXT4_I(inode)->i_mmap_sem);
+       filemap_invalidate_lock(mapping);
 
        ret = ext4_break_layouts(inode);
        if (ret)
@@ -4131,7 +4130,7 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
 out_stop:
        ext4_journal_stop(handle);
 out_dio:
-       up_write(&EXT4_I(inode)->i_mmap_sem);
+       filemap_invalidate_unlock(mapping);
 out_mutex:
        inode_unlock(inode);
        return ret;
@@ -5426,11 +5425,11 @@ int ext4_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
                        inode_dio_wait(inode);
                }
 
-               down_write(&EXT4_I(inode)->i_mmap_sem);
+               filemap_invalidate_lock(inode->i_mapping);
 
                rc = ext4_break_layouts(inode);
                if (rc) {
-                       up_write(&EXT4_I(inode)->i_mmap_sem);
+                       filemap_invalidate_unlock(inode->i_mapping);
                        goto err_out;
                }
 
@@ -5506,7 +5505,7 @@ int ext4_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
                                error = rc;
                }
 out_mmap_sem:
-               up_write(&EXT4_I(inode)->i_mmap_sem);
+               filemap_invalidate_unlock(inode->i_mapping);
        }
 
        if (!error) {
@@ -5983,10 +5982,10 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val)
         * data (and journalled aops don't know how to handle these cases).
         */
        if (val) {
-               down_write(&EXT4_I(inode)->i_mmap_sem);
+               filemap_invalidate_lock(inode->i_mapping);
                err = filemap_write_and_wait(inode->i_mapping);
                if (err < 0) {
-                       up_write(&EXT4_I(inode)->i_mmap_sem);
+                       filemap_invalidate_unlock(inode->i_mapping);
                        return err;
                }
        }
@@ -6019,7 +6018,7 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val)
        percpu_up_write(&sbi->s_writepages_rwsem);
 
        if (val)
-               up_write(&EXT4_I(inode)->i_mmap_sem);
+               filemap_invalidate_unlock(inode->i_mapping);
 
        /* Finally we can mark the inode as dirty. */
 
@@ -6063,7 +6062,7 @@ vm_fault_t ext4_page_mkwrite(struct vm_fault *vmf)
        sb_start_pagefault(inode->i_sb);
        file_update_time(vma->vm_file);
 
-       down_read(&EXT4_I(inode)->i_mmap_sem);
+       filemap_invalidate_lock_shared(mapping);
 
        err = ext4_convert_inline_data(inode);
        if (err)
@@ -6176,7 +6175,7 @@ retry_alloc:
 out_ret:
        ret = block_page_mkwrite_return(err);
 out:
-       up_read(&EXT4_I(inode)->i_mmap_sem);
+       filemap_invalidate_unlock_shared(mapping);
        sb_end_pagefault(inode->i_sb);
        return ret;
 out_error:
@@ -6184,15 +6183,3 @@ out_error:
        ext4_journal_stop(handle);
        goto out;
 }
-
-vm_fault_t ext4_filemap_fault(struct vm_fault *vmf)
-{
-       struct inode *inode = file_inode(vmf->vma->vm_file);
-       vm_fault_t ret;
-
-       down_read(&EXT4_I(inode)->i_mmap_sem);
-       ret = filemap_fault(vmf);
-       up_read(&EXT4_I(inode)->i_mmap_sem);
-
-       return ret;
-}
index 6eed617..4fb5fe0 100644 (file)
@@ -148,7 +148,7 @@ static long swap_inode_boot_loader(struct super_block *sb,
                goto journal_err_out;
        }
 
-       down_write(&EXT4_I(inode)->i_mmap_sem);
+       filemap_invalidate_lock(inode->i_mapping);
        err = filemap_write_and_wait(inode->i_mapping);
        if (err)
                goto err_out;
@@ -256,7 +256,7 @@ err_out1:
        ext4_double_up_write_data_sem(inode, inode_bl);
 
 err_out:
-       up_write(&EXT4_I(inode)->i_mmap_sem);
+       filemap_invalidate_unlock(inode->i_mapping);
 journal_err_out:
        unlock_two_nondirectories(inode, inode_bl);
        iput(inode_bl);
index bc364c1..cebea42 100644 (file)
@@ -138,7 +138,7 @@ static int kmmpd(void *data)
        unsigned mmp_check_interval;
        unsigned long last_update_time;
        unsigned long diff;
-       int retval;
+       int retval = 0;
 
        mmp_block = le64_to_cpu(es->s_mmp_block);
        mmp = (struct mmp_struct *)(bh->b_data);
index 5fd56f6..f3bbcd4 100644 (file)
@@ -2517,7 +2517,7 @@ again:
                                goto journal_error;
                        err = ext4_handle_dirty_dx_node(handle, dir,
                                                        frame->bh);
-                       if (err)
+                       if (restart || err)
                                goto journal_error;
                } else {
                        struct dx_root *dxroot;
index dfa09a2..d6df62f 100644 (file)
@@ -90,12 +90,9 @@ static struct inode *ext4_get_journal_inode(struct super_block *sb,
 /*
  * Lock ordering
  *
- * Note the difference between i_mmap_sem (EXT4_I(inode)->i_mmap_sem) and
- * i_mmap_rwsem (inode->i_mmap_rwsem)!
- *
  * page fault path:
- * mmap_lock -> sb_start_pagefault -> i_mmap_sem (r) -> transaction start ->
- *   page lock -> i_data_sem (rw)
+ * mmap_lock -> sb_start_pagefault -> invalidate_lock (r) -> transaction start
+ *   -> page lock -> i_data_sem (rw)
  *
  * buffered write path:
  * sb_start_write -> i_mutex -> mmap_lock
@@ -103,8 +100,9 @@ static struct inode *ext4_get_journal_inode(struct super_block *sb,
  *   i_data_sem (rw)
  *
  * truncate:
- * sb_start_write -> i_mutex -> i_mmap_sem (w) -> i_mmap_rwsem (w) -> page lock
- * sb_start_write -> i_mutex -> i_mmap_sem (w) -> transaction start ->
+ * sb_start_write -> i_mutex -> invalidate_lock (w) -> i_mmap_rwsem (w) ->
+ *   page lock
+ * sb_start_write -> i_mutex -> invalidate_lock (w) -> transaction start ->
  *   i_data_sem (rw)
  *
  * direct IO:
@@ -1360,7 +1358,6 @@ static void init_once(void *foo)
        INIT_LIST_HEAD(&ei->i_orphan);
        init_rwsem(&ei->xattr_sem);
        init_rwsem(&ei->i_data_sem);
-       init_rwsem(&ei->i_mmap_sem);
        inode_init_once(&ei->vfs_inode);
        ext4_fc_init_inode(&ei->vfs_inode);
 }
index bcbe366..ce84aa2 100644 (file)
  */
 static inline void ext4_truncate_failed_write(struct inode *inode)
 {
+       struct address_space *mapping = inode->i_mapping;
+
        /*
         * We don't need to call ext4_break_layouts() because the blocks we
         * are truncating were never visible to userspace.
         */
-       down_write(&EXT4_I(inode)->i_mmap_sem);
-       truncate_inode_pages(inode->i_mapping, inode->i_size);
+       filemap_invalidate_lock(mapping);
+       truncate_inode_pages(mapping, inode->i_size);
        ext4_truncate(inode);
-       up_write(&EXT4_I(inode)->i_mmap_sem);
+       filemap_invalidate_unlock(mapping);
 }
 
 /*
index d2cf48c..eb222b3 100644 (file)
@@ -3187,12 +3187,12 @@ static void f2fs_write_failed(struct address_space *mapping, loff_t to)
        /* In the fs-verity case, f2fs_end_enable_verity() does the truncate */
        if (to > i_size && !f2fs_verity_in_progress(inode)) {
                down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-               down_write(&F2FS_I(inode)->i_mmap_sem);
+               filemap_invalidate_lock(mapping);
 
                truncate_pagecache(inode, i_size);
                f2fs_truncate_blocks(inode, i_size, true);
 
-               up_write(&F2FS_I(inode)->i_mmap_sem);
+               filemap_invalidate_unlock(mapping);
                up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
        }
 }
@@ -3852,7 +3852,7 @@ static int f2fs_migrate_blocks(struct inode *inode, block_t start_blk,
        int ret = 0;
 
        down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-       down_write(&F2FS_I(inode)->i_mmap_sem);
+       filemap_invalidate_lock(inode->i_mapping);
 
        set_inode_flag(inode, FI_ALIGNED_WRITE);
 
@@ -3894,7 +3894,7 @@ done:
        clear_inode_flag(inode, FI_DO_DEFRAG);
        clear_inode_flag(inode, FI_ALIGNED_WRITE);
 
-       up_write(&F2FS_I(inode)->i_mmap_sem);
+       filemap_invalidate_unlock(inode->i_mapping);
        up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 
        return ret;
index ee8eb33..906b2c4 100644 (file)
@@ -754,7 +754,6 @@ struct f2fs_inode_info {
 
        /* avoid racing between foreground op and gc */
        struct rw_semaphore i_gc_rwsem[2];
-       struct rw_semaphore i_mmap_sem;
        struct rw_semaphore i_xattr_sem; /* avoid racing between reading and changing EAs */
 
        int i_extra_isize;              /* size of extra space located in i_addr */
index 6afd456..1ff3337 100644 (file)
@@ -38,10 +38,7 @@ static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf)
        struct inode *inode = file_inode(vmf->vma->vm_file);
        vm_fault_t ret;
 
-       down_read(&F2FS_I(inode)->i_mmap_sem);
        ret = filemap_fault(vmf);
-       up_read(&F2FS_I(inode)->i_mmap_sem);
-
        if (!ret)
                f2fs_update_iostat(F2FS_I_SB(inode), APP_MAPPED_READ_IO,
                                                        F2FS_BLKSIZE);
@@ -101,7 +98,7 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
        f2fs_bug_on(sbi, f2fs_has_inline_data(inode));
 
        file_update_time(vmf->vma->vm_file);
-       down_read(&F2FS_I(inode)->i_mmap_sem);
+       filemap_invalidate_lock_shared(inode->i_mapping);
        lock_page(page);
        if (unlikely(page->mapping != inode->i_mapping ||
                        page_offset(page) > i_size_read(inode) ||
@@ -159,7 +156,7 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
 
        trace_f2fs_vm_page_mkwrite(page, DATA);
 out_sem:
-       up_read(&F2FS_I(inode)->i_mmap_sem);
+       filemap_invalidate_unlock_shared(inode->i_mapping);
 
        sb_end_pagefault(inode->i_sb);
 err:
@@ -940,7 +937,7 @@ int f2fs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
                }
 
                down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-               down_write(&F2FS_I(inode)->i_mmap_sem);
+               filemap_invalidate_lock(inode->i_mapping);
 
                truncate_setsize(inode, attr->ia_size);
 
@@ -950,7 +947,7 @@ int f2fs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
                 * do not trim all blocks after i_size if target size is
                 * larger than i_size.
                 */
-               up_write(&F2FS_I(inode)->i_mmap_sem);
+               filemap_invalidate_unlock(inode->i_mapping);
                up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
                if (err)
                        return err;
@@ -1095,7 +1092,7 @@ static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
                        blk_end = (loff_t)pg_end << PAGE_SHIFT;
 
                        down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-                       down_write(&F2FS_I(inode)->i_mmap_sem);
+                       filemap_invalidate_lock(mapping);
 
                        truncate_inode_pages_range(mapping, blk_start,
                                        blk_end - 1);
@@ -1104,7 +1101,7 @@ static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
                        ret = f2fs_truncate_hole(inode, pg_start, pg_end);
                        f2fs_unlock_op(sbi);
 
-                       up_write(&F2FS_I(inode)->i_mmap_sem);
+                       filemap_invalidate_unlock(mapping);
                        up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
                }
        }
@@ -1339,7 +1336,7 @@ static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len)
 
        /* avoid gc operation during block exchange */
        down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-       down_write(&F2FS_I(inode)->i_mmap_sem);
+       filemap_invalidate_lock(inode->i_mapping);
 
        f2fs_lock_op(sbi);
        f2fs_drop_extent_tree(inode);
@@ -1347,7 +1344,7 @@ static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len)
        ret = __exchange_data_block(inode, inode, end, start, nrpages - end, true);
        f2fs_unlock_op(sbi);
 
-       up_write(&F2FS_I(inode)->i_mmap_sem);
+       filemap_invalidate_unlock(inode->i_mapping);
        up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
        return ret;
 }
@@ -1378,13 +1375,13 @@ static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
                return ret;
 
        /* write out all moved pages, if possible */
-       down_write(&F2FS_I(inode)->i_mmap_sem);
+       filemap_invalidate_lock(inode->i_mapping);
        filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
        truncate_pagecache(inode, offset);
 
        new_size = i_size_read(inode) - len;
        ret = f2fs_truncate_blocks(inode, new_size, true);
-       up_write(&F2FS_I(inode)->i_mmap_sem);
+       filemap_invalidate_unlock(inode->i_mapping);
        if (!ret)
                f2fs_i_size_write(inode, new_size);
        return ret;
@@ -1484,7 +1481,7 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
                        pgoff_t end;
 
                        down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-                       down_write(&F2FS_I(inode)->i_mmap_sem);
+                       filemap_invalidate_lock(mapping);
 
                        truncate_pagecache_range(inode,
                                (loff_t)index << PAGE_SHIFT,
@@ -1496,7 +1493,7 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
                        ret = f2fs_get_dnode_of_data(&dn, index, ALLOC_NODE);
                        if (ret) {
                                f2fs_unlock_op(sbi);
-                               up_write(&F2FS_I(inode)->i_mmap_sem);
+                               filemap_invalidate_unlock(mapping);
                                up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
                                goto out;
                        }
@@ -1508,7 +1505,7 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
                        f2fs_put_dnode(&dn);
 
                        f2fs_unlock_op(sbi);
-                       up_write(&F2FS_I(inode)->i_mmap_sem);
+                       filemap_invalidate_unlock(mapping);
                        up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 
                        f2fs_balance_fs(sbi, dn.node_changed);
@@ -1543,6 +1540,7 @@ out:
 static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
 {
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+       struct address_space *mapping = inode->i_mapping;
        pgoff_t nr, pg_start, pg_end, delta, idx;
        loff_t new_size;
        int ret = 0;
@@ -1565,14 +1563,14 @@ static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
 
        f2fs_balance_fs(sbi, true);
 
-       down_write(&F2FS_I(inode)->i_mmap_sem);
+       filemap_invalidate_lock(mapping);
        ret = f2fs_truncate_blocks(inode, i_size_read(inode), true);
-       up_write(&F2FS_I(inode)->i_mmap_sem);
+       filemap_invalidate_unlock(mapping);
        if (ret)
                return ret;
 
        /* write out all dirty pages from offset */
-       ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
+       ret = filemap_write_and_wait_range(mapping, offset, LLONG_MAX);
        if (ret)
                return ret;
 
@@ -1583,7 +1581,7 @@ static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
 
        /* avoid gc operation during block exchange */
        down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-       down_write(&F2FS_I(inode)->i_mmap_sem);
+       filemap_invalidate_lock(mapping);
        truncate_pagecache(inode, offset);
 
        while (!ret && idx > pg_start) {
@@ -1599,14 +1597,14 @@ static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
                                        idx + delta, nr, false);
                f2fs_unlock_op(sbi);
        }
-       up_write(&F2FS_I(inode)->i_mmap_sem);
+       filemap_invalidate_unlock(mapping);
        up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 
        /* write out all moved pages, if possible */
-       down_write(&F2FS_I(inode)->i_mmap_sem);
-       filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
+       filemap_invalidate_lock(mapping);
+       filemap_write_and_wait_range(mapping, offset, LLONG_MAX);
        truncate_pagecache(inode, offset);
-       up_write(&F2FS_I(inode)->i_mmap_sem);
+       filemap_invalidate_unlock(mapping);
 
        if (!ret)
                f2fs_i_size_write(inode, new_size);
@@ -3440,7 +3438,7 @@ static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
                goto out;
 
        down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-       down_write(&F2FS_I(inode)->i_mmap_sem);
+       filemap_invalidate_lock(inode->i_mapping);
 
        last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
 
@@ -3476,7 +3474,7 @@ static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
        }
 
        up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-       up_write(&F2FS_I(inode)->i_mmap_sem);
+       filemap_invalidate_unlock(inode->i_mapping);
 out:
        inode_unlock(inode);
 
@@ -3593,7 +3591,7 @@ static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
        }
 
        down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-       down_write(&F2FS_I(inode)->i_mmap_sem);
+       filemap_invalidate_lock(inode->i_mapping);
 
        last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
 
@@ -3629,7 +3627,7 @@ static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
        }
 
        up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-       up_write(&F2FS_I(inode)->i_mmap_sem);
+       filemap_invalidate_unlock(inode->i_mapping);
 
        if (ret >= 0) {
                clear_inode_flag(inode, FI_COMPRESS_RELEASED);
@@ -3748,7 +3746,7 @@ static int f2fs_sec_trim_file(struct file *filp, unsigned long arg)
                goto err;
 
        down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-       down_write(&F2FS_I(inode)->i_mmap_sem);
+       filemap_invalidate_lock(mapping);
 
        ret = filemap_write_and_wait_range(mapping, range.start,
                        to_end ? LLONG_MAX : end_addr - 1);
@@ -3835,7 +3833,7 @@ static int f2fs_sec_trim_file(struct file *filp, unsigned long arg)
                ret = f2fs_secure_erase(prev_bdev, inode, prev_index,
                                prev_block, len, range.flags);
 out:
-       up_write(&F2FS_I(inode)->i_mmap_sem);
+       filemap_invalidate_unlock(mapping);
        up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 err:
        inode_unlock(inode);
@@ -4313,9 +4311,9 @@ write:
                /* if we couldn't write data, we should deallocate blocks. */
                if (preallocated && i_size_read(inode) < target_size) {
                        down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-                       down_write(&F2FS_I(inode)->i_mmap_sem);
+                       filemap_invalidate_lock(inode->i_mapping);
                        f2fs_truncate(inode);
-                       up_write(&F2FS_I(inode)->i_mmap_sem);
+                       filemap_invalidate_unlock(inode->i_mapping);
                        up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
                }
 
index 8fecd30..ce2ab1b 100644 (file)
@@ -1289,7 +1289,6 @@ static struct inode *f2fs_alloc_inode(struct super_block *sb)
        mutex_init(&fi->inmem_lock);
        init_rwsem(&fi->i_gc_rwsem[READ]);
        init_rwsem(&fi->i_gc_rwsem[WRITE]);
-       init_rwsem(&fi->i_mmap_sem);
        init_rwsem(&fi->i_xattr_sem);
 
        /* Will be used by directory only */
index 6642246..daad532 100644 (file)
@@ -378,7 +378,7 @@ out:
                ret = kstrtol(name, 10, &data);
                if (ret)
                        return ret;
-               if (data >= IOPRIO_BE_NR || data < 0)
+               if (data >= IOPRIO_NR_LEVELS || data < 0)
                        return -EINVAL;
 
                cprc->ckpt_thread_ioprio = IOPRIO_PRIO_VALUE(class, data);
index 860e884..978ac67 100644 (file)
@@ -5,6 +5,7 @@
 
 #include <linux/blkdev.h>
 #include <linux/sched/signal.h>
+#include <linux/backing-dev-defs.h>
 #include "fat.h"
 
 struct fatent_operations {
index f946bec..68added 100644 (file)
@@ -150,7 +150,8 @@ void f_delown(struct file *filp)
 pid_t f_getown(struct file *filp)
 {
        pid_t pid = 0;
-       read_lock(&filp->f_owner.lock);
+
+       read_lock_irq(&filp->f_owner.lock);
        rcu_read_lock();
        if (pid_task(filp->f_owner.pid, filp->f_owner.pid_type)) {
                pid = pid_vnr(filp->f_owner.pid);
@@ -158,7 +159,7 @@ pid_t f_getown(struct file *filp)
                        pid = -pid;
        }
        rcu_read_unlock();
-       read_unlock(&filp->f_owner.lock);
+       read_unlock_irq(&filp->f_owner.lock);
        return pid;
 }
 
@@ -208,7 +209,7 @@ static int f_getown_ex(struct file *filp, unsigned long arg)
        struct f_owner_ex owner = {};
        int ret = 0;
 
-       read_lock(&filp->f_owner.lock);
+       read_lock_irq(&filp->f_owner.lock);
        rcu_read_lock();
        if (pid_task(filp->f_owner.pid, filp->f_owner.pid_type))
                owner.pid = pid_vnr(filp->f_owner.pid);
@@ -231,7 +232,7 @@ static int f_getown_ex(struct file *filp, unsigned long arg)
                ret = -EINVAL;
                break;
        }
-       read_unlock(&filp->f_owner.lock);
+       read_unlock_irq(&filp->f_owner.lock);
 
        if (!ret) {
                ret = copy_to_user(owner_p, &owner, sizeof(owner));
@@ -249,10 +250,10 @@ static int f_getowner_uids(struct file *filp, unsigned long arg)
        uid_t src[2];
        int err;
 
-       read_lock(&filp->f_owner.lock);
+       read_lock_irq(&filp->f_owner.lock);
        src[0] = from_kuid(user_ns, filp->f_owner.uid);
        src[1] = from_kuid(user_ns, filp->f_owner.euid);
-       read_unlock(&filp->f_owner.lock);
+       read_unlock_irq(&filp->f_owner.lock);
 
        err  = put_user(src[0], &dst[0]);
        err |= put_user(src[1], &dst[1]);
@@ -1003,13 +1004,14 @@ static void kill_fasync_rcu(struct fasync_struct *fa, int sig, int band)
 {
        while (fa) {
                struct fown_struct *fown;
+               unsigned long flags;
 
                if (fa->magic != FASYNC_MAGIC) {
                        printk(KERN_ERR "kill_fasync: bad magic number in "
                               "fasync_struct!\n");
                        return;
                }
-               read_lock(&fa->fa_lock);
+               read_lock_irqsave(&fa->fa_lock, flags);
                if (fa->fa_file) {
                        fown = &fa->fa_file->f_owner;
                        /* Don't send SIGURG to processes which have not set a
@@ -1018,7 +1020,7 @@ static void kill_fasync_rcu(struct fasync_struct *fa, int sig, int band)
                        if (!(sig == SIGURG && fown->signum == 0))
                                send_sigio(fown, fa->fa_fd, band);
                }
-               read_unlock(&fa->fa_lock);
+               read_unlock_irqrestore(&fa->fa_lock, flags);
                fa = rcu_dereference(fa->fa_next);
        }
 }
index e557237..281d79f 100644 (file)
@@ -444,12 +444,12 @@ static int fuse_setup_new_dax_mapping(struct inode *inode, loff_t pos,
        /*
         * Can't do inline reclaim in fault path. We call
         * dax_layout_busy_page() before we free a range. And
-        * fuse_wait_dax_page() drops fi->i_mmap_sem lock and requires it.
-        * In fault path we enter with fi->i_mmap_sem held and can't drop
-        * it. Also in fault path we hold fi->i_mmap_sem shared and not
-        * exclusive, so that creates further issues with fuse_wait_dax_page().
-        * Hence return -EAGAIN and fuse_dax_fault() will wait for a memory
-        * range to become free and retry.
+        * fuse_wait_dax_page() drops mapping->invalidate_lock and requires it.
+        * In fault path we enter with mapping->invalidate_lock held and can't
+        * drop it. Also in fault path we hold mapping->invalidate_lock shared
+        * and not exclusive, so that creates further issues with
+        * fuse_wait_dax_page().  Hence return -EAGAIN and fuse_dax_fault()
+        * will wait for a memory range to become free and retry.
         */
        if (flags & IOMAP_FAULT) {
                alloc_dmap = alloc_dax_mapping(fcd);
@@ -513,7 +513,7 @@ static int fuse_upgrade_dax_mapping(struct inode *inode, loff_t pos,
        down_write(&fi->dax->sem);
        node = interval_tree_iter_first(&fi->dax->tree, idx, idx);
 
-       /* We are holding either inode lock or i_mmap_sem, and that should
+       /* We are holding either inode lock or invalidate_lock, and that should
         * ensure that dmap can't be truncated. We are holding a reference
         * on dmap and that should make sure it can't be reclaimed. So dmap
         * should still be there in tree despite the fact we dropped and
@@ -660,14 +660,12 @@ static const struct iomap_ops fuse_iomap_ops = {
 
 static void fuse_wait_dax_page(struct inode *inode)
 {
-       struct fuse_inode *fi = get_fuse_inode(inode);
-
-       up_write(&fi->i_mmap_sem);
+       filemap_invalidate_unlock(inode->i_mapping);
        schedule();
-       down_write(&fi->i_mmap_sem);
+       filemap_invalidate_lock(inode->i_mapping);
 }
 
-/* Should be called with fi->i_mmap_sem lock held exclusively */
+/* Should be called with mapping->invalidate_lock held exclusively */
 static int __fuse_dax_break_layouts(struct inode *inode, bool *retry,
                                    loff_t start, loff_t end)
 {
@@ -813,18 +811,18 @@ retry:
         * we do not want any read/write/mmap to make progress and try
         * to populate page cache or access memory we are trying to free.
         */
-       down_read(&get_fuse_inode(inode)->i_mmap_sem);
+       filemap_invalidate_lock_shared(inode->i_mapping);
        ret = dax_iomap_fault(vmf, pe_size, &pfn, &error, &fuse_iomap_ops);
        if ((ret & VM_FAULT_ERROR) && error == -EAGAIN) {
                error = 0;
                retry = true;
-               up_read(&get_fuse_inode(inode)->i_mmap_sem);
+               filemap_invalidate_unlock_shared(inode->i_mapping);
                goto retry;
        }
 
        if (ret & VM_FAULT_NEEDDSYNC)
                ret = dax_finish_sync_fault(vmf, pe_size, pfn);
-       up_read(&get_fuse_inode(inode)->i_mmap_sem);
+       filemap_invalidate_unlock_shared(inode->i_mapping);
 
        if (write)
                sb_end_pagefault(sb);
@@ -960,7 +958,7 @@ inode_inline_reclaim_one_dmap(struct fuse_conn_dax *fcd, struct inode *inode,
        int ret;
        struct interval_tree_node *node;
 
-       down_write(&fi->i_mmap_sem);
+       filemap_invalidate_lock(inode->i_mapping);
 
        /* Lookup a dmap and corresponding file offset to reclaim. */
        down_read(&fi->dax->sem);
@@ -1021,7 +1019,7 @@ inode_inline_reclaim_one_dmap(struct fuse_conn_dax *fcd, struct inode *inode,
 out_write_dmap_sem:
        up_write(&fi->dax->sem);
 out_mmap_sem:
-       up_write(&fi->i_mmap_sem);
+       filemap_invalidate_unlock(inode->i_mapping);
        return dmap;
 }
 
@@ -1050,10 +1048,10 @@ alloc_dax_mapping_reclaim(struct fuse_conn_dax *fcd, struct inode *inode)
                 * had a reference or some other temporary failure,
                 * Try again. We want to give up inline reclaim only
                 * if there is no range assigned to this node. Otherwise
-                * if a deadlock is possible if we sleep with fi->i_mmap_sem
-                * held and worker to free memory can't make progress due
-                * to unavailability of fi->i_mmap_sem lock. So sleep
-                * only if fi->dax->nr=0
+                * a deadlock is possible if we sleep with
+                * mapping->invalidate_lock held and the worker freeing
+                * memory can't make progress due to unavailability of
+                * mapping->invalidate_lock.  So sleep only if fi->dax->nr=0
                 */
                if (retry)
                        continue;
@@ -1061,8 +1059,8 @@ alloc_dax_mapping_reclaim(struct fuse_conn_dax *fcd, struct inode *inode)
                 * There are no mappings which can be reclaimed. Wait for one.
                 * We are not holding fi->dax->sem. So it is possible
                 * that range gets added now. But as we are not holding
-                * fi->i_mmap_sem, worker should still be able to free up
-                * a range and wake us up.
+                * mapping->invalidate_lock, the worker should still be
+                * able to free up a range and wake us up.
                 */
                if (!fi->dax->nr && !(fcd->nr_free_ranges > 0)) {
                        if (wait_event_killable_exclusive(fcd->range_waitq,
@@ -1108,7 +1106,7 @@ static int lookup_and_reclaim_dmap_locked(struct fuse_conn_dax *fcd,
 /*
  * Free a range of memory.
  * Locking:
- * 1. Take fi->i_mmap_sem to block dax faults.
+ * 1. Take mapping->invalidate_lock to block dax faults.
  * 2. Take fi->dax->sem to protect interval tree and also to make sure
  *    read/write can not reuse a dmap which we might be freeing.
  */
@@ -1122,7 +1120,7 @@ static int lookup_and_reclaim_dmap(struct fuse_conn_dax *fcd,
        loff_t dmap_start = start_idx << FUSE_DAX_SHIFT;
        loff_t dmap_end = (dmap_start + FUSE_DAX_SZ) - 1;
 
-       down_write(&fi->i_mmap_sem);
+       filemap_invalidate_lock(inode->i_mapping);
        ret = fuse_dax_break_layouts(inode, dmap_start, dmap_end);
        if (ret) {
                pr_debug("virtio_fs: fuse_dax_break_layouts() failed. err=%d\n",
@@ -1134,7 +1132,7 @@ static int lookup_and_reclaim_dmap(struct fuse_conn_dax *fcd,
        ret = lookup_and_reclaim_dmap_locked(fcd, inode, start_idx);
        up_write(&fi->dax->sem);
 out_mmap_sem:
-       up_write(&fi->i_mmap_sem);
+       filemap_invalidate_unlock(inode->i_mapping);
        return ret;
 }
 
@@ -1235,8 +1233,6 @@ void fuse_dax_conn_free(struct fuse_conn *fc)
 static int fuse_dax_mem_range_init(struct fuse_conn_dax *fcd)
 {
        long nr_pages, nr_ranges;
-       void *kaddr;
-       pfn_t pfn;
        struct fuse_dax_mapping *range;
        int ret, id;
        size_t dax_size = -1;
@@ -1248,8 +1244,8 @@ static int fuse_dax_mem_range_init(struct fuse_conn_dax *fcd)
        INIT_DELAYED_WORK(&fcd->free_work, fuse_dax_free_mem_worker);
 
        id = dax_read_lock();
-       nr_pages = dax_direct_access(fcd->dev, 0, PHYS_PFN(dax_size), &kaddr,
-                                    &pfn);
+       nr_pages = dax_direct_access(fcd->dev, 0, PHYS_PFN(dax_size), NULL,
+                                    NULL);
        dax_read_unlock(id);
        if (nr_pages < 0) {
                pr_debug("dax_direct_access() returned %ld\n", nr_pages);
index eade6f9..d9b977c 100644 (file)
@@ -1556,6 +1556,7 @@ int fuse_do_setattr(struct dentry *dentry, struct iattr *attr,
        struct fuse_mount *fm = get_fuse_mount(inode);
        struct fuse_conn *fc = fm->fc;
        struct fuse_inode *fi = get_fuse_inode(inode);
+       struct address_space *mapping = inode->i_mapping;
        FUSE_ARGS(args);
        struct fuse_setattr_in inarg;
        struct fuse_attr_out outarg;
@@ -1580,11 +1581,11 @@ int fuse_do_setattr(struct dentry *dentry, struct iattr *attr,
        }
 
        if (FUSE_IS_DAX(inode) && is_truncate) {
-               down_write(&fi->i_mmap_sem);
+               filemap_invalidate_lock(mapping);
                fault_blocked = true;
                err = fuse_dax_break_layouts(inode, 0, 0);
                if (err) {
-                       up_write(&fi->i_mmap_sem);
+                       filemap_invalidate_unlock(mapping);
                        return err;
                }
        }
@@ -1694,13 +1695,13 @@ int fuse_do_setattr(struct dentry *dentry, struct iattr *attr,
        if ((is_truncate || !is_wb) &&
            S_ISREG(inode->i_mode) && oldsize != outarg.attr.size) {
                truncate_pagecache(inode, outarg.attr.size);
-               invalidate_inode_pages2(inode->i_mapping);
+               invalidate_inode_pages2(mapping);
        }
 
        clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
 out:
        if (fault_blocked)
-               up_write(&fi->i_mmap_sem);
+               filemap_invalidate_unlock(mapping);
 
        return 0;
 
@@ -1711,7 +1712,7 @@ error:
        clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
 
        if (fault_blocked)
-               up_write(&fi->i_mmap_sem);
+               filemap_invalidate_unlock(mapping);
        return err;
 }
 
index 97f860c..621a662 100644 (file)
@@ -243,7 +243,7 @@ int fuse_open_common(struct inode *inode, struct file *file, bool isdir)
        }
 
        if (dax_truncate) {
-               down_write(&get_fuse_inode(inode)->i_mmap_sem);
+               filemap_invalidate_lock(inode->i_mapping);
                err = fuse_dax_break_layouts(inode, 0, 0);
                if (err)
                        goto out;
@@ -255,7 +255,7 @@ int fuse_open_common(struct inode *inode, struct file *file, bool isdir)
 
 out:
        if (dax_truncate)
-               up_write(&get_fuse_inode(inode)->i_mmap_sem);
+               filemap_invalidate_unlock(inode->i_mapping);
 
        if (is_wb_truncate | dax_truncate) {
                fuse_release_nowrite(inode);
@@ -2920,7 +2920,7 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
        if (lock_inode) {
                inode_lock(inode);
                if (block_faults) {
-                       down_write(&fi->i_mmap_sem);
+                       filemap_invalidate_lock(inode->i_mapping);
                        err = fuse_dax_break_layouts(inode, 0, 0);
                        if (err)
                                goto out;
@@ -2976,7 +2976,7 @@ out:
                clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
 
        if (block_faults)
-               up_write(&fi->i_mmap_sem);
+               filemap_invalidate_unlock(inode->i_mapping);
 
        if (lock_inode)
                inode_unlock(inode);
@@ -3045,7 +3045,7 @@ static ssize_t __fuse_copy_file_range(struct file *file_in, loff_t pos_in,
         * modifications.  Yet this does give less guarantees than if the
         * copying was performed with write(2).
         *
-        * To fix this a i_mmap_sem style lock could be used to prevent new
+        * To fix this a mapping->invalidate_lock could be used to prevent new
         * faults while the copy is ongoing.
         */
        err = fuse_writeback_range(inode_out, pos_out, pos_out + len - 1);
index 07829ce..6fb639b 100644 (file)
@@ -149,13 +149,6 @@ struct fuse_inode {
        /** Lock to protect write related fields */
        spinlock_t lock;
 
-       /**
-        * Can't take inode lock in fault path (leads to circular dependency).
-        * Introduce another semaphore which can be taken in fault path and
-        * then other filesystem paths can take this to block faults.
-        */
-       struct rw_semaphore i_mmap_sem;
-
 #ifdef CONFIG_FUSE_DAX
        /*
         * Dax specific inode data
index b9beb39..e07e429 100644 (file)
@@ -85,7 +85,6 @@ static struct inode *fuse_alloc_inode(struct super_block *sb)
        fi->orig_ino = 0;
        fi->state = 0;
        mutex_init(&fi->mutex);
-       init_rwsem(&fi->i_mmap_sem);
        spin_lock_init(&fi->lock);
        fi->forget = fuse_alloc_forget();
        if (!fi->forget)
index 84ec053..c559827 100644 (file)
@@ -1237,9 +1237,6 @@ static int gfs2_lock(struct file *file, int cmd, struct file_lock *fl)
 
        if (!(fl->fl_flags & FL_POSIX))
                return -ENOLCK;
-       if (__mandatory_lock(&ip->i_inode) && fl->fl_type != F_UNLCK)
-               return -ENOLCK;
-
        if (cmd == F_CANCELLK) {
                /* Hack: */
                cmd = F_SETLK;
index 2b36dc6..ec975f4 100644 (file)
@@ -2,6 +2,7 @@
 config HPFS_FS
        tristate "OS/2 HPFS file system support"
        depends on BLOCK
+       select FS_IOMAP
        help
          OS/2 is IBM's operating system for PC's, the same as Warp, and HPFS
          is the file system used for organizing files on OS/2 hard disk
index c3a49aa..fb37f57 100644 (file)
@@ -9,6 +9,7 @@
 
 #include "hpfs_fn.h"
 #include <linux/mpage.h>
+#include <linux/iomap.h>
 #include <linux/fiemap.h>
 
 #define BLOCKS(size) (((size) + 511) >> 9)
@@ -116,6 +117,47 @@ static int hpfs_get_block(struct inode *inode, sector_t iblock, struct buffer_he
        return r;
 }
 
+static int hpfs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
+               unsigned flags, struct iomap *iomap, struct iomap *srcmap)
+{
+       struct super_block *sb = inode->i_sb;
+       unsigned int blkbits = inode->i_blkbits;
+       unsigned int n_secs;
+       secno s;
+
+       if (WARN_ON_ONCE(flags & (IOMAP_WRITE | IOMAP_ZERO)))
+               return -EINVAL;
+
+       iomap->bdev = inode->i_sb->s_bdev;
+       iomap->offset = offset;
+
+       hpfs_lock(sb);
+       s = hpfs_bmap(inode, offset >> blkbits, &n_secs);
+       if (s) {
+               n_secs = hpfs_search_hotfix_map_for_range(sb, s,
+                               min_t(loff_t, n_secs, length));
+               if (unlikely(!n_secs)) {
+                       s = hpfs_search_hotfix_map(sb, s);
+                       n_secs = 1;
+               }
+               iomap->type = IOMAP_MAPPED;
+               iomap->flags = IOMAP_F_MERGED;
+               iomap->addr = (u64)s << blkbits;
+               iomap->length = (u64)n_secs << blkbits;
+       } else {
+               iomap->type = IOMAP_HOLE;
+               iomap->addr = IOMAP_NULL_ADDR;
+               iomap->length = 1 << blkbits;
+       }
+
+       hpfs_unlock(sb);
+       return 0;
+}
+
+static const struct iomap_ops hpfs_iomap_ops = {
+       .iomap_begin            = hpfs_iomap_begin,
+};
+
 static int hpfs_readpage(struct file *file, struct page *page)
 {
        return mpage_readpage(page, hpfs_get_block);
@@ -192,7 +234,14 @@ static sector_t _hpfs_bmap(struct address_space *mapping, sector_t block)
 
 static int hpfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, u64 start, u64 len)
 {
-       return generic_block_fiemap(inode, fieinfo, start, len, hpfs_get_block);
+       int ret;
+
+       inode_lock(inode);
+       len = min_t(u64, len, i_size_read(inode));
+       ret = iomap_fiemap(inode, fieinfo, start, len, &hpfs_iomap_ops);
+       inode_unlock(inode);
+
+       return ret;
 }
 
 const struct address_space_operations hpfs_aops = {
index c93500d..84c528c 100644 (file)
@@ -190,6 +190,8 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
        mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE);
        mapping->private_data = NULL;
        mapping->writeback_index = 0;
+       __init_rwsem(&mapping->invalidate_lock, "mapping.invalidate_lock",
+                    &sb->s_type->invalidate_lock_key);
        inode->i_private = NULL;
        inode->i_mapping = mapping;
        INIT_HLIST_HEAD(&inode->i_dentry);      /* buggered by rcu freeing */
index cf086b0..7d2ed8c 100644 (file)
@@ -129,7 +129,8 @@ struct io_cb_cancel_data {
        bool cancel_all;
 };
 
-static void create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index);
+static void create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index, bool first);
+static void io_wqe_dec_running(struct io_worker *worker);
 
 static bool io_worker_get(struct io_worker *worker)
 {
@@ -168,26 +169,21 @@ static void io_worker_exit(struct io_worker *worker)
 {
        struct io_wqe *wqe = worker->wqe;
        struct io_wqe_acct *acct = io_wqe_get_acct(worker);
-       unsigned flags;
 
        if (refcount_dec_and_test(&worker->ref))
                complete(&worker->ref_done);
        wait_for_completion(&worker->ref_done);
 
-       preempt_disable();
-       current->flags &= ~PF_IO_WORKER;
-       flags = worker->flags;
-       worker->flags = 0;
-       if (flags & IO_WORKER_F_RUNNING)
-               atomic_dec(&acct->nr_running);
-       worker->flags = 0;
-       preempt_enable();
-
        raw_spin_lock_irq(&wqe->lock);
-       if (flags & IO_WORKER_F_FREE)
+       if (worker->flags & IO_WORKER_F_FREE)
                hlist_nulls_del_rcu(&worker->nulls_node);
        list_del_rcu(&worker->all_list);
        acct->nr_workers--;
+       preempt_disable();
+       io_wqe_dec_running(worker);
+       worker->flags = 0;
+       current->flags &= ~PF_IO_WORKER;
+       preempt_enable();
        raw_spin_unlock_irq(&wqe->lock);
 
        kfree_rcu(worker, rcu);
@@ -214,15 +210,19 @@ static bool io_wqe_activate_free_worker(struct io_wqe *wqe)
        struct hlist_nulls_node *n;
        struct io_worker *worker;
 
-       n = rcu_dereference(hlist_nulls_first_rcu(&wqe->free_list));
-       if (is_a_nulls(n))
-               return false;
-
-       worker = hlist_nulls_entry(n, struct io_worker, nulls_node);
-       if (io_worker_get(worker)) {
-               wake_up_process(worker->task);
+       /*
+        * Iterate free_list and see if we can find an idle worker to
+        * activate. If a given worker is on the free_list but in the process
+        * of exiting, keep trying.
+        */
+       hlist_nulls_for_each_entry_rcu(worker, n, &wqe->free_list, nulls_node) {
+               if (!io_worker_get(worker))
+                       continue;
+               if (wake_up_process(worker->task)) {
+                       io_worker_release(worker);
+                       return true;
+               }
                io_worker_release(worker);
-               return true;
        }
 
        return false;
@@ -247,10 +247,21 @@ static void io_wqe_wake_worker(struct io_wqe *wqe, struct io_wqe_acct *acct)
        ret = io_wqe_activate_free_worker(wqe);
        rcu_read_unlock();
 
-       if (!ret && acct->nr_workers < acct->max_workers) {
-               atomic_inc(&acct->nr_running);
-               atomic_inc(&wqe->wq->worker_refs);
-               create_io_worker(wqe->wq, wqe, acct->index);
+       if (!ret) {
+               bool do_create = false, first = false;
+
+               raw_spin_lock_irq(&wqe->lock);
+               if (acct->nr_workers < acct->max_workers) {
+                       atomic_inc(&acct->nr_running);
+                       atomic_inc(&wqe->wq->worker_refs);
+                       if (!acct->nr_workers)
+                               first = true;
+                       acct->nr_workers++;
+                       do_create = true;
+               }
+               raw_spin_unlock_irq(&wqe->lock);
+               if (do_create)
+                       create_io_worker(wqe->wq, wqe, acct->index, first);
        }
 }
 
@@ -271,10 +282,28 @@ static void create_worker_cb(struct callback_head *cb)
 {
        struct create_worker_data *cwd;
        struct io_wq *wq;
+       struct io_wqe *wqe;
+       struct io_wqe_acct *acct;
+       bool do_create = false, first = false;
 
        cwd = container_of(cb, struct create_worker_data, work);
-       wq = cwd->wqe->wq;
-       create_io_worker(wq, cwd->wqe, cwd->index);
+       wqe = cwd->wqe;
+       wq = wqe->wq;
+       acct = &wqe->acct[cwd->index];
+       raw_spin_lock_irq(&wqe->lock);
+       if (acct->nr_workers < acct->max_workers) {
+               if (!acct->nr_workers)
+                       first = true;
+               acct->nr_workers++;
+               do_create = true;
+       }
+       raw_spin_unlock_irq(&wqe->lock);
+       if (do_create) {
+               create_io_worker(wq, wqe, cwd->index, first);
+       } else {
+               atomic_dec(&acct->nr_running);
+               io_worker_ref_put(wq);
+       }
        kfree(cwd);
 }
 
@@ -612,7 +641,7 @@ void io_wq_worker_sleeping(struct task_struct *tsk)
        raw_spin_unlock_irq(&worker->wqe->lock);
 }
 
-static void create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index)
+static void create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index, bool first)
 {
        struct io_wqe_acct *acct = &wqe->acct[index];
        struct io_worker *worker;
@@ -635,6 +664,9 @@ static void create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index)
                kfree(worker);
 fail:
                atomic_dec(&acct->nr_running);
+               raw_spin_lock_irq(&wqe->lock);
+               acct->nr_workers--;
+               raw_spin_unlock_irq(&wqe->lock);
                io_worker_ref_put(wq);
                return;
        }
@@ -650,9 +682,8 @@ fail:
        worker->flags |= IO_WORKER_F_FREE;
        if (index == IO_WQ_ACCT_BOUND)
                worker->flags |= IO_WORKER_F_BOUND;
-       if (!acct->nr_workers && (worker->flags & IO_WORKER_F_BOUND))
+       if (first && (worker->flags & IO_WORKER_F_BOUND))
                worker->flags |= IO_WORKER_F_FIXED;
-       acct->nr_workers++;
        raw_spin_unlock_irq(&wqe->lock);
        wake_up_new_task(tsk);
 }
index 5a0fd6b..a2e20a6 100644 (file)
@@ -78,6 +78,7 @@
 #include <linux/task_work.h>
 #include <linux/pagemap.h>
 #include <linux/io_uring.h>
+#include <linux/tracehook.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/io_uring.h>
@@ -1279,8 +1280,17 @@ static void io_prep_async_link(struct io_kiocb *req)
 {
        struct io_kiocb *cur;
 
-       io_for_each_link(cur, req)
-               io_prep_async_work(cur);
+       if (req->flags & REQ_F_LINK_TIMEOUT) {
+               struct io_ring_ctx *ctx = req->ctx;
+
+               spin_lock_irq(&ctx->completion_lock);
+               io_for_each_link(cur, req)
+                       io_prep_async_work(cur);
+               spin_unlock_irq(&ctx->completion_lock);
+       } else {
+               io_for_each_link(cur, req)
+                       io_prep_async_work(cur);
+       }
 }
 
 static void io_queue_async_work(struct io_kiocb *req)
@@ -1490,7 +1500,8 @@ static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
        all_flushed = list_empty(&ctx->cq_overflow_list);
        if (all_flushed) {
                clear_bit(0, &ctx->check_cq_overflow);
-               ctx->rings->sq_flags &= ~IORING_SQ_CQ_OVERFLOW;
+               WRITE_ONCE(ctx->rings->sq_flags,
+                          ctx->rings->sq_flags & ~IORING_SQ_CQ_OVERFLOW);
        }
 
        if (posted)
@@ -1569,7 +1580,9 @@ static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data,
        }
        if (list_empty(&ctx->cq_overflow_list)) {
                set_bit(0, &ctx->check_cq_overflow);
-               ctx->rings->sq_flags |= IORING_SQ_CQ_OVERFLOW;
+               WRITE_ONCE(ctx->rings->sq_flags,
+                          ctx->rings->sq_flags | IORING_SQ_CQ_OVERFLOW);
+
        }
        ocqe->cqe.user_data = user_data;
        ocqe->cqe.res = res;
@@ -1950,9 +1963,13 @@ static void tctx_task_work(struct callback_head *cb)
                        node = next;
                }
                if (wq_list_empty(&tctx->task_list)) {
+                       spin_lock_irq(&tctx->task_lock);
                        clear_bit(0, &tctx->task_state);
-                       if (wq_list_empty(&tctx->task_list))
+                       if (wq_list_empty(&tctx->task_list)) {
+                               spin_unlock_irq(&tctx->task_lock);
                                break;
+                       }
+                       spin_unlock_irq(&tctx->task_lock);
                        /* another tctx_task_work() is enqueued, yield */
                        if (test_and_set_bit(0, &tctx->task_state))
                                break;
@@ -2047,6 +2064,12 @@ static void io_req_task_queue(struct io_kiocb *req)
        io_req_task_work_add(req);
 }
 
+static void io_req_task_queue_reissue(struct io_kiocb *req)
+{
+       req->io_task_work.func = io_queue_async_work;
+       io_req_task_work_add(req);
+}
+
 static inline void io_queue_next(struct io_kiocb *req)
 {
        struct io_kiocb *nxt = io_req_find_next(req);
@@ -2203,9 +2226,9 @@ static inline unsigned int io_put_rw_kbuf(struct io_kiocb *req)
 
 static inline bool io_run_task_work(void)
 {
-       if (current->task_works) {
+       if (test_thread_flag(TIF_NOTIFY_SIGNAL) || current->task_works) {
                __set_current_state(TASK_RUNNING);
-               task_work_run();
+               tracehook_notify_signal();
                return true;
        }
 
@@ -2235,7 +2258,7 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
                    !(req->flags & REQ_F_DONT_REISSUE)) {
                        req->iopoll_completed = 0;
                        req_ref_get(req);
-                       io_queue_async_work(req);
+                       io_req_task_queue_reissue(req);
                        continue;
                }
 
@@ -2428,6 +2451,12 @@ static bool io_rw_should_reissue(struct io_kiocb *req)
         */
        if (percpu_ref_is_dying(&ctx->refs))
                return false;
+       /*
+        * Play it safe and assume not safe to re-import and reissue if we're
+        * not in the original thread group (or in task context).
+        */
+       if (!same_thread_group(req->task, current) || !in_task())
+               return false;
        return true;
 }
 #else
@@ -2448,8 +2477,10 @@ static void io_fallback_req_func(struct work_struct *work)
        struct llist_node *node = llist_del_all(&ctx->fallback_llist);
        struct io_kiocb *req, *tmp;
 
+       percpu_ref_get(&ctx->refs);
        llist_for_each_entry_safe(req, tmp, node, io_task_work.fallback_node)
                req->io_task_work.func(req);
+       percpu_ref_put(&ctx->refs);
 }
 
 static void __io_complete_rw(struct io_kiocb *req, long res, long res2,
@@ -2758,7 +2789,7 @@ static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
                req->flags &= ~REQ_F_REISSUE;
                if (io_resubmit_prep(req)) {
                        req_ref_get(req);
-                       io_queue_async_work(req);
+                       io_req_task_queue_reissue(req);
                } else {
                        int cflags = 0;
 
@@ -4914,7 +4945,6 @@ static bool io_poll_complete(struct io_kiocb *req, __poll_t mask)
        if (req->poll.events & EPOLLONESHOT)
                flags = 0;
        if (!io_cqring_fill_event(ctx, req->user_data, error, flags)) {
-               io_poll_remove_waitqs(req);
                req->poll.done = true;
                flags = 0;
        }
@@ -4937,6 +4967,7 @@ static void io_poll_task_func(struct io_kiocb *req)
 
                done = io_poll_complete(req, req->result);
                if (done) {
+                       io_poll_remove_double(req);
                        hash_del(&req->hash_node);
                } else {
                        req->result = 0;
@@ -5124,7 +5155,7 @@ static __poll_t __io_arm_poll_handler(struct io_kiocb *req,
                ipt->error = -EINVAL;
 
        spin_lock_irq(&ctx->completion_lock);
-       if (ipt->error)
+       if (ipt->error || (mask && (poll->events & EPOLLONESHOT)))
                io_poll_remove_double(req);
        if (likely(poll->head)) {
                spin_lock(&poll->head->lock);
@@ -5196,7 +5227,6 @@ static int io_arm_poll_handler(struct io_kiocb *req)
        ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask,
                                        io_async_wake);
        if (ret || ipt.error) {
-               io_poll_remove_double(req);
                spin_unlock_irq(&ctx->completion_lock);
                if (ret)
                        return IO_APOLL_READY;
@@ -6779,14 +6809,16 @@ static inline void io_ring_set_wakeup_flag(struct io_ring_ctx *ctx)
 {
        /* Tell userspace we may need a wakeup call */
        spin_lock_irq(&ctx->completion_lock);
-       ctx->rings->sq_flags |= IORING_SQ_NEED_WAKEUP;
+       WRITE_ONCE(ctx->rings->sq_flags,
+                  ctx->rings->sq_flags | IORING_SQ_NEED_WAKEUP);
        spin_unlock_irq(&ctx->completion_lock);
 }
 
 static inline void io_ring_clear_wakeup_flag(struct io_ring_ctx *ctx)
 {
        spin_lock_irq(&ctx->completion_lock);
-       ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
+       WRITE_ONCE(ctx->rings->sq_flags,
+                  ctx->rings->sq_flags & ~IORING_SQ_NEED_WAKEUP);
        spin_unlock_irq(&ctx->completion_lock);
 }
 
@@ -7108,16 +7140,6 @@ static void **io_alloc_page_table(size_t size)
        return table;
 }
 
-static inline void io_rsrc_ref_lock(struct io_ring_ctx *ctx)
-{
-       spin_lock_bh(&ctx->rsrc_ref_lock);
-}
-
-static inline void io_rsrc_ref_unlock(struct io_ring_ctx *ctx)
-{
-       spin_unlock_bh(&ctx->rsrc_ref_lock);
-}
-
 static void io_rsrc_node_destroy(struct io_rsrc_node *ref_node)
 {
        percpu_ref_exit(&ref_node->refs);
@@ -7134,9 +7156,9 @@ static void io_rsrc_node_switch(struct io_ring_ctx *ctx,
                struct io_rsrc_node *rsrc_node = ctx->rsrc_node;
 
                rsrc_node->rsrc_data = data_to_kill;
-               io_rsrc_ref_lock(ctx);
+               spin_lock_irq(&ctx->rsrc_ref_lock);
                list_add_tail(&rsrc_node->node, &ctx->rsrc_ref_list);
-               io_rsrc_ref_unlock(ctx);
+               spin_unlock_irq(&ctx->rsrc_ref_lock);
 
                atomic_inc(&data_to_kill->refs);
                percpu_ref_kill(&rsrc_node->refs);
@@ -7175,17 +7197,19 @@ static int io_rsrc_ref_quiesce(struct io_rsrc_data *data, struct io_ring_ctx *ct
                /* kill initial ref, already quiesced if zero */
                if (atomic_dec_and_test(&data->refs))
                        break;
+               mutex_unlock(&ctx->uring_lock);
                flush_delayed_work(&ctx->rsrc_put_work);
                ret = wait_for_completion_interruptible(&data->done);
-               if (!ret)
+               if (!ret) {
+                       mutex_lock(&ctx->uring_lock);
                        break;
+               }
 
                atomic_inc(&data->refs);
                /* wait for all works potentially completing data->done */
                flush_delayed_work(&ctx->rsrc_put_work);
                reinit_completion(&data->done);
 
-               mutex_unlock(&ctx->uring_lock);
                ret = io_run_task_work_sig();
                mutex_lock(&ctx->uring_lock);
        } while (ret >= 0);
@@ -7644,9 +7668,10 @@ static void io_rsrc_node_ref_zero(struct percpu_ref *ref)
 {
        struct io_rsrc_node *node = container_of(ref, struct io_rsrc_node, refs);
        struct io_ring_ctx *ctx = node->rsrc_data->ctx;
+       unsigned long flags;
        bool first_add = false;
 
-       io_rsrc_ref_lock(ctx);
+       spin_lock_irqsave(&ctx->rsrc_ref_lock, flags);
        node->done = true;
 
        while (!list_empty(&ctx->rsrc_ref_list)) {
@@ -7658,7 +7683,7 @@ static void io_rsrc_node_ref_zero(struct percpu_ref *ref)
                list_del(&node->node);
                first_add |= llist_add(&node->llist, &ctx->rsrc_put_llist);
        }
-       io_rsrc_ref_unlock(ctx);
+       spin_unlock_irqrestore(&ctx->rsrc_ref_lock, flags);
 
        if (first_add)
                mod_delayed_work(system_wq, &ctx->rsrc_put_work, HZ);
@@ -8629,13 +8654,10 @@ static void io_req_caches_free(struct io_ring_ctx *ctx)
        mutex_unlock(&ctx->uring_lock);
 }
 
-static bool io_wait_rsrc_data(struct io_rsrc_data *data)
+static void io_wait_rsrc_data(struct io_rsrc_data *data)
 {
-       if (!data)
-               return false;
-       if (!atomic_dec_and_test(&data->refs))
+       if (data && !atomic_dec_and_test(&data->refs))
                wait_for_completion(&data->done);
-       return true;
 }
 
 static void io_ring_ctx_free(struct io_ring_ctx *ctx)
@@ -8647,10 +8669,14 @@ static void io_ring_ctx_free(struct io_ring_ctx *ctx)
                ctx->mm_account = NULL;
        }
 
+       /* __io_rsrc_put_work() may need uring_lock to progress, wait w/o it */
+       io_wait_rsrc_data(ctx->buf_data);
+       io_wait_rsrc_data(ctx->file_data);
+
        mutex_lock(&ctx->uring_lock);
-       if (io_wait_rsrc_data(ctx->buf_data))
+       if (ctx->buf_data)
                __io_sqe_buffers_unregister(ctx);
-       if (io_wait_rsrc_data(ctx->file_data))
+       if (ctx->file_data)
                __io_sqe_files_unregister(ctx);
        if (ctx->rings)
                __io_cqring_overflow_flush(ctx, true);
@@ -9346,9 +9372,10 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
        if (ctx->flags & IORING_SETUP_SQPOLL) {
                io_cqring_overflow_flush(ctx, false);
 
-               ret = -EOWNERDEAD;
-               if (unlikely(ctx->sq_data->thread == NULL))
+               if (unlikely(ctx->sq_data->thread == NULL)) {
+                       ret = -EOWNERDEAD;
                        goto out;
+               }
                if (flags & IORING_ENTER_SQ_WAKEUP)
                        wake_up(&ctx->sq_data->wait);
                if (flags & IORING_ENTER_SQ_WAIT) {
@@ -9816,10 +9843,11 @@ static int io_register_personality(struct io_ring_ctx *ctx)
 
        ret = xa_alloc_cyclic(&ctx->personalities, &id, (void *)creds,
                        XA_LIMIT(0, USHRT_MAX), &ctx->pers_next, GFP_KERNEL);
-       if (!ret)
-               return id;
-       put_cred(creds);
-       return ret;
+       if (ret < 0) {
+               put_cred(creds);
+               return ret;
+       }
+       return id;
 }
 
 static int io_register_restrictions(struct io_ring_ctx *ctx, void __user *arg,
index 1e2204f..eea8267 100644 (file)
@@ -263,209 +263,6 @@ static long ioctl_file_clone_range(struct file *file,
                                args.src_length, args.dest_offset);
 }
 
-#ifdef CONFIG_BLOCK
-
-static inline sector_t logical_to_blk(struct inode *inode, loff_t offset)
-{
-       return (offset >> inode->i_blkbits);
-}
-
-static inline loff_t blk_to_logical(struct inode *inode, sector_t blk)
-{
-       return (blk << inode->i_blkbits);
-}
-
-/**
- * __generic_block_fiemap - FIEMAP for block based inodes (no locking)
- * @inode: the inode to map
- * @fieinfo: the fiemap info struct that will be passed back to userspace
- * @start: where to start mapping in the inode
- * @len: how much space to map
- * @get_block: the fs's get_block function
- *
- * This does FIEMAP for block based inodes.  Basically it will just loop
- * through get_block until we hit the number of extents we want to map, or we
- * go past the end of the file and hit a hole.
- *
- * If it is possible to have data blocks beyond a hole past @inode->i_size, then
- * please do not use this function, it will stop at the first unmapped block
- * beyond i_size.
- *
- * If you use this function directly, you need to do your own locking. Use
- * generic_block_fiemap if you want the locking done for you.
- */
-static int __generic_block_fiemap(struct inode *inode,
-                          struct fiemap_extent_info *fieinfo, loff_t start,
-                          loff_t len, get_block_t *get_block)
-{
-       struct buffer_head map_bh;
-       sector_t start_blk, last_blk;
-       loff_t isize = i_size_read(inode);
-       u64 logical = 0, phys = 0, size = 0;
-       u32 flags = FIEMAP_EXTENT_MERGED;
-       bool past_eof = false, whole_file = false;
-       int ret = 0;
-
-       ret = fiemap_prep(inode, fieinfo, start, &len, FIEMAP_FLAG_SYNC);
-       if (ret)
-               return ret;
-
-       /*
-        * Either the i_mutex or other appropriate locking needs to be held
-        * since we expect isize to not change at all through the duration of
-        * this call.
-        */
-       if (len >= isize) {
-               whole_file = true;
-               len = isize;
-       }
-
-       /*
-        * Some filesystems can't deal with being asked to map less than
-        * blocksize, so make sure our len is at least block length.
-        */
-       if (logical_to_blk(inode, len) == 0)
-               len = blk_to_logical(inode, 1);
-
-       start_blk = logical_to_blk(inode, start);
-       last_blk = logical_to_blk(inode, start + len - 1);
-
-       do {
-               /*
-                * we set b_size to the total size we want so it will map as
-                * many contiguous blocks as possible at once
-                */
-               memset(&map_bh, 0, sizeof(struct buffer_head));
-               map_bh.b_size = len;
-
-               ret = get_block(inode, start_blk, &map_bh, 0);
-               if (ret)
-                       break;
-
-               /* HOLE */
-               if (!buffer_mapped(&map_bh)) {
-                       start_blk++;
-
-                       /*
-                        * We want to handle the case where there is an
-                        * allocated block at the front of the file, and then
-                        * nothing but holes up to the end of the file properly,
-                        * to make sure that extent at the front gets properly
-                        * marked with FIEMAP_EXTENT_LAST
-                        */
-                       if (!past_eof &&
-                           blk_to_logical(inode, start_blk) >= isize)
-                               past_eof = 1;
-
-                       /*
-                        * First hole after going past the EOF, this is our
-                        * last extent
-                        */
-                       if (past_eof && size) {
-                               flags = FIEMAP_EXTENT_MERGED|FIEMAP_EXTENT_LAST;
-                               ret = fiemap_fill_next_extent(fieinfo, logical,
-                                                             phys, size,
-                                                             flags);
-                       } else if (size) {
-                               ret = fiemap_fill_next_extent(fieinfo, logical,
-                                                             phys, size, flags);
-                               size = 0;
-                       }
-
-                       /* if we have holes up to/past EOF then we're done */
-                       if (start_blk > last_blk || past_eof || ret)
-                               break;
-               } else {
-                       /*
-                        * We have gone over the length of what we wanted to
-                        * map, and it wasn't the entire file, so add the extent
-                        * we got last time and exit.
-                        *
-                        * This is for the case where say we want to map all the
-                        * way up to the second to the last block in a file, but
-                        * the last block is a hole, making the second to last
-                        * block FIEMAP_EXTENT_LAST.  In this case we want to
-                        * see if there is a hole after the second to last block
-                        * so we can mark it properly.  If we found data after
-                        * we exceeded the length we were requesting, then we
-                        * are good to go, just add the extent to the fieinfo
-                        * and break
-                        */
-                       if (start_blk > last_blk && !whole_file) {
-                               ret = fiemap_fill_next_extent(fieinfo, logical,
-                                                             phys, size,
-                                                             flags);
-                               break;
-                       }
-
-                       /*
-                        * if size != 0 then we know we already have an extent
-                        * to add, so add it.
-                        */
-                       if (size) {
-                               ret = fiemap_fill_next_extent(fieinfo, logical,
-                                                             phys, size,
-                                                             flags);
-                               if (ret)
-                                       break;
-                       }
-
-                       logical = blk_to_logical(inode, start_blk);
-                       phys = blk_to_logical(inode, map_bh.b_blocknr);
-                       size = map_bh.b_size;
-                       flags = FIEMAP_EXTENT_MERGED;
-
-                       start_blk += logical_to_blk(inode, size);
-
-                       /*
-                        * If we are past the EOF, then we need to make sure as
-                        * soon as we find a hole that the last extent we found
-                        * is marked with FIEMAP_EXTENT_LAST
-                        */
-                       if (!past_eof && logical + size >= isize)
-                               past_eof = true;
-               }
-               cond_resched();
-               if (fatal_signal_pending(current)) {
-                       ret = -EINTR;
-                       break;
-               }
-
-       } while (1);
-
-       /* If ret is 1 then we just hit the end of the extent array */
-       if (ret == 1)
-               ret = 0;
-
-       return ret;
-}
-
-/**
- * generic_block_fiemap - FIEMAP for block based inodes
- * @inode: The inode to map
- * @fieinfo: The mapping information
- * @start: The initial block to map
- * @len: The length of the extect to attempt to map
- * @get_block: The block mapping function for the fs
- *
- * Calls __generic_block_fiemap to map the inode, after taking
- * the inode's mutex lock.
- */
-
-int generic_block_fiemap(struct inode *inode,
-                        struct fiemap_extent_info *fieinfo, u64 start,
-                        u64 len, get_block_t *get_block)
-{
-       int ret;
-       inode_lock(inode);
-       ret = __generic_block_fiemap(inode, fieinfo, start, len, get_block);
-       inode_unlock(inode);
-       return ret;
-}
-EXPORT_SYMBOL(generic_block_fiemap);
-
-#endif  /*  CONFIG_BLOCK  */
-
 /*
  * This provides compatibility with legacy XFS pre-allocation ioctls
  * which predate the fallocate syscall.
index 21edc42..678e2c5 100644 (file)
@@ -155,7 +155,6 @@ struct iso9660_options{
        unsigned int overriderockperm:1;
        unsigned int uid_set:1;
        unsigned int gid_set:1;
-       unsigned int utf8:1;
        unsigned char map;
        unsigned char check;
        unsigned int blocksize;
@@ -356,7 +355,6 @@ static int parse_options(char *options, struct iso9660_options *popt)
        popt->gid = GLOBAL_ROOT_GID;
        popt->uid = GLOBAL_ROOT_UID;
        popt->iocharset = NULL;
-       popt->utf8 = 0;
        popt->overriderockperm = 0;
        popt->session=-1;
        popt->sbsector=-1;
@@ -389,10 +387,13 @@ static int parse_options(char *options, struct iso9660_options *popt)
                case Opt_cruft:
                        popt->cruft = 1;
                        break;
+#ifdef CONFIG_JOLIET
                case Opt_utf8:
-                       popt->utf8 = 1;
+                       kfree(popt->iocharset);
+                       popt->iocharset = kstrdup("utf8", GFP_KERNEL);
+                       if (!popt->iocharset)
+                               return 0;
                        break;
-#ifdef CONFIG_JOLIET
                case Opt_iocharset:
                        kfree(popt->iocharset);
                        popt->iocharset = match_strdup(&args[0]);
@@ -495,7 +496,6 @@ static int isofs_show_options(struct seq_file *m, struct dentry *root)
        if (sbi->s_nocompress)          seq_puts(m, ",nocompress");
        if (sbi->s_overriderockperm)    seq_puts(m, ",overriderockperm");
        if (sbi->s_showassoc)           seq_puts(m, ",showassoc");
-       if (sbi->s_utf8)                seq_puts(m, ",utf8");
 
        if (sbi->s_check)               seq_printf(m, ",check=%c", sbi->s_check);
        if (sbi->s_mapping)             seq_printf(m, ",map=%c", sbi->s_mapping);
@@ -518,9 +518,10 @@ static int isofs_show_options(struct seq_file *m, struct dentry *root)
                seq_printf(m, ",fmode=%o", sbi->s_fmode);
 
 #ifdef CONFIG_JOLIET
-       if (sbi->s_nls_iocharset &&
-           strcmp(sbi->s_nls_iocharset->charset, CONFIG_NLS_DEFAULT) != 0)
+       if (sbi->s_nls_iocharset)
                seq_printf(m, ",iocharset=%s", sbi->s_nls_iocharset->charset);
+       else
+               seq_puts(m, ",iocharset=utf8");
 #endif
        return 0;
 }
@@ -863,14 +864,13 @@ root_found:
        sbi->s_nls_iocharset = NULL;
 
 #ifdef CONFIG_JOLIET
-       if (joliet_level && opt.utf8 == 0) {
+       if (joliet_level) {
                char *p = opt.iocharset ? opt.iocharset : CONFIG_NLS_DEFAULT;
-               sbi->s_nls_iocharset = load_nls(p);
-               if (! sbi->s_nls_iocharset) {
-                       /* Fail only if explicit charset specified */
-                       if (opt.iocharset)
+               if (strcmp(p, "utf8") != 0) {
+                       sbi->s_nls_iocharset = opt.iocharset ?
+                               load_nls(opt.iocharset) : load_nls_default();
+                       if (!sbi->s_nls_iocharset)
                                goto out_freesbi;
-                       sbi->s_nls_iocharset = load_nls_default();
                }
        }
 #endif
@@ -886,7 +886,6 @@ root_found:
        sbi->s_gid = opt.gid;
        sbi->s_uid_set = opt.uid_set;
        sbi->s_gid_set = opt.gid_set;
-       sbi->s_utf8 = opt.utf8;
        sbi->s_nocompress = opt.nocompress;
        sbi->s_overriderockperm = opt.overriderockperm;
        /*
index 055ec6c..dcdc191 100644 (file)
@@ -44,7 +44,6 @@ struct isofs_sb_info {
        unsigned char s_session;
        unsigned int  s_high_sierra:1;
        unsigned int  s_rock:2;
-       unsigned int  s_utf8:1;
        unsigned int  s_cruft:1; /* Broken disks with high byte of length
                                  * containing junk */
        unsigned int  s_nocompress:1;
index be8b6a9..c0f04a1 100644 (file)
@@ -41,14 +41,12 @@ uni16_to_x8(unsigned char *ascii, __be16 *uni, int len, struct nls_table *nls)
 int
 get_joliet_filename(struct iso_directory_record * de, unsigned char *outname, struct inode * inode)
 {
-       unsigned char utf8;
        struct nls_table *nls;
        unsigned char len = 0;
 
-       utf8 = ISOFS_SB(inode->i_sb)->s_utf8;
        nls = ISOFS_SB(inode->i_sb)->s_nls_iocharset;
 
-       if (utf8) {
+       if (!nls) {
                len = utf16s_to_utf8s((const wchar_t *) de->name,
                                de->name_len[0] >> 1, UTF16_BIG_ENDIAN,
                                outname, PAGE_SIZE);
index 74b2a1d..3d6fb4a 100644 (file)
@@ -1397,103 +1397,6 @@ static int posix_lock_inode_wait(struct inode *inode, struct file_lock *fl)
        return error;
 }
 
-#ifdef CONFIG_MANDATORY_FILE_LOCKING
-/**
- * locks_mandatory_locked - Check for an active lock
- * @file: the file to check
- *
- * Searches the inode's list of locks to find any POSIX locks which conflict.
- * This function is called from locks_verify_locked() only.
- */
-int locks_mandatory_locked(struct file *file)
-{
-       int ret;
-       struct inode *inode = locks_inode(file);
-       struct file_lock_context *ctx;
-       struct file_lock *fl;
-
-       ctx = smp_load_acquire(&inode->i_flctx);
-       if (!ctx || list_empty_careful(&ctx->flc_posix))
-               return 0;
-
-       /*
-        * Search the lock list for this inode for any POSIX locks.
-        */
-       spin_lock(&ctx->flc_lock);
-       ret = 0;
-       list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
-               if (fl->fl_owner != current->files &&
-                   fl->fl_owner != file) {
-                       ret = -EAGAIN;
-                       break;
-               }
-       }
-       spin_unlock(&ctx->flc_lock);
-       return ret;
-}
-
-/**
- * locks_mandatory_area - Check for a conflicting lock
- * @inode:     the file to check
- * @filp:       how the file was opened (if it was)
- * @start:     first byte in the file to check
- * @end:       lastbyte in the file to check
- * @type:      %F_WRLCK for a write lock, else %F_RDLCK
- *
- * Searches the inode's list of locks to find any POSIX locks which conflict.
- */
-int locks_mandatory_area(struct inode *inode, struct file *filp, loff_t start,
-                        loff_t end, unsigned char type)
-{
-       struct file_lock fl;
-       int error;
-       bool sleep = false;
-
-       locks_init_lock(&fl);
-       fl.fl_pid = current->tgid;
-       fl.fl_file = filp;
-       fl.fl_flags = FL_POSIX | FL_ACCESS;
-       if (filp && !(filp->f_flags & O_NONBLOCK))
-               sleep = true;
-       fl.fl_type = type;
-       fl.fl_start = start;
-       fl.fl_end = end;
-
-       for (;;) {
-               if (filp) {
-                       fl.fl_owner = filp;
-                       fl.fl_flags &= ~FL_SLEEP;
-                       error = posix_lock_inode(inode, &fl, NULL);
-                       if (!error)
-                               break;
-               }
-
-               if (sleep)
-                       fl.fl_flags |= FL_SLEEP;
-               fl.fl_owner = current->files;
-               error = posix_lock_inode(inode, &fl, NULL);
-               if (error != FILE_LOCK_DEFERRED)
-                       break;
-               error = wait_event_interruptible(fl.fl_wait,
-                                       list_empty(&fl.fl_blocked_member));
-               if (!error) {
-                       /*
-                        * If we've been sleeping someone might have
-                        * changed the permissions behind our back.
-                        */
-                       if (__mandatory_lock(inode))
-                               continue;
-               }
-
-               break;
-       }
-       locks_delete_block(&fl);
-
-       return error;
-}
-EXPORT_SYMBOL(locks_mandatory_area);
-#endif /* CONFIG_MANDATORY_FILE_LOCKING */
-
 static void lease_clear_pending(struct file_lock *fl, int arg)
 {
        switch (arg) {
@@ -2486,14 +2389,6 @@ int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
        if (file_lock == NULL)
                return -ENOLCK;
 
-       /* Don't allow mandatory locks on files that may be memory mapped
-        * and shared.
-        */
-       if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
-               error = -EAGAIN;
-               goto out;
-       }
-
        error = flock_to_posix_lock(filp, file_lock, flock);
        if (error)
                goto out;
@@ -2611,21 +2506,12 @@ int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
                struct flock64 *flock)
 {
        struct file_lock *file_lock = locks_alloc_lock();
-       struct inode *inode = locks_inode(filp);
        struct file *f;
        int error;
 
        if (file_lock == NULL)
                return -ENOLCK;
 
-       /* Don't allow mandatory locks on files that may be memory mapped
-        * and shared.
-        */
-       if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
-               error = -EAGAIN;
-               goto out;
-       }
-
        error = flock64_to_posix_lock(filp, file_lock, flock);
        if (error)
                goto out;
@@ -2857,8 +2743,7 @@ static void lock_get_status(struct seq_file *f, struct file_lock *fl,
                        seq_puts(f, "POSIX ");
 
                seq_printf(f, " %s ",
-                            (inode == NULL) ? "*NOINODE*" :
-                            mandatory_lock(inode) ? "MANDATORY" : "ADVISORY ");
+                            (inode == NULL) ? "*NOINODE*" : "ADVISORY ");
        } else if (IS_FLOCK(fl)) {
                if (fl->fl_type & LOCK_MAND) {
                        seq_puts(f, "FLOCK  MSNFS     ");
index bf6d8a7..471eb9f 100644 (file)
@@ -3023,9 +3023,7 @@ static int handle_truncate(struct user_namespace *mnt_userns, struct file *filp)
        /*
         * Refuse to truncate files with mandatory locks held on them.
         */
-       error = locks_verify_locked(filp);
-       if (!error)
-               error = security_path_truncate(path);
+       error = security_path_truncate(path);
        if (!error) {
                error = do_truncate(mnt_userns, path->dentry, 0,
                                    ATTR_MTIME|ATTR_CTIME|ATTR_OPEN,
index ab4174a..20caa4b 100644 (file)
@@ -1715,18 +1715,14 @@ static inline bool may_mount(void)
        return ns_capable(current->nsproxy->mnt_ns->user_ns, CAP_SYS_ADMIN);
 }
 
-#ifdef CONFIG_MANDATORY_FILE_LOCKING
-static inline bool may_mandlock(void)
+static void warn_mandlock(void)
 {
-       return capable(CAP_SYS_ADMIN);
+       pr_warn_once("=======================================================\n"
+                    "WARNING: The mand mount option has been deprecated and\n"
+                    "         and is ignored by this kernel. Remove the mand\n"
+                    "         option from the mount to silence this warning.\n"
+                    "=======================================================\n");
 }
-#else
-static inline bool may_mandlock(void)
-{
-       pr_warn("VFS: \"mand\" mount option not supported");
-       return false;
-}
-#endif
 
 static int can_umount(const struct path *path, int flags)
 {
@@ -1938,6 +1934,20 @@ void drop_collected_mounts(struct vfsmount *mnt)
        namespace_unlock();
 }
 
+static bool has_locked_children(struct mount *mnt, struct dentry *dentry)
+{
+       struct mount *child;
+
+       list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
+               if (!is_subdir(child->mnt_mountpoint, dentry))
+                       continue;
+
+               if (child->mnt.mnt_flags & MNT_LOCKED)
+                       return true;
+       }
+       return false;
+}
+
 /**
  * clone_private_mount - create a private clone of a path
  * @path: path to clone
@@ -1953,10 +1963,19 @@ struct vfsmount *clone_private_mount(const struct path *path)
        struct mount *old_mnt = real_mount(path->mnt);
        struct mount *new_mnt;
 
+       down_read(&namespace_sem);
        if (IS_MNT_UNBINDABLE(old_mnt))
-               return ERR_PTR(-EINVAL);
+               goto invalid;
+
+       if (!check_mnt(old_mnt))
+               goto invalid;
+
+       if (has_locked_children(old_mnt, path->dentry))
+               goto invalid;
 
        new_mnt = clone_mnt(old_mnt, path->dentry, CL_PRIVATE);
+       up_read(&namespace_sem);
+
        if (IS_ERR(new_mnt))
                return ERR_CAST(new_mnt);
 
@@ -1964,6 +1983,10 @@ struct vfsmount *clone_private_mount(const struct path *path)
        new_mnt->mnt_ns = MNT_NS_INTERNAL;
 
        return &new_mnt->mnt;
+
+invalid:
+       up_read(&namespace_sem);
+       return ERR_PTR(-EINVAL);
 }
 EXPORT_SYMBOL_GPL(clone_private_mount);
 
@@ -2315,19 +2338,6 @@ static int do_change_type(struct path *path, int ms_flags)
        return err;
 }
 
-static bool has_locked_children(struct mount *mnt, struct dentry *dentry)
-{
-       struct mount *child;
-       list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
-               if (!is_subdir(child->mnt_mountpoint, dentry))
-                       continue;
-
-               if (child->mnt.mnt_flags & MNT_LOCKED)
-                       return true;
-       }
-       return false;
-}
-
 static struct mount *__do_loopback(struct path *old_path, int recurse)
 {
        struct mount *mnt = ERR_PTR(-EINVAL), *old = real_mount(old_path->mnt);
@@ -3179,8 +3189,8 @@ int path_mount(const char *dev_name, struct path *path,
                return ret;
        if (!may_mount())
                return -EPERM;
-       if ((flags & SB_MANDLOCK) && !may_mandlock())
-               return -EPERM;
+       if (flags & SB_MANDLOCK)
+               warn_mandlock();
 
        /* Default to relatime unless overriden */
        if (!(flags & MS_NOATIME))
@@ -3563,9 +3573,8 @@ SYSCALL_DEFINE3(fsmount, int, fs_fd, unsigned int, flags,
        if (fc->phase != FS_CONTEXT_AWAITING_MOUNT)
                goto err_unlock;
 
-       ret = -EPERM;
-       if ((fc->sb_flags & SB_MANDLOCK) && !may_mandlock())
-               goto err_unlock;
+       if (fc->sb_flags & SB_MANDLOCK)
+               warn_mandlock();
 
        newmount.mnt = vfs_create_mount(fc);
        if (IS_ERR(newmount.mnt)) {
index 1fef107..514be5d 100644 (file)
@@ -806,10 +806,6 @@ int nfs_lock(struct file *filp, int cmd, struct file_lock *fl)
 
        nfs_inc_stats(inode, NFSIOS_VFSLOCK);
 
-       /* No mandatory locks over NFS */
-       if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK)
-               goto out_err;
-
        if (NFS_SERVER(inode)->flags & NFS_MOUNT_LOCAL_FCNTL)
                is_local = 1;
 
index fa67ecd..8313e1d 100644 (file)
@@ -5735,16 +5735,6 @@ check_special_stateids(struct net *net, svc_fh *current_fh, stateid_t *stateid,
                                NFS4_SHARE_DENY_READ);
 }
 
-/*
- * Allow READ/WRITE during grace period on recovered state only for files
- * that are not able to provide mandatory locking.
- */
-static inline int
-grace_disallows_io(struct net *net, struct inode *inode)
-{
-       return opens_in_grace(net) && mandatory_lock(inode);
-}
-
 static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_session)
 {
        /*
@@ -6026,7 +6016,6 @@ nfs4_preprocess_stateid_op(struct svc_rqst *rqstp,
                stateid_t *stateid, int flags, struct nfsd_file **nfp,
                struct nfs4_stid **cstid)
 {
-       struct inode *ino = d_inode(fhp->fh_dentry);
        struct net *net = SVC_NET(rqstp);
        struct nfsd_net *nn = net_generic(net, nfsd_net_id);
        struct nfs4_stid *s = NULL;
@@ -6035,9 +6024,6 @@ nfs4_preprocess_stateid_op(struct svc_rqst *rqstp,
        if (nfp)
                *nfp = NULL;
 
-       if (grace_disallows_io(net, ino))
-               return nfserr_grace;
-
        if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) {
                status = check_special_stateids(net, fhp, stateid, flags);
                goto done;
index a224a5e..92e77f9 100644 (file)
@@ -333,7 +333,6 @@ nfsd_get_write_access(struct svc_rqst *rqstp, struct svc_fh *fhp,
                struct iattr *iap)
 {
        struct inode *inode = d_inode(fhp->fh_dentry);
-       int host_err;
 
        if (iap->ia_size < inode->i_size) {
                __be32 err;
@@ -343,20 +342,7 @@ nfsd_get_write_access(struct svc_rqst *rqstp, struct svc_fh *fhp,
                if (err)
                        return err;
        }
-
-       host_err = get_write_access(inode);
-       if (host_err)
-               goto out_nfserrno;
-
-       host_err = locks_verify_truncate(inode, NULL, iap->ia_size);
-       if (host_err)
-               goto out_put_write_access;
-       return 0;
-
-out_put_write_access:
-       put_write_access(inode);
-out_nfserrno:
-       return nfserrno(host_err);
+       return nfserrno(get_write_access(inode));
 }
 
 /*
@@ -750,13 +736,6 @@ __nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
        err = nfserr_perm;
        if (IS_APPEND(inode) && (may_flags & NFSD_MAY_WRITE))
                goto out;
-       /*
-        * We must ignore files (but only files) which might have mandatory
-        * locks on them because there is no way to know if the accesser has
-        * the lock.
-        */
-       if (S_ISREG((inode)->i_mode) && mandatory_lock(inode))
-               goto out;
 
        if (!inode->i_fop)
                goto out;
index 4abd928..f6b2d28 100644 (file)
@@ -1053,7 +1053,7 @@ nilfs_fill_super(struct super_block *sb, void *data, int silent)
        sb->s_time_gran = 1;
        sb->s_max_links = NILFS_LINK_MAX;
 
-       sb->s_bdi = bdi_get(sb->s_bdev->bd_bdi);
+       sb->s_bdi = bdi_get(sb->s_bdev->bd_disk->bdi);
 
        err = load_nilfs(nilfs, sb);
        if (err)
index 64864fb..6facdf4 100644 (file)
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <linux/fanotify.h>
 #include <linux/fcntl.h>
+#include <linux/fdtable.h>
 #include <linux/file.h>
 #include <linux/fs.h>
 #include <linux/anon_inodes.h>
@@ -54,22 +55,27 @@ static int fanotify_max_queued_events __read_mostly;
 
 #include <linux/sysctl.h>
 
+static long ft_zero = 0;
+static long ft_int_max = INT_MAX;
+
 struct ctl_table fanotify_table[] = {
        {
                .procname       = "max_user_groups",
                .data   = &init_user_ns.ucount_max[UCOUNT_FANOTIFY_GROUPS],
-               .maxlen         = sizeof(int),
+               .maxlen         = sizeof(long),
                .mode           = 0644,
-               .proc_handler   = proc_dointvec_minmax,
-               .extra1         = SYSCTL_ZERO,
+               .proc_handler   = proc_doulongvec_minmax,
+               .extra1         = &ft_zero,
+               .extra2         = &ft_int_max,
        },
        {
                .procname       = "max_user_marks",
                .data   = &init_user_ns.ucount_max[UCOUNT_FANOTIFY_MARKS],
-               .maxlen         = sizeof(int),
+               .maxlen         = sizeof(long),
                .mode           = 0644,
-               .proc_handler   = proc_dointvec_minmax,
-               .extra1         = SYSCTL_ZERO,
+               .proc_handler   = proc_doulongvec_minmax,
+               .extra1         = &ft_zero,
+               .extra2         = &ft_int_max,
        },
        {
                .procname       = "max_queued_events",
@@ -104,8 +110,10 @@ struct kmem_cache *fanotify_path_event_cachep __read_mostly;
 struct kmem_cache *fanotify_perm_event_cachep __read_mostly;
 
 #define FANOTIFY_EVENT_ALIGN 4
-#define FANOTIFY_INFO_HDR_LEN \
+#define FANOTIFY_FID_INFO_HDR_LEN \
        (sizeof(struct fanotify_event_info_fid) + sizeof(struct file_handle))
+#define FANOTIFY_PIDFD_INFO_HDR_LEN \
+       sizeof(struct fanotify_event_info_pidfd)
 
 static int fanotify_fid_info_len(int fh_len, int name_len)
 {
@@ -114,10 +122,11 @@ static int fanotify_fid_info_len(int fh_len, int name_len)
        if (name_len)
                info_len += name_len + 1;
 
-       return roundup(FANOTIFY_INFO_HDR_LEN + info_len, FANOTIFY_EVENT_ALIGN);
+       return roundup(FANOTIFY_FID_INFO_HDR_LEN + info_len,
+                      FANOTIFY_EVENT_ALIGN);
 }
 
-static int fanotify_event_info_len(unsigned int fid_mode,
+static int fanotify_event_info_len(unsigned int info_mode,
                                   struct fanotify_event *event)
 {
        struct fanotify_info *info = fanotify_event_info(event);
@@ -128,7 +137,8 @@ static int fanotify_event_info_len(unsigned int fid_mode,
 
        if (dir_fh_len) {
                info_len += fanotify_fid_info_len(dir_fh_len, info->name_len);
-       } else if ((fid_mode & FAN_REPORT_NAME) && (event->mask & FAN_ONDIR)) {
+       } else if ((info_mode & FAN_REPORT_NAME) &&
+                  (event->mask & FAN_ONDIR)) {
                /*
                 * With group flag FAN_REPORT_NAME, if name was not recorded in
                 * event on a directory, we will report the name ".".
@@ -136,6 +146,9 @@ static int fanotify_event_info_len(unsigned int fid_mode,
                dot_len = 1;
        }
 
+       if (info_mode & FAN_REPORT_PIDFD)
+               info_len += FANOTIFY_PIDFD_INFO_HDR_LEN;
+
        if (fh_len)
                info_len += fanotify_fid_info_len(fh_len, dot_len);
 
@@ -171,7 +184,7 @@ static struct fanotify_event *get_one_event(struct fsnotify_group *group,
        size_t event_size = FAN_EVENT_METADATA_LEN;
        struct fanotify_event *event = NULL;
        struct fsnotify_event *fsn_event;
-       unsigned int fid_mode = FAN_GROUP_FLAG(group, FANOTIFY_FID_BITS);
+       unsigned int info_mode = FAN_GROUP_FLAG(group, FANOTIFY_INFO_MODES);
 
        pr_debug("%s: group=%p count=%zd\n", __func__, group, count);
 
@@ -181,8 +194,8 @@ static struct fanotify_event *get_one_event(struct fsnotify_group *group,
                goto out;
 
        event = FANOTIFY_E(fsn_event);
-       if (fid_mode)
-               event_size += fanotify_event_info_len(fid_mode, event);
+       if (info_mode)
+               event_size += fanotify_event_info_len(info_mode, event);
 
        if (event_size > count) {
                event = ERR_PTR(-EINVAL);
@@ -303,9 +316,10 @@ static int process_access_response(struct fsnotify_group *group,
        return -ENOENT;
 }
 
-static int copy_info_to_user(__kernel_fsid_t *fsid, struct fanotify_fh *fh,
-                            int info_type, const char *name, size_t name_len,
-                            char __user *buf, size_t count)
+static int copy_fid_info_to_user(__kernel_fsid_t *fsid, struct fanotify_fh *fh,
+                                int info_type, const char *name,
+                                size_t name_len,
+                                char __user *buf, size_t count)
 {
        struct fanotify_event_info_fid info = { };
        struct file_handle handle = { };
@@ -398,6 +412,117 @@ static int copy_info_to_user(__kernel_fsid_t *fsid, struct fanotify_fh *fh,
        return info_len;
 }
 
+static int copy_pidfd_info_to_user(int pidfd,
+                                  char __user *buf,
+                                  size_t count)
+{
+       struct fanotify_event_info_pidfd info = { };
+       size_t info_len = FANOTIFY_PIDFD_INFO_HDR_LEN;
+
+       if (WARN_ON_ONCE(info_len > count))
+               return -EFAULT;
+
+       info.hdr.info_type = FAN_EVENT_INFO_TYPE_PIDFD;
+       info.hdr.len = info_len;
+       info.pidfd = pidfd;
+
+       if (copy_to_user(buf, &info, info_len))
+               return -EFAULT;
+
+       return info_len;
+}
+
+static int copy_info_records_to_user(struct fanotify_event *event,
+                                    struct fanotify_info *info,
+                                    unsigned int info_mode, int pidfd,
+                                    char __user *buf, size_t count)
+{
+       int ret, total_bytes = 0, info_type = 0;
+       unsigned int fid_mode = info_mode & FANOTIFY_FID_BITS;
+       unsigned int pidfd_mode = info_mode & FAN_REPORT_PIDFD;
+
+       /*
+        * Event info records order is as follows: dir fid + name, child fid.
+        */
+       if (fanotify_event_dir_fh_len(event)) {
+               info_type = info->name_len ? FAN_EVENT_INFO_TYPE_DFID_NAME :
+                                            FAN_EVENT_INFO_TYPE_DFID;
+               ret = copy_fid_info_to_user(fanotify_event_fsid(event),
+                                           fanotify_info_dir_fh(info),
+                                           info_type,
+                                           fanotify_info_name(info),
+                                           info->name_len, buf, count);
+               if (ret < 0)
+                       return ret;
+
+               buf += ret;
+               count -= ret;
+               total_bytes += ret;
+       }
+
+       if (fanotify_event_object_fh_len(event)) {
+               const char *dot = NULL;
+               int dot_len = 0;
+
+               if (fid_mode == FAN_REPORT_FID || info_type) {
+                       /*
+                        * With only group flag FAN_REPORT_FID only type FID is
+                        * reported. Second info record type is always FID.
+                        */
+                       info_type = FAN_EVENT_INFO_TYPE_FID;
+               } else if ((fid_mode & FAN_REPORT_NAME) &&
+                          (event->mask & FAN_ONDIR)) {
+                       /*
+                        * With group flag FAN_REPORT_NAME, if name was not
+                        * recorded in an event on a directory, report the name
+                        * "." with info type DFID_NAME.
+                        */
+                       info_type = FAN_EVENT_INFO_TYPE_DFID_NAME;
+                       dot = ".";
+                       dot_len = 1;
+               } else if ((event->mask & ALL_FSNOTIFY_DIRENT_EVENTS) ||
+                          (event->mask & FAN_ONDIR)) {
+                       /*
+                        * With group flag FAN_REPORT_DIR_FID, a single info
+                        * record has type DFID for directory entry modification
+                        * event and for event on a directory.
+                        */
+                       info_type = FAN_EVENT_INFO_TYPE_DFID;
+               } else {
+                       /*
+                        * With group flags FAN_REPORT_DIR_FID|FAN_REPORT_FID,
+                        * a single info record has type FID for event on a
+                        * non-directory, when there is no directory to report.
+                        * For example, on FAN_DELETE_SELF event.
+                        */
+                       info_type = FAN_EVENT_INFO_TYPE_FID;
+               }
+
+               ret = copy_fid_info_to_user(fanotify_event_fsid(event),
+                                           fanotify_event_object_fh(event),
+                                           info_type, dot, dot_len,
+                                           buf, count);
+               if (ret < 0)
+                       return ret;
+
+               buf += ret;
+               count -= ret;
+               total_bytes += ret;
+       }
+
+       if (pidfd_mode) {
+               ret = copy_pidfd_info_to_user(pidfd, buf, count);
+               if (ret < 0)
+                       return ret;
+
+               buf += ret;
+               count -= ret;
+               total_bytes += ret;
+       }
+
+       return total_bytes;
+}
+
 static ssize_t copy_event_to_user(struct fsnotify_group *group,
                                  struct fanotify_event *event,
                                  char __user *buf, size_t count)
@@ -405,15 +530,15 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
        struct fanotify_event_metadata metadata;
        struct path *path = fanotify_event_path(event);
        struct fanotify_info *info = fanotify_event_info(event);
-       unsigned int fid_mode = FAN_GROUP_FLAG(group, FANOTIFY_FID_BITS);
+       unsigned int info_mode = FAN_GROUP_FLAG(group, FANOTIFY_INFO_MODES);
+       unsigned int pidfd_mode = info_mode & FAN_REPORT_PIDFD;
        struct file *f = NULL;
-       int ret, fd = FAN_NOFD;
-       int info_type = 0;
+       int ret, pidfd = FAN_NOPIDFD, fd = FAN_NOFD;
 
        pr_debug("%s: group=%p event=%p\n", __func__, group, event);
 
        metadata.event_len = FAN_EVENT_METADATA_LEN +
-                               fanotify_event_info_len(fid_mode, event);
+                               fanotify_event_info_len(info_mode, event);
        metadata.metadata_len = FAN_EVENT_METADATA_LEN;
        metadata.vers = FANOTIFY_METADATA_VERSION;
        metadata.reserved = 0;
@@ -442,6 +567,33 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
        }
        metadata.fd = fd;
 
+       if (pidfd_mode) {
+               /*
+                * Complain if the FAN_REPORT_PIDFD and FAN_REPORT_TID mutual
+                * exclusion is ever lifted. At the time of incoporating pidfd
+                * support within fanotify, the pidfd API only supported the
+                * creation of pidfds for thread-group leaders.
+                */
+               WARN_ON_ONCE(FAN_GROUP_FLAG(group, FAN_REPORT_TID));
+
+               /*
+                * The PIDTYPE_TGID check for an event->pid is performed
+                * preemptively in an attempt to catch out cases where the event
+                * listener reads events after the event generating process has
+                * already terminated. Report FAN_NOPIDFD to the event listener
+                * in those cases, with all other pidfd creation errors being
+                * reported as FAN_EPIDFD.
+                */
+               if (metadata.pid == 0 ||
+                   !pid_has_task(event->pid, PIDTYPE_TGID)) {
+                       pidfd = FAN_NOPIDFD;
+               } else {
+                       pidfd = pidfd_create(event->pid, 0);
+                       if (pidfd < 0)
+                               pidfd = FAN_EPIDFD;
+               }
+       }
+
        ret = -EFAULT;
        /*
         * Sanity check copy size in case get_one_event() and
@@ -462,67 +614,11 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
        if (f)
                fd_install(fd, f);
 
-       /* Event info records order is: dir fid + name, child fid */
-       if (fanotify_event_dir_fh_len(event)) {
-               info_type = info->name_len ? FAN_EVENT_INFO_TYPE_DFID_NAME :
-                                            FAN_EVENT_INFO_TYPE_DFID;
-               ret = copy_info_to_user(fanotify_event_fsid(event),
-                                       fanotify_info_dir_fh(info),
-                                       info_type, fanotify_info_name(info),
-                                       info->name_len, buf, count);
+       if (info_mode) {
+               ret = copy_info_records_to_user(event, info, info_mode, pidfd,
+                                               buf, count);
                if (ret < 0)
                        goto out_close_fd;
-
-               buf += ret;
-               count -= ret;
-       }
-
-       if (fanotify_event_object_fh_len(event)) {
-               const char *dot = NULL;
-               int dot_len = 0;
-
-               if (fid_mode == FAN_REPORT_FID || info_type) {
-                       /*
-                        * With only group flag FAN_REPORT_FID only type FID is
-                        * reported. Second info record type is always FID.
-                        */
-                       info_type = FAN_EVENT_INFO_TYPE_FID;
-               } else if ((fid_mode & FAN_REPORT_NAME) &&
-                          (event->mask & FAN_ONDIR)) {
-                       /*
-                        * With group flag FAN_REPORT_NAME, if name was not
-                        * recorded in an event on a directory, report the
-                        * name "." with info type DFID_NAME.
-                        */
-                       info_type = FAN_EVENT_INFO_TYPE_DFID_NAME;
-                       dot = ".";
-                       dot_len = 1;
-               } else if ((event->mask & ALL_FSNOTIFY_DIRENT_EVENTS) ||
-                          (event->mask & FAN_ONDIR)) {
-                       /*
-                        * With group flag FAN_REPORT_DIR_FID, a single info
-                        * record has type DFID for directory entry modification
-                        * event and for event on a directory.
-                        */
-                       info_type = FAN_EVENT_INFO_TYPE_DFID;
-               } else {
-                       /*
-                        * With group flags FAN_REPORT_DIR_FID|FAN_REPORT_FID,
-                        * a single info record has type FID for event on a
-                        * non-directory, when there is no directory to report.
-                        * For example, on FAN_DELETE_SELF event.
-                        */
-                       info_type = FAN_EVENT_INFO_TYPE_FID;
-               }
-
-               ret = copy_info_to_user(fanotify_event_fsid(event),
-                                       fanotify_event_object_fh(event),
-                                       info_type, dot, dot_len, buf, count);
-               if (ret < 0)
-                       goto out_close_fd;
-
-               buf += ret;
-               count -= ret;
        }
 
        return metadata.event_len;
@@ -532,6 +628,10 @@ out_close_fd:
                put_unused_fd(fd);
                fput(f);
        }
+
+       if (pidfd >= 0)
+               close_fd(pidfd);
+
        return ret;
 }
 
@@ -1077,6 +1177,14 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
 #endif
                return -EINVAL;
 
+       /*
+        * A pidfd can only be returned for a thread-group leader; thus
+        * FAN_REPORT_PIDFD and FAN_REPORT_TID need to remain mutually
+        * exclusive.
+        */
+       if ((flags & FAN_REPORT_PIDFD) && (flags & FAN_REPORT_TID))
+               return -EINVAL;
+
        if (event_f_flags & ~FANOTIFY_INIT_ALL_EVENT_F_BITS)
                return -EINVAL;
 
@@ -1478,7 +1586,7 @@ static int __init fanotify_user_setup(void)
                                     FANOTIFY_DEFAULT_MAX_USER_MARKS);
 
        BUILD_BUG_ON(FANOTIFY_INIT_FLAGS & FANOTIFY_INTERNAL_GROUP_FLAGS);
-       BUILD_BUG_ON(HWEIGHT32(FANOTIFY_INIT_FLAGS) != 10);
+       BUILD_BUG_ON(HWEIGHT32(FANOTIFY_INIT_FLAGS) != 11);
        BUILD_BUG_ON(HWEIGHT32(FANOTIFY_MARK_FLAGS) != 9);
 
        fanotify_mark_cache = KMEM_CACHE(fsnotify_mark,
index 30d422b..963e6ce 100644 (file)
@@ -87,15 +87,15 @@ static void fsnotify_unmount_inodes(struct super_block *sb)
 
        if (iput_inode)
                iput(iput_inode);
-       /* Wait for outstanding inode references from connectors */
-       wait_var_event(&sb->s_fsnotify_inode_refs,
-                      !atomic_long_read(&sb->s_fsnotify_inode_refs));
 }
 
 void fsnotify_sb_delete(struct super_block *sb)
 {
        fsnotify_unmount_inodes(sb);
        fsnotify_clear_marks_by_sb(sb);
+       /* Wait for outstanding object references from connectors */
+       wait_var_event(&sb->s_fsnotify_connectors,
+                      !atomic_long_read(&sb->s_fsnotify_connectors));
 }
 
 /*
index ff2063e..87d8a50 100644 (file)
@@ -27,6 +27,21 @@ static inline struct super_block *fsnotify_conn_sb(
        return container_of(conn->obj, struct super_block, s_fsnotify_marks);
 }
 
+static inline struct super_block *fsnotify_connector_sb(
+                               struct fsnotify_mark_connector *conn)
+{
+       switch (conn->type) {
+       case FSNOTIFY_OBJ_TYPE_INODE:
+               return fsnotify_conn_inode(conn)->i_sb;
+       case FSNOTIFY_OBJ_TYPE_VFSMOUNT:
+               return fsnotify_conn_mount(conn)->mnt.mnt_sb;
+       case FSNOTIFY_OBJ_TYPE_SB:
+               return fsnotify_conn_sb(conn);
+       default:
+               return NULL;
+       }
+}
+
 /* destroy all events sitting in this groups notification queue */
 extern void fsnotify_flush_notify(struct fsnotify_group *group);
 
index 98f61b3..6205124 100644 (file)
@@ -55,22 +55,27 @@ struct kmem_cache *inotify_inode_mark_cachep __read_mostly;
 
 #include <linux/sysctl.h>
 
+static long it_zero = 0;
+static long it_int_max = INT_MAX;
+
 struct ctl_table inotify_table[] = {
        {
                .procname       = "max_user_instances",
                .data           = &init_user_ns.ucount_max[UCOUNT_INOTIFY_INSTANCES],
-               .maxlen         = sizeof(int),
+               .maxlen         = sizeof(long),
                .mode           = 0644,
-               .proc_handler   = proc_dointvec_minmax,
-               .extra1         = SYSCTL_ZERO,
+               .proc_handler   = proc_doulongvec_minmax,
+               .extra1         = &it_zero,
+               .extra2         = &it_int_max,
        },
        {
                .procname       = "max_user_watches",
                .data           = &init_user_ns.ucount_max[UCOUNT_INOTIFY_WATCHES],
-               .maxlen         = sizeof(int),
+               .maxlen         = sizeof(long),
                .mode           = 0644,
-               .proc_handler   = proc_dointvec_minmax,
-               .extra1         = SYSCTL_ZERO,
+               .proc_handler   = proc_doulongvec_minmax,
+               .extra1         = &it_zero,
+               .extra2         = &it_int_max,
        },
        {
                .procname       = "max_queued_events",
index d32ab34..95006d1 100644 (file)
@@ -169,6 +169,37 @@ static void fsnotify_connector_destroy_workfn(struct work_struct *work)
        }
 }
 
+static void fsnotify_get_inode_ref(struct inode *inode)
+{
+       ihold(inode);
+       atomic_long_inc(&inode->i_sb->s_fsnotify_connectors);
+}
+
+static void fsnotify_put_inode_ref(struct inode *inode)
+{
+       struct super_block *sb = inode->i_sb;
+
+       iput(inode);
+       if (atomic_long_dec_and_test(&sb->s_fsnotify_connectors))
+               wake_up_var(&sb->s_fsnotify_connectors);
+}
+
+static void fsnotify_get_sb_connectors(struct fsnotify_mark_connector *conn)
+{
+       struct super_block *sb = fsnotify_connector_sb(conn);
+
+       if (sb)
+               atomic_long_inc(&sb->s_fsnotify_connectors);
+}
+
+static void fsnotify_put_sb_connectors(struct fsnotify_mark_connector *conn)
+{
+       struct super_block *sb = fsnotify_connector_sb(conn);
+
+       if (sb && atomic_long_dec_and_test(&sb->s_fsnotify_connectors))
+               wake_up_var(&sb->s_fsnotify_connectors);
+}
+
 static void *fsnotify_detach_connector_from_object(
                                        struct fsnotify_mark_connector *conn,
                                        unsigned int *type)
@@ -182,13 +213,13 @@ static void *fsnotify_detach_connector_from_object(
        if (conn->type == FSNOTIFY_OBJ_TYPE_INODE) {
                inode = fsnotify_conn_inode(conn);
                inode->i_fsnotify_mask = 0;
-               atomic_long_inc(&inode->i_sb->s_fsnotify_inode_refs);
        } else if (conn->type == FSNOTIFY_OBJ_TYPE_VFSMOUNT) {
                fsnotify_conn_mount(conn)->mnt_fsnotify_mask = 0;
        } else if (conn->type == FSNOTIFY_OBJ_TYPE_SB) {
                fsnotify_conn_sb(conn)->s_fsnotify_mask = 0;
        }
 
+       fsnotify_put_sb_connectors(conn);
        rcu_assign_pointer(*(conn->obj), NULL);
        conn->obj = NULL;
        conn->type = FSNOTIFY_OBJ_TYPE_DETACHED;
@@ -209,19 +240,12 @@ static void fsnotify_final_mark_destroy(struct fsnotify_mark *mark)
 /* Drop object reference originally held by a connector */
 static void fsnotify_drop_object(unsigned int type, void *objp)
 {
-       struct inode *inode;
-       struct super_block *sb;
-
        if (!objp)
                return;
        /* Currently only inode references are passed to be dropped */
        if (WARN_ON_ONCE(type != FSNOTIFY_OBJ_TYPE_INODE))
                return;
-       inode = objp;
-       sb = inode->i_sb;
-       iput(inode);
-       if (atomic_long_dec_and_test(&sb->s_fsnotify_inode_refs))
-               wake_up_var(&sb->s_fsnotify_inode_refs);
+       fsnotify_put_inode_ref(objp);
 }
 
 void fsnotify_put_mark(struct fsnotify_mark *mark)
@@ -493,8 +517,12 @@ static int fsnotify_attach_connector_to_object(fsnotify_connp_t *connp,
                conn->fsid.val[0] = conn->fsid.val[1] = 0;
                conn->flags = 0;
        }
-       if (conn->type == FSNOTIFY_OBJ_TYPE_INODE)
-               inode = igrab(fsnotify_conn_inode(conn));
+       if (conn->type == FSNOTIFY_OBJ_TYPE_INODE) {
+               inode = fsnotify_conn_inode(conn);
+               fsnotify_get_inode_ref(inode);
+       }
+       fsnotify_get_sb_connectors(conn);
+
        /*
         * cmpxchg() provides the barrier so that readers of *connp can see
         * only initialized structure
@@ -502,7 +530,7 @@ static int fsnotify_attach_connector_to_object(fsnotify_connp_t *connp,
        if (cmpxchg(connp, NULL, conn)) {
                /* Someone else created list structure for us */
                if (inode)
-                       iput(inode);
+                       fsnotify_put_inode_ref(inode);
                kmem_cache_free(fsnotify_mark_connector_cachep, conn);
        }
 
index 7756579..54d7843 100644 (file)
@@ -1529,6 +1529,45 @@ static void ocfs2_truncate_cluster_pages(struct inode *inode, u64 byte_start,
        }
 }
 
+/*
+ * zero out partial blocks of one cluster.
+ *
+ * start: file offset where zero starts, will be made upper block aligned.
+ * len: it will be trimmed to the end of current cluster if "start + len"
+ *      is bigger than it.
+ */
+static int ocfs2_zeroout_partial_cluster(struct inode *inode,
+                                       u64 start, u64 len)
+{
+       int ret;
+       u64 start_block, end_block, nr_blocks;
+       u64 p_block, offset;
+       u32 cluster, p_cluster, nr_clusters;
+       struct super_block *sb = inode->i_sb;
+       u64 end = ocfs2_align_bytes_to_clusters(sb, start);
+
+       if (start + len < end)
+               end = start + len;
+
+       start_block = ocfs2_blocks_for_bytes(sb, start);
+       end_block = ocfs2_blocks_for_bytes(sb, end);
+       nr_blocks = end_block - start_block;
+       if (!nr_blocks)
+               return 0;
+
+       cluster = ocfs2_bytes_to_clusters(sb, start);
+       ret = ocfs2_get_clusters(inode, cluster, &p_cluster,
+                               &nr_clusters, NULL);
+       if (ret)
+               return ret;
+       if (!p_cluster)
+               return 0;
+
+       offset = start_block - ocfs2_clusters_to_blocks(sb, cluster);
+       p_block = ocfs2_clusters_to_blocks(sb, p_cluster) + offset;
+       return sb_issue_zeroout(sb, p_block, nr_blocks, GFP_NOFS);
+}
+
 static int ocfs2_zero_partial_clusters(struct inode *inode,
                                       u64 start, u64 len)
 {
@@ -1538,6 +1577,7 @@ static int ocfs2_zero_partial_clusters(struct inode *inode,
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        unsigned int csize = osb->s_clustersize;
        handle_t *handle;
+       loff_t isize = i_size_read(inode);
 
        /*
         * The "start" and "end" values are NOT necessarily part of
@@ -1558,6 +1598,26 @@ static int ocfs2_zero_partial_clusters(struct inode *inode,
        if ((start & (csize - 1)) == 0 && (end & (csize - 1)) == 0)
                goto out;
 
+       /* No page cache for EOF blocks, issue zero out to disk. */
+       if (end > isize) {
+               /*
+                * zeroout eof blocks in last cluster starting from
+                * "isize" even "start" > "isize" because it is
+                * complicated to zeroout just at "start" as "start"
+                * may be not aligned with block size, buffer write
+                * would be required to do that, but out of eof buffer
+                * write is not supported.
+                */
+               ret = ocfs2_zeroout_partial_cluster(inode, isize,
+                                       end - isize);
+               if (ret) {
+                       mlog_errno(ret);
+                       goto out;
+               }
+               if (start >= isize)
+                       goto out;
+               end = isize;
+       }
        handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
        if (IS_ERR(handle)) {
                ret = PTR_ERR(handle);
@@ -1855,45 +1915,6 @@ out:
        return ret;
 }
 
-/*
- * zero out partial blocks of one cluster.
- *
- * start: file offset where zero starts, will be made upper block aligned.
- * len: it will be trimmed to the end of current cluster if "start + len"
- *      is bigger than it.
- */
-static int ocfs2_zeroout_partial_cluster(struct inode *inode,
-                                       u64 start, u64 len)
-{
-       int ret;
-       u64 start_block, end_block, nr_blocks;
-       u64 p_block, offset;
-       u32 cluster, p_cluster, nr_clusters;
-       struct super_block *sb = inode->i_sb;
-       u64 end = ocfs2_align_bytes_to_clusters(sb, start);
-
-       if (start + len < end)
-               end = start + len;
-
-       start_block = ocfs2_blocks_for_bytes(sb, start);
-       end_block = ocfs2_blocks_for_bytes(sb, end);
-       nr_blocks = end_block - start_block;
-       if (!nr_blocks)
-               return 0;
-
-       cluster = ocfs2_bytes_to_clusters(sb, start);
-       ret = ocfs2_get_clusters(inode, cluster, &p_cluster,
-                               &nr_clusters, NULL);
-       if (ret)
-               return ret;
-       if (!p_cluster)
-               return 0;
-
-       offset = start_block - ocfs2_clusters_to_blocks(sb, cluster);
-       p_block = ocfs2_clusters_to_blocks(sb, p_cluster) + offset;
-       return sb_issue_zeroout(sb, p_block, nr_blocks, GFP_NOFS);
-}
-
 /*
  * Parts of this function taken from xfs_change_file_space()
  */
@@ -1935,7 +1956,6 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
                goto out_inode_unlock;
        }
 
-       orig_isize = i_size_read(inode);
        switch (sr->l_whence) {
        case 0: /*SEEK_SET*/
                break;
@@ -1943,7 +1963,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
                sr->l_start += f_pos;
                break;
        case 2: /*SEEK_END*/
-               sr->l_start += orig_isize;
+               sr->l_start += i_size_read(inode);
                break;
        default:
                ret = -EINVAL;
@@ -1998,6 +2018,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
                ret = -EINVAL;
        }
 
+       orig_isize = i_size_read(inode);
        /* zeroout eof blocks in the cluster. */
        if (!ret && change_size && orig_isize < size) {
                ret = ocfs2_zeroout_partial_cluster(inode, orig_isize,
index fab7c6a..73a3854 100644 (file)
@@ -101,8 +101,6 @@ int ocfs2_flock(struct file *file, int cmd, struct file_lock *fl)
 
        if (!(fl->fl_flags & FL_FLOCK))
                return -ENOLCK;
-       if (__mandatory_lock(inode))
-               return -ENOLCK;
 
        if ((osb->s_mount_opt & OCFS2_MOUNT_LOCALFLOCKS) ||
            ocfs2_mount_local(osb))
@@ -121,8 +119,6 @@ int ocfs2_lock(struct file *file, int cmd, struct file_lock *fl)
 
        if (!(fl->fl_flags & FL_POSIX))
                return -ENOLCK;
-       if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK)
-               return -ENOLCK;
 
        return ocfs2_plock(osb->cconn, OCFS2_I(inode)->ip_blkno, file, cmd, fl);
 }
index 94bef26..daa3246 100644 (file)
--- a/fs/open.c
+++ b/fs/open.c
@@ -105,9 +105,7 @@ long vfs_truncate(const struct path *path, loff_t length)
        if (error)
                goto put_write_and_out;
 
-       error = locks_verify_truncate(inode, NULL, length);
-       if (!error)
-               error = security_path_truncate(path);
+       error = security_path_truncate(path);
        if (!error)
                error = do_truncate(mnt_userns, path->dentry, length, 0, NULL);
 
@@ -189,9 +187,7 @@ long do_sys_ftruncate(unsigned int fd, loff_t length, int small)
        if (IS_APPEND(file_inode(f.file)))
                goto out_putf;
        sb_start_write(inode->i_sb);
-       error = locks_verify_truncate(inode, f.file, length);
-       if (!error)
-               error = security_path_truncate(&f.file->f_path);
+       error = security_path_truncate(&f.file->f_path);
        if (!error)
                error = do_truncate(file_mnt_user_ns(f.file), dentry, length,
                                    ATTR_MTIME | ATTR_CTIME, f.file);
index 41ebf52..ebde05c 100644 (file)
@@ -392,6 +392,7 @@ static struct dentry *ovl_lookup_real_one(struct dentry *connected,
         */
        take_dentry_name_snapshot(&name, real);
        this = lookup_one_len(name.name.name, connected, name.name.len);
+       release_dentry_name_snapshot(&name);
        err = PTR_ERR(this);
        if (IS_ERR(this)) {
                goto fail;
@@ -406,7 +407,6 @@ static struct dentry *ovl_lookup_real_one(struct dentry *connected,
        }
 
 out:
-       release_dentry_name_snapshot(&name);
        dput(parent);
        inode_unlock(dir);
        return this;
index 4d53d3b..d081faa 100644 (file)
@@ -392,6 +392,51 @@ out_unlock:
        return ret;
 }
 
+/*
+ * Calling iter_file_splice_write() directly from overlay's f_op may deadlock
+ * due to lock order inversion between pipe->mutex in iter_file_splice_write()
+ * and file_start_write(real.file) in ovl_write_iter().
+ *
+ * So do everything ovl_write_iter() does and call iter_file_splice_write() on
+ * the real file.
+ */
+static ssize_t ovl_splice_write(struct pipe_inode_info *pipe, struct file *out,
+                               loff_t *ppos, size_t len, unsigned int flags)
+{
+       struct fd real;
+       const struct cred *old_cred;
+       struct inode *inode = file_inode(out);
+       struct inode *realinode = ovl_inode_real(inode);
+       ssize_t ret;
+
+       inode_lock(inode);
+       /* Update mode */
+       ovl_copyattr(realinode, inode);
+       ret = file_remove_privs(out);
+       if (ret)
+               goto out_unlock;
+
+       ret = ovl_real_fdget(out, &real);
+       if (ret)
+               goto out_unlock;
+
+       old_cred = ovl_override_creds(inode->i_sb);
+       file_start_write(real.file);
+
+       ret = iter_file_splice_write(pipe, real.file, ppos, len, flags);
+
+       file_end_write(real.file);
+       /* Update size */
+       ovl_copyattr(realinode, inode);
+       revert_creds(old_cred);
+       fdput(real);
+
+out_unlock:
+       inode_unlock(inode);
+
+       return ret;
+}
+
 static int ovl_fsync(struct file *file, loff_t start, loff_t end, int datasync)
 {
        struct fd real;
@@ -603,7 +648,7 @@ const struct file_operations ovl_file_operations = {
        .fadvise        = ovl_fadvise,
        .flush          = ovl_flush,
        .splice_read    = generic_file_splice_read,
-       .splice_write   = iter_file_splice_write,
+       .splice_write   = ovl_splice_write,
 
        .copy_file_range        = ovl_copy_file_range,
        .remap_file_range       = ovl_remap_file_range,
index e8ad2c2..150fdf3 100644 (file)
@@ -481,6 +481,8 @@ static int ovl_cache_update_ino(struct path *path, struct ovl_cache_entry *p)
        }
        this = lookup_one_len(p->name, dir, p->len);
        if (IS_ERR_OR_NULL(this) || !this->d_inode) {
+               /* Mark a stale entry */
+               p->is_whiteout = true;
                if (IS_ERR(this)) {
                        err = PTR_ERR(this);
                        this = NULL;
@@ -776,6 +778,9 @@ static int ovl_iterate(struct file *file, struct dir_context *ctx)
                                if (err)
                                        goto out;
                        }
+               }
+               /* ovl_cache_update_ino() sets is_whiteout on stale entry */
+               if (!p->is_whiteout) {
                        if (!dir_emit(ctx, p->name, p->len, p->ino, p->type))
                                break;
                }
index bfd946a..6d4342b 100644 (file)
--- a/fs/pipe.c
+++ b/fs/pipe.c
 
 #include "internal.h"
 
+/*
+ * New pipe buffers will be restricted to this size while the user is exceeding
+ * their pipe buffer quota. The general pipe use case needs at least two
+ * buffers: one for data yet to be read, and one for new data. If this is less
+ * than two, then a write to a non-empty pipe may block even if the pipe is not
+ * full. This can occur with GNU make jobserver or similar uses of pipes as
+ * semaphores: multiple processes may be waiting to write tokens back to the
+ * pipe before reading tokens: https://lore.kernel.org/lkml/1628086770.5rn8p04n6j.none@localhost/.
+ *
+ * Users can reduce their pipe buffers with F_SETPIPE_SZ below this at their
+ * own risk, namely: pipe writes to non-full pipes may block until the pipe is
+ * emptied.
+ */
+#define PIPE_MIN_DEF_BUFFERS 2
+
 /*
  * The max size that a non-root user is allowed to grow the pipe. Can
  * be set by root in /proc/sys/fs/pipe-max-size
@@ -348,10 +363,9 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to)
                 * _very_ unlikely case that the pipe was full, but we got
                 * no data.
                 */
-               if (unlikely(was_full)) {
+               if (unlikely(was_full))
                        wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM);
-                       kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
-               }
+               kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
 
                /*
                 * But because we didn't read anything, at this point we can
@@ -370,12 +384,11 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to)
                wake_next_reader = false;
        __pipe_unlock(pipe);
 
-       if (was_full) {
+       if (was_full)
                wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM);
-               kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
-       }
        if (wake_next_reader)
                wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM);
+       kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
        if (ret > 0)
                file_accessed(filp);
        return ret;
@@ -429,14 +442,11 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
 #endif
 
        /*
-        * Only wake up if the pipe started out empty, since
-        * otherwise there should be no readers waiting.
-        *
         * If it wasn't empty we try to merge new data into
         * the last buffer.
         *
         * That naturally merges small writes, but it also
-        * page-aligs the rest of the writes for large writes
+        * page-aligns the rest of the writes for large writes
         * spanning multiple pages.
         */
        head = pipe->head;
@@ -553,10 +563,9 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
                 * become empty while we dropped the lock.
                 */
                __pipe_unlock(pipe);
-               if (was_empty) {
+               if (was_empty)
                        wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM);
-                       kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
-               }
+               kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
                wait_event_interruptible_exclusive(pipe->wr_wait, pipe_writable(pipe));
                __pipe_lock(pipe);
                was_empty = pipe_empty(pipe->head, pipe->tail);
@@ -575,11 +584,13 @@ out:
         * This is particularly important for small writes, because of
         * how (for example) the GNU make jobserver uses small writes to
         * wake up pending jobs
+        *
+        * Epoll nonsensically wants a wakeup whether the pipe
+        * was already empty or not.
         */
-       if (was_empty) {
+       if (was_empty || pipe->poll_usage)
                wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM);
-               kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
-       }
+       kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
        if (wake_next_writer)
                wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM);
        if (ret > 0 && sb_start_write_trylock(file_inode(filp)->i_sb)) {
@@ -639,6 +650,9 @@ pipe_poll(struct file *filp, poll_table *wait)
        struct pipe_inode_info *pipe = filp->private_data;
        unsigned int head, tail;
 
+       /* Epoll has some historical nasty semantics, this enables them */
+       pipe->poll_usage = 1;
+
        /*
         * Reading pipe state only -- no need for acquiring the semaphore.
         *
@@ -781,8 +795,8 @@ struct pipe_inode_info *alloc_pipe_info(void)
        user_bufs = account_pipe_buffers(user, 0, pipe_bufs);
 
        if (too_many_pipe_buffers_soft(user_bufs) && pipe_is_unprivileged_user()) {
-               user_bufs = account_pipe_buffers(user, pipe_bufs, 1);
-               pipe_bufs = 1;
+               user_bufs = account_pipe_buffers(user, pipe_bufs, PIPE_MIN_DEF_BUFFERS);
+               pipe_bufs = PIPE_MIN_DEF_BUFFERS;
        }
 
        if (too_many_pipe_buffers_hard(user_bufs) && pipe_is_unprivileged_user())
index 9db7adf..af057c5 100644 (file)
@@ -365,12 +365,8 @@ out_putf:
 
 int rw_verify_area(int read_write, struct file *file, const loff_t *ppos, size_t count)
 {
-       struct inode *inode;
-       int retval = -EINVAL;
-
-       inode = file_inode(file);
        if (unlikely((ssize_t) count < 0))
-               return retval;
+               return -EINVAL;
 
        /*
         * ranged mandatory locking does not apply to streams - it makes sense
@@ -381,19 +377,12 @@ int rw_verify_area(int read_write, struct file *file, const loff_t *ppos, size_t
 
                if (unlikely(pos < 0)) {
                        if (!unsigned_offsets(file))
-                               return retval;
+                               return -EINVAL;
                        if (count >= -pos) /* both values are in 0..LLONG_MAX */
                                return -EOVERFLOW;
                } else if (unlikely((loff_t) (pos + count) < 0)) {
                        if (!unsigned_offsets(file))
-                               return retval;
-               }
-
-               if (unlikely(inode->i_flctx && mandatory_lock(inode))) {
-                       retval = locks_mandatory_area(inode, file, pos, pos + count - 1,
-                                       read_write == READ ? F_RDLCK : F_WRLCK);
-                       if (retval < 0)
-                               return retval;
+                               return -EINVAL;
                }
        }
 
index 476a7ff..ef42729 100644 (file)
@@ -387,6 +387,24 @@ void pathrelse(struct treepath *search_path)
        search_path->path_length = ILLEGAL_PATH_ELEMENT_OFFSET;
 }
 
+static int has_valid_deh_location(struct buffer_head *bh, struct item_head *ih)
+{
+       struct reiserfs_de_head *deh;
+       int i;
+
+       deh = B_I_DEH(bh, ih);
+       for (i = 0; i < ih_entry_count(ih); i++) {
+               if (deh_location(&deh[i]) > ih_item_len(ih)) {
+                       reiserfs_warning(NULL, "reiserfs-5094",
+                                        "directory entry location seems wrong %h",
+                                        &deh[i]);
+                       return 0;
+               }
+       }
+
+       return 1;
+}
+
 static int is_leaf(char *buf, int blocksize, struct buffer_head *bh)
 {
        struct block_head *blkh;
@@ -454,11 +472,14 @@ static int is_leaf(char *buf, int blocksize, struct buffer_head *bh)
                                         "(second one): %h", ih);
                        return 0;
                }
-               if (is_direntry_le_ih(ih) && (ih_item_len(ih) < (ih_entry_count(ih) * IH_SIZE))) {
-                       reiserfs_warning(NULL, "reiserfs-5093",
-                                        "item entry count seems wrong %h",
-                                        ih);
-                       return 0;
+               if (is_direntry_le_ih(ih)) {
+                       if (ih_item_len(ih) < (ih_entry_count(ih) * IH_SIZE)) {
+                               reiserfs_warning(NULL, "reiserfs-5093",
+                                                "item entry count seems wrong %h",
+                                                ih);
+                               return 0;
+                       }
+                       return has_valid_deh_location(bh, ih);
                }
                prev_location = ih_location(ih);
        }
index 3ffafc7..58481f8 100644 (file)
@@ -2082,6 +2082,14 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
                unlock_new_inode(root_inode);
        }
 
+       if (!S_ISDIR(root_inode->i_mode) || !inode_get_bytes(root_inode) ||
+           !root_inode->i_size) {
+               SWARN(silent, s, "", "corrupt root inode, run fsck");
+               iput(root_inode);
+               errval = -EUCLEAN;
+               goto error;
+       }
+
        s->s_root = d_make_root(root_inode);
        if (!s->s_root)
                goto error;
index e4a5fdd..6d4a9be 100644 (file)
@@ -99,24 +99,12 @@ static int generic_remap_checks(struct file *file_in, loff_t pos_in,
 static int remap_verify_area(struct file *file, loff_t pos, loff_t len,
                             bool write)
 {
-       struct inode *inode = file_inode(file);
-
        if (unlikely(pos < 0 || len < 0))
                return -EINVAL;
 
        if (unlikely((loff_t) (pos + len) < 0))
                return -EINVAL;
 
-       if (unlikely(inode->i_flctx && mandatory_lock(inode))) {
-               loff_t end = len ? pos + len - 1 : OFFSET_MAX;
-               int retval;
-
-               retval = locks_mandatory_area(inode, file, pos, end,
-                               write ? F_WRLCK : F_RDLCK);
-               if (retval < 0)
-                       return retval;
-       }
-
        return security_file_permission(file, write ? MAY_WRITE : MAY_READ);
 }
 
index 855f0e8..2db8bcf 100644 (file)
@@ -49,8 +49,7 @@ static int copy_bio_to_actor(struct bio *bio,
 
                bytes_to_copy = min_t(int, bytes_to_copy,
                                      req_length - copied_bytes);
-               memcpy(actor_addr + actor_offset,
-                      page_address(bvec->bv_page) + bvec->bv_offset + offset,
+               memcpy(actor_addr + actor_offset, bvec_virt(bvec) + offset,
                       bytes_to_copy);
 
                actor_offset += bytes_to_copy;
@@ -177,7 +176,7 @@ int squashfs_read_data(struct super_block *sb, u64 index, int length,
                        goto out_free_bio;
                }
                /* Extract the length of the metadata block */
-               data = page_address(bvec->bv_page) + bvec->bv_offset;
+               data = bvec_virt(bvec);
                length = data[offset];
                if (offset < bvec->bv_len - 1) {
                        length |= data[offset + 1] << 8;
@@ -186,7 +185,7 @@ int squashfs_read_data(struct super_block *sb, u64 index, int length,
                                res = -EIO;
                                goto out_free_bio;
                        }
-                       data = page_address(bvec->bv_page) + bvec->bv_offset;
+                       data = bvec_virt(bvec);
                        length |= data[0] << 8;
                }
                bio_free_pages(bio);
index 233d558..b685b62 100644 (file)
@@ -101,7 +101,7 @@ static int lz4_uncompress(struct squashfs_sb_info *msblk, void *strm,
        while (bio_next_segment(bio, &iter_all)) {
                int avail = min(bytes, ((int)bvec->bv_len) - offset);
 
-               data = page_address(bvec->bv_page) + bvec->bv_offset;
+               data = bvec_virt(bvec);
                memcpy(buff, data + offset, avail);
                buff += avail;
                bytes -= avail;
index 97bb7d9..cb510a6 100644 (file)
@@ -76,7 +76,7 @@ static int lzo_uncompress(struct squashfs_sb_info *msblk, void *strm,
        while (bio_next_segment(bio, &iter_all)) {
                int avail = min(bytes, ((int)bvec->bv_len) - offset);
 
-               data = page_address(bvec->bv_page) + bvec->bv_offset;
+               data = bvec_virt(bvec);
                memcpy(buff, data + offset, avail);
                buff += avail;
                bytes -= avail;
index e80419a..68f6d09 100644 (file)
@@ -146,7 +146,7 @@ static int squashfs_xz_uncompress(struct squashfs_sb_info *msblk, void *strm,
                        }
 
                        avail = min(length, ((int)bvec->bv_len) - offset);
-                       data = page_address(bvec->bv_page) + bvec->bv_offset;
+                       data = bvec_virt(bvec);
                        length -= avail;
                        stream->buf.in = data + offset;
                        stream->buf.in_size = avail;
index bcb881e..a20e904 100644 (file)
@@ -76,7 +76,7 @@ static int zlib_uncompress(struct squashfs_sb_info *msblk, void *strm,
                        }
 
                        avail = min(length, ((int)bvec->bv_len) - offset);
-                       data = page_address(bvec->bv_page) + bvec->bv_offset;
+                       data = bvec_virt(bvec);
                        length -= avail;
                        stream->next_in = data + offset;
                        stream->avail_in = avail;
index b7cb1fa..0015cf8 100644 (file)
@@ -94,7 +94,7 @@ static int zstd_uncompress(struct squashfs_sb_info *msblk, void *strm,
                        }
 
                        avail = min(length, ((int)bvec->bv_len) - offset);
-                       data = page_address(bvec->bv_page) + bvec->bv_offset;
+                       data = bvec_virt(bvec);
                        length -= avail;
                        in_buf.src = data + offset;
                        in_buf.size = avail;
index 91b7f15..bcef3a6 100644 (file)
@@ -1203,7 +1203,7 @@ static int set_bdev_super(struct super_block *s, void *data)
 {
        s->s_bdev = data;
        s->s_dev = s->s_bdev->bd_dev;
-       s->s_bdi = bdi_get(s->s_bdev->bd_bdi);
+       s->s_bdi = bdi_get(s->s_bdev->bd_disk->bdi);
 
        if (blk_queue_stable_writes(s->s_bdev->bd_disk->queue))
                s->s_iflags |= SB_I_STABLE_WRITES;
index c5509d2..e9c96a0 100644 (file)
@@ -115,6 +115,22 @@ void timerfd_clock_was_set(void)
        rcu_read_unlock();
 }
 
+static void timerfd_resume_work(struct work_struct *work)
+{
+       timerfd_clock_was_set();
+}
+
+static DECLARE_WORK(timerfd_work, timerfd_resume_work);
+
+/*
+ * Invoked from timekeeping_resume(). Defer the actual update to work so
+ * timerfd_clock_was_set() runs in task context.
+ */
+void timerfd_resume(void)
+{
+       schedule_work(&timerfd_work);
+}
+
 static void __timerfd_remove_cancel(struct timerfd_ctx *ctx)
 {
        if (ctx->might_cancel) {
index c19dba4..70abdfa 100644 (file)
@@ -35,7 +35,6 @@
 #include "udf_i.h"
 #include "udf_sb.h"
 
-
 static int udf_readdir(struct file *file, struct dir_context *ctx)
 {
        struct inode *dir = file_inode(file);
@@ -135,7 +134,7 @@ static int udf_readdir(struct file *file, struct dir_context *ctx)
                lfi = cfi.lengthFileIdent;
 
                if (fibh.sbh == fibh.ebh) {
-                       nameptr = fi->fileIdent + liu;
+                       nameptr = udf_get_fi_ident(fi);
                } else {
                        int poffset;    /* Unpaded ending offset */
 
@@ -153,7 +152,7 @@ static int udf_readdir(struct file *file, struct dir_context *ctx)
                                        }
                                }
                                nameptr = copy_name;
-                               memcpy(nameptr, fi->fileIdent + liu,
+                               memcpy(nameptr, udf_get_fi_ident(fi),
                                       lfi - poffset);
                                memcpy(nameptr + lfi - poffset,
                                       fibh.ebh->b_data, poffset);
index 185c3e2..de17a97 100644 (file)
@@ -307,14 +307,14 @@ struct logicalVolDesc {
        struct regid            impIdent;
        uint8_t                 impUse[128];
        struct extent_ad        integritySeqExt;
-       uint8_t                 partitionMaps[0];
+       uint8_t                 partitionMaps[];
 } __packed;
 
 /* Generic Partition Map (ECMA 167r3 3/10.7.1) */
 struct genericPartitionMap {
        uint8_t         partitionMapType;
        uint8_t         partitionMapLength;
-       uint8_t         partitionMapping[0];
+       uint8_t         partitionMapping[];
 } __packed;
 
 /* Partition Map Type (ECMA 167r3 3/10.7.1.1) */
@@ -342,7 +342,7 @@ struct unallocSpaceDesc {
        struct tag              descTag;
        __le32                  volDescSeqNum;
        __le32                  numAllocDescs;
-       struct extent_ad        allocDescs[0];
+       struct extent_ad        allocDescs[];
 } __packed;
 
 /* Terminating Descriptor (ECMA 167r3 3/10.9) */
@@ -360,9 +360,9 @@ struct logicalVolIntegrityDesc {
        uint8_t                 logicalVolContentsUse[32];
        __le32                  numOfPartitions;
        __le32                  lengthOfImpUse;
-       __le32                  freeSpaceTable[0];
-       __le32                  sizeTable[0];
-       uint8_t                 impUse[0];
+       __le32                  freeSpaceTable[];
+       /* __le32               sizeTable[]; */
+       /* uint8_t              impUse[]; */
 } __packed;
 
 /* Integrity Type (ECMA 167r3 3/10.10.3) */
@@ -471,9 +471,9 @@ struct fileIdentDesc {
        uint8_t         lengthFileIdent;
        struct long_ad  icb;
        __le16          lengthOfImpUse;
-       uint8_t         impUse[0];
-       uint8_t         fileIdent[0];
-       uint8_t         padding[0];
+       uint8_t         impUse[];
+       /* uint8_t      fileIdent[]; */
+       /* uint8_t      padding[]; */
 } __packed;
 
 /* File Characteristics (ECMA 167r3 4/14.4.3) */
@@ -578,8 +578,8 @@ struct fileEntry {
        __le64                  uniqueID;
        __le32                  lengthExtendedAttr;
        __le32                  lengthAllocDescs;
-       uint8_t                 extendedAttr[0];
-       uint8_t                 allocDescs[0];
+       uint8_t                 extendedAttr[];
+       /* uint8_t              allocDescs[]; */
 } __packed;
 
 /* Permissions (ECMA 167r3 4/14.9.5) */
@@ -632,7 +632,7 @@ struct genericFormat {
        uint8_t         attrSubtype;
        uint8_t         reserved[3];
        __le32          attrLength;
-       uint8_t         attrData[0];
+       uint8_t         attrData[];
 } __packed;
 
 /* Character Set Information (ECMA 167r3 4/14.10.3) */
@@ -643,7 +643,7 @@ struct charSetInfo {
        __le32          attrLength;
        __le32          escapeSeqLength;
        uint8_t         charSetType;
-       uint8_t         escapeSeq[0];
+       uint8_t         escapeSeq[];
 } __packed;
 
 /* Alternate Permissions (ECMA 167r3 4/14.10.4) */
@@ -682,7 +682,7 @@ struct infoTimesExtAttr {
        __le32          attrLength;
        __le32          dataLength;
        __le32          infoTimeExistence;
-       uint8_t         infoTimes[0];
+       uint8_t         infoTimes[];
 } __packed;
 
 /* Device Specification (ECMA 167r3 4/14.10.7) */
@@ -694,7 +694,7 @@ struct deviceSpec {
        __le32          impUseLength;
        __le32          majorDeviceIdent;
        __le32          minorDeviceIdent;
-       uint8_t         impUse[0];
+       uint8_t         impUse[];
 } __packed;
 
 /* Implementation Use Extended Attr (ECMA 167r3 4/14.10.8) */
@@ -705,7 +705,7 @@ struct impUseExtAttr {
        __le32          attrLength;
        __le32          impUseLength;
        struct regid    impIdent;
-       uint8_t         impUse[0];
+       uint8_t         impUse[];
 } __packed;
 
 /* Application Use Extended Attribute (ECMA 167r3 4/14.10.9) */
@@ -716,7 +716,7 @@ struct appUseExtAttr {
        __le32          attrLength;
        __le32          appUseLength;
        struct regid    appIdent;
-       uint8_t         appUse[0];
+       uint8_t         appUse[];
 } __packed;
 
 #define EXTATTR_CHAR_SET               1
@@ -733,7 +733,7 @@ struct unallocSpaceEntry {
        struct tag      descTag;
        struct icbtag   icbTag;
        __le32          lengthAllocDescs;
-       uint8_t         allocDescs[0];
+       uint8_t         allocDescs[];
 } __packed;
 
 /* Space Bitmap Descriptor (ECMA 167r3 4/14.12) */
@@ -741,7 +741,7 @@ struct spaceBitmapDesc {
        struct tag      descTag;
        __le32          numOfBits;
        __le32          numOfBytes;
-       uint8_t         bitmap[0];
+       uint8_t         bitmap[];
 } __packed;
 
 /* Partition Integrity Entry (ECMA 167r3 4/14.13) */
@@ -780,7 +780,7 @@ struct pathComponent {
        uint8_t         componentType;
        uint8_t         lengthComponentIdent;
        __le16          componentFileVersionNum;
-       dchars          componentIdent[0];
+       dchars          componentIdent[];
 } __packed;
 
 /* File Entry (ECMA 167r3 4/14.17) */
@@ -809,8 +809,8 @@ struct extendedFileEntry {
        __le64                  uniqueID;
        __le32                  lengthExtendedAttr;
        __le32                  lengthAllocDescs;
-       uint8_t                 extendedAttr[0];
-       uint8_t                 allocDescs[0];
+       uint8_t                 extendedAttr[];
+       /* uint8_t              allocDescs[]; */
 } __packed;
 
 #endif /* _ECMA_167_H */
index 4917670..1d6b7a5 100644 (file)
@@ -390,8 +390,7 @@ struct buffer_head *udf_expand_dir_adinicb(struct inode *inode,
                dfibh.eoffset += (sfibh.eoffset - sfibh.soffset);
                dfi = (struct fileIdentDesc *)(dbh->b_data + dfibh.soffset);
                if (udf_write_fi(inode, sfi, dfi, &dfibh, sfi->impUse,
-                                sfi->fileIdent +
-                                       le16_to_cpu(sfi->lengthOfImpUse))) {
+                                udf_get_fi_ident(sfi))) {
                        iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB;
                        brelse(dbh);
                        return NULL;
index eab9452..1614d30 100644 (file)
@@ -173,13 +173,22 @@ struct genericFormat *udf_get_extendedattr(struct inode *inode, uint32_t type,
                else
                        offset = le32_to_cpu(eahd->appAttrLocation);
 
-               while (offset < iinfo->i_lenEAttr) {
+               while (offset + sizeof(*gaf) < iinfo->i_lenEAttr) {
+                       uint32_t attrLength;
+
                        gaf = (struct genericFormat *)&ea[offset];
+                       attrLength = le32_to_cpu(gaf->attrLength);
+
+                       /* Detect undersized elements and buffer overflows */
+                       if ((attrLength < sizeof(*gaf)) ||
+                           (attrLength > (iinfo->i_lenEAttr - offset)))
+                               break;
+
                        if (le32_to_cpu(gaf->attrType) == type &&
                                        gaf->attrSubtype == subtype)
                                return gaf;
                        else
-                               offset += le32_to_cpu(gaf->attrLength);
+                               offset += attrLength;
                }
        }
 
index 7c7c9bb..caeef08 100644 (file)
@@ -74,12 +74,11 @@ int udf_write_fi(struct inode *inode, struct fileIdentDesc *cfi,
 
        if (fileident) {
                if (adinicb || (offset + lfi < 0)) {
-                       memcpy((uint8_t *)sfi->fileIdent + liu, fileident, lfi);
+                       memcpy(udf_get_fi_ident(sfi), fileident, lfi);
                } else if (offset >= 0) {
                        memcpy(fibh->ebh->b_data + offset, fileident, lfi);
                } else {
-                       memcpy((uint8_t *)sfi->fileIdent + liu, fileident,
-                               -offset);
+                       memcpy(udf_get_fi_ident(sfi), fileident, -offset);
                        memcpy(fibh->ebh->b_data, fileident - offset,
                                lfi + offset);
                }
@@ -88,11 +87,11 @@ int udf_write_fi(struct inode *inode, struct fileIdentDesc *cfi,
        offset += lfi;
 
        if (adinicb || (offset + padlen < 0)) {
-               memset((uint8_t *)sfi->padding + liu + lfi, 0x00, padlen);
+               memset(udf_get_fi_ident(sfi) + lfi, 0x00, padlen);
        } else if (offset >= 0) {
                memset(fibh->ebh->b_data + offset, 0x00, padlen);
        } else {
-               memset((uint8_t *)sfi->padding + liu + lfi, 0x00, -offset);
+               memset(udf_get_fi_ident(sfi) + lfi, 0x00, -offset);
                memset(fibh->ebh->b_data, 0x00, padlen + offset);
        }
 
@@ -226,7 +225,7 @@ static struct fileIdentDesc *udf_find_entry(struct inode *dir,
                lfi = cfi->lengthFileIdent;
 
                if (fibh->sbh == fibh->ebh) {
-                       nameptr = fi->fileIdent + liu;
+                       nameptr = udf_get_fi_ident(fi);
                } else {
                        int poffset;    /* Unpaded ending offset */
 
@@ -246,7 +245,7 @@ static struct fileIdentDesc *udf_find_entry(struct inode *dir,
                                        }
                                }
                                nameptr = copy_name;
-                               memcpy(nameptr, fi->fileIdent + liu,
+                               memcpy(nameptr, udf_get_fi_ident(fi),
                                        lfi - poffset);
                                memcpy(nameptr + lfi - poffset,
                                        fibh->ebh->b_data, poffset);
index 22bc4fb..157de0e 100644 (file)
@@ -111,7 +111,7 @@ struct logicalVolIntegrityDescImpUse {
        __le16          minUDFReadRev;
        __le16          minUDFWriteRev;
        __le16          maxUDFWriteRev;
-       uint8_t         impUse[0];
+       uint8_t         impUse[];
 } __packed;
 
 /* Implementation Use Volume Descriptor (UDF 2.60 2.2.7) */
@@ -178,15 +178,6 @@ struct metadataPartitionMap {
        uint8_t         reserved2[5];
 } __packed;
 
-/* Virtual Allocation Table (UDF 1.5 2.2.10) */
-struct virtualAllocationTable15 {
-       __le32          vatEntry[0];
-       struct regid    vatIdent;
-       __le32          previousVATICBLoc;
-} __packed;
-
-#define ICBTAG_FILE_TYPE_VAT15         0x00U
-
 /* Virtual Allocation Table (UDF 2.60 2.2.11) */
 struct virtualAllocationTable20 {
        __le16          lengthHeader;
@@ -199,8 +190,8 @@ struct virtualAllocationTable20 {
        __le16          minUDFWriteRev;
        __le16          maxUDFWriteRev;
        __le16          reserved;
-       uint8_t         impUse[0];
-       __le32          vatEntry[0];
+       uint8_t         impUse[];
+       /* __le32       vatEntry[]; */
 } __packed;
 
 #define ICBTAG_FILE_TYPE_VAT20         0xF8U
@@ -217,8 +208,7 @@ struct sparingTable {
        __le16          reallocationTableLen;
        __le16          reserved;
        __le32          sequenceNum;
-       struct sparingEntry
-                       mapEntry[0];
+       struct sparingEntry mapEntry[];
 } __packed;
 
 /* Metadata File (and Metadata Mirror File) (UDF 2.60 2.2.13.1) */
@@ -241,7 +231,7 @@ struct allocDescImpUse {
 /* FreeEASpace (UDF 2.60 3.3.4.5.1.1) */
 struct freeEaSpace {
        __le16          headerChecksum;
-       uint8_t         freeEASpace[0];
+       uint8_t         freeEASpace[];
 } __packed;
 
 /* DVD Copyright Management Information (UDF 2.60 3.3.4.5.1.2) */
@@ -265,7 +255,7 @@ struct LVExtensionEA {
 /* FreeAppEASpace (UDF 2.60 3.3.4.6.1) */
 struct freeAppEASpace {
        __le16          headerChecksum;
-       uint8_t         freeEASpace[0];
+       uint8_t         freeEASpace[];
 } __packed;
 
 /* UDF Defined System Stream (UDF 2.60 3.3.7) */
index 2f83c12..b2d7c57 100644 (file)
@@ -108,16 +108,10 @@ struct logicalVolIntegrityDescImpUse *udf_sb_lvidiu(struct super_block *sb)
                return NULL;
        lvid = (struct logicalVolIntegrityDesc *)UDF_SB(sb)->s_lvid_bh->b_data;
        partnum = le32_to_cpu(lvid->numOfPartitions);
-       if ((sb->s_blocksize - sizeof(struct logicalVolIntegrityDescImpUse) -
-            offsetof(struct logicalVolIntegrityDesc, impUse)) /
-            (2 * sizeof(uint32_t)) < partnum) {
-               udf_err(sb, "Logical volume integrity descriptor corrupted "
-                       "(numOfPartitions = %u)!\n", partnum);
-               return NULL;
-       }
        /* The offset is to skip freeSpaceTable and sizeTable arrays */
        offset = partnum * 2 * sizeof(uint32_t);
-       return (struct logicalVolIntegrityDescImpUse *)&(lvid->impUse[offset]);
+       return (struct logicalVolIntegrityDescImpUse *)
+                                       (((uint8_t *)(lvid + 1)) + offset);
 }
 
 /* UDF filesystem type */
@@ -349,10 +343,10 @@ static int udf_show_options(struct seq_file *seq, struct dentry *root)
                seq_printf(seq, ",lastblock=%u", sbi->s_last_block);
        if (sbi->s_anchor != 0)
                seq_printf(seq, ",anchor=%u", sbi->s_anchor);
-       if (UDF_QUERY_FLAG(sb, UDF_FLAG_UTF8))
-               seq_puts(seq, ",utf8");
-       if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP) && sbi->s_nls_map)
+       if (sbi->s_nls_map)
                seq_printf(seq, ",iocharset=%s", sbi->s_nls_map->charset);
+       else
+               seq_puts(seq, ",iocharset=utf8");
 
        return 0;
 }
@@ -558,19 +552,24 @@ static int udf_parse_options(char *options, struct udf_options *uopt,
                        /* Ignored (never implemented properly) */
                        break;
                case Opt_utf8:
-                       uopt->flags |= (1 << UDF_FLAG_UTF8);
+                       if (!remount) {
+                               unload_nls(uopt->nls_map);
+                               uopt->nls_map = NULL;
+                       }
                        break;
                case Opt_iocharset:
                        if (!remount) {
-                               if (uopt->nls_map)
-                                       unload_nls(uopt->nls_map);
-                               /*
-                                * load_nls() failure is handled later in
-                                * udf_fill_super() after all options are
-                                * parsed.
-                                */
+                               unload_nls(uopt->nls_map);
+                               uopt->nls_map = NULL;
+                       }
+                       /* When nls_map is not loaded then UTF-8 is used */
+                       if (!remount && strcmp(args[0].from, "utf8") != 0) {
                                uopt->nls_map = load_nls(args[0].from);
-                               uopt->flags |= (1 << UDF_FLAG_NLS_MAP);
+                               if (!uopt->nls_map) {
+                                       pr_err("iocharset %s not found\n",
+                                               args[0].from);
+                                       return 0;
+                               }
                        }
                        break;
                case Opt_uforget:
@@ -1542,6 +1541,7 @@ static void udf_load_logicalvolint(struct super_block *sb, struct kernel_extent_
        struct udf_sb_info *sbi = UDF_SB(sb);
        struct logicalVolIntegrityDesc *lvid;
        int indirections = 0;
+       u32 parts, impuselen;
 
        while (++indirections <= UDF_MAX_LVID_NESTING) {
                final_bh = NULL;
@@ -1568,15 +1568,27 @@ static void udf_load_logicalvolint(struct super_block *sb, struct kernel_extent_
 
                lvid = (struct logicalVolIntegrityDesc *)final_bh->b_data;
                if (lvid->nextIntegrityExt.extLength == 0)
-                       return;
+                       goto check;
 
                loc = leea_to_cpu(lvid->nextIntegrityExt);
        }
 
        udf_warn(sb, "Too many LVID indirections (max %u), ignoring.\n",
                UDF_MAX_LVID_NESTING);
+out_err:
        brelse(sbi->s_lvid_bh);
        sbi->s_lvid_bh = NULL;
+       return;
+check:
+       parts = le32_to_cpu(lvid->numOfPartitions);
+       impuselen = le32_to_cpu(lvid->lengthOfImpUse);
+       if (parts >= sb->s_blocksize || impuselen >= sb->s_blocksize ||
+           sizeof(struct logicalVolIntegrityDesc) + impuselen +
+           2 * parts * sizeof(u32) > sb->s_blocksize) {
+               udf_warn(sb, "Corrupted LVID (parts=%u, impuselen=%u), "
+                        "ignoring.\n", parts, impuselen);
+               goto out_err;
+       }
 }
 
 /*
@@ -2139,21 +2151,6 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
        if (!udf_parse_options((char *)options, &uopt, false))
                goto parse_options_failure;
 
-       if (uopt.flags & (1 << UDF_FLAG_UTF8) &&
-           uopt.flags & (1 << UDF_FLAG_NLS_MAP)) {
-               udf_err(sb, "utf8 cannot be combined with iocharset\n");
-               goto parse_options_failure;
-       }
-       if ((uopt.flags & (1 << UDF_FLAG_NLS_MAP)) && !uopt.nls_map) {
-               uopt.nls_map = load_nls_default();
-               if (!uopt.nls_map)
-                       uopt.flags &= ~(1 << UDF_FLAG_NLS_MAP);
-               else
-                       udf_debug("Using default NLS map\n");
-       }
-       if (!(uopt.flags & (1 << UDF_FLAG_NLS_MAP)))
-               uopt.flags |= (1 << UDF_FLAG_UTF8);
-
        fileset.logicalBlockNum = 0xFFFFFFFF;
        fileset.partitionReferenceNum = 0xFFFF;
 
@@ -2308,8 +2305,7 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
 error_out:
        iput(sbi->s_vat_inode);
 parse_options_failure:
-       if (uopt.nls_map)
-               unload_nls(uopt.nls_map);
+       unload_nls(uopt.nls_map);
        if (lvid_open)
                udf_close_lvid(sb);
        brelse(sbi->s_lvid_bh);
@@ -2359,8 +2355,7 @@ static void udf_put_super(struct super_block *sb)
        sbi = UDF_SB(sb);
 
        iput(sbi->s_vat_inode);
-       if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP))
-               unload_nls(sbi->s_nls_map);
+       unload_nls(sbi->s_nls_map);
        if (!sb_rdonly(sb))
                udf_close_lvid(sb);
        brelse(sbi->s_lvid_bh);
index 758efe5..4fa6205 100644 (file)
@@ -20,8 +20,6 @@
 #define UDF_FLAG_UNDELETE              6
 #define UDF_FLAG_UNHIDE                        7
 #define UDF_FLAG_VARCONV               8
-#define UDF_FLAG_NLS_MAP               9
-#define UDF_FLAG_UTF8                  10
 #define UDF_FLAG_UID_FORGET     11    /* save -1 for uid to disk */
 #define UDF_FLAG_GID_FORGET     12
 #define UDF_FLAG_UID_SET       13
index 9dd0814..7e258f1 100644 (file)
@@ -130,6 +130,10 @@ static inline unsigned int udf_dir_entry_len(struct fileIdentDesc *cfi)
                le16_to_cpu(cfi->lengthOfImpUse) + cfi->lengthFileIdent,
                UDF_NAME_PAD);
 }
+static inline uint8_t *udf_get_fi_ident(struct fileIdentDesc *fi)
+{
+       return ((uint8_t *)(fi + 1)) + le16_to_cpu(fi->lengthOfImpUse);
+}
 
 /* file.c */
 extern long udf_ioctl(struct file *, unsigned int, unsigned long);
index 5fcfa96..6225690 100644 (file)
@@ -177,7 +177,7 @@ static int udf_name_from_CS0(struct super_block *sb,
                return 0;
        }
 
-       if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP))
+       if (UDF_SB(sb)->s_nls_map)
                conv_f = UDF_SB(sb)->s_nls_map->uni2char;
        else
                conv_f = NULL;
@@ -285,7 +285,7 @@ static int udf_name_to_CS0(struct super_block *sb,
        if (ocu_max_len <= 0)
                return 0;
 
-       if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP))
+       if (UDF_SB(sb)->s_nls_map)
                conv_f = UDF_SB(sb)->s_nls_map->char2uni;
        else
                conv_f = NULL;
index d548ea4..2c5bcbc 100644 (file)
@@ -411,7 +411,16 @@ struct xfs_log_dinode {
        /* start of the extended dinode, writable fields */
        uint32_t        di_crc;         /* CRC of the inode */
        uint64_t        di_changecount; /* number of attribute changes */
-       xfs_lsn_t       di_lsn;         /* flush sequence */
+
+       /*
+        * The LSN we write to this field during formatting is not a reflection
+        * of the current on-disk LSN. It should never be used for recovery
+        * sequencing, nor should it be recovered into the on-disk inode at all.
+        * See xlog_recover_inode_commit_pass2() and xfs_log_dinode_to_disk()
+        * for details.
+        */
+       xfs_lsn_t       di_lsn;
+
        uint64_t        di_flags2;      /* more random flags */
        uint32_t        di_cowextsize;  /* basic cow extent size for file */
        uint8_t         di_pad2[12];    /* more padding for future expansion */
index 213a97a..1cd3f94 100644 (file)
@@ -1626,7 +1626,6 @@ xfs_swap_extents(
        struct xfs_bstat        *sbp = &sxp->sx_stat;
        int                     src_log_flags, target_log_flags;
        int                     error = 0;
-       int                     lock_flags;
        uint64_t                f;
        int                     resblks = 0;
        unsigned int            flags = 0;
@@ -1638,8 +1637,8 @@ xfs_swap_extents(
         * do the rest of the checks.
         */
        lock_two_nondirectories(VFS_I(ip), VFS_I(tip));
-       lock_flags = XFS_MMAPLOCK_EXCL;
-       xfs_lock_two_inodes(ip, XFS_MMAPLOCK_EXCL, tip, XFS_MMAPLOCK_EXCL);
+       filemap_invalidate_lock_two(VFS_I(ip)->i_mapping,
+                                   VFS_I(tip)->i_mapping);
 
        /* Verify that both files have the same format */
        if ((VFS_I(ip)->i_mode & S_IFMT) != (VFS_I(tip)->i_mode & S_IFMT)) {
@@ -1711,7 +1710,6 @@ xfs_swap_extents(
         * or cancel will unlock the inodes from this point onwards.
         */
        xfs_lock_two_inodes(ip, XFS_ILOCK_EXCL, tip, XFS_ILOCK_EXCL);
-       lock_flags |= XFS_ILOCK_EXCL;
        xfs_trans_ijoin(tp, ip, 0);
        xfs_trans_ijoin(tp, tip, 0);
 
@@ -1830,13 +1828,16 @@ xfs_swap_extents(
        trace_xfs_swap_extent_after(ip, 0);
        trace_xfs_swap_extent_after(tip, 1);
 
+out_unlock_ilock:
+       xfs_iunlock(ip, XFS_ILOCK_EXCL);
+       xfs_iunlock(tip, XFS_ILOCK_EXCL);
 out_unlock:
-       xfs_iunlock(ip, lock_flags);
-       xfs_iunlock(tip, lock_flags);
+       filemap_invalidate_unlock_two(VFS_I(ip)->i_mapping,
+                                     VFS_I(tip)->i_mapping);
        unlock_two_nondirectories(VFS_I(ip), VFS_I(tip));
        return error;
 
 out_trans_cancel:
        xfs_trans_cancel(tp);
-       goto out_unlock;
+       goto out_unlock_ilock;
 }
index 8ff42b3..3ab7356 100644 (file)
@@ -844,7 +844,7 @@ xfs_buf_readahead_map(
 {
        struct xfs_buf          *bp;
 
-       if (bdi_read_congested(target->bt_bdev->bd_bdi))
+       if (bdi_read_congested(target->bt_bdev->bd_disk->bdi))
                return;
 
        xfs_buf_read_map(target, map, nmaps,
index d44e8b4..4775485 100644 (file)
@@ -698,7 +698,8 @@ xlog_recover_do_inode_buffer(
 static xfs_lsn_t
 xlog_recover_get_buf_lsn(
        struct xfs_mount        *mp,
-       struct xfs_buf          *bp)
+       struct xfs_buf          *bp,
+       struct xfs_buf_log_format *buf_f)
 {
        uint32_t                magic32;
        uint16_t                magic16;
@@ -706,11 +707,20 @@ xlog_recover_get_buf_lsn(
        void                    *blk = bp->b_addr;
        uuid_t                  *uuid;
        xfs_lsn_t               lsn = -1;
+       uint16_t                blft;
 
        /* v4 filesystems always recover immediately */
        if (!xfs_sb_version_hascrc(&mp->m_sb))
                goto recover_immediately;
 
+       /*
+        * realtime bitmap and summary file blocks do not have magic numbers or
+        * UUIDs, so we must recover them immediately.
+        */
+       blft = xfs_blft_from_flags(buf_f);
+       if (blft == XFS_BLFT_RTBITMAP_BUF || blft == XFS_BLFT_RTSUMMARY_BUF)
+               goto recover_immediately;
+
        magic32 = be32_to_cpu(*(__be32 *)blk);
        switch (magic32) {
        case XFS_ABTB_CRC_MAGIC:
@@ -796,6 +806,7 @@ xlog_recover_get_buf_lsn(
        switch (magicda) {
        case XFS_DIR3_LEAF1_MAGIC:
        case XFS_DIR3_LEAFN_MAGIC:
+       case XFS_ATTR3_LEAF_MAGIC:
        case XFS_DA3_NODE_MAGIC:
                lsn = be64_to_cpu(((struct xfs_da3_blkinfo *)blk)->lsn);
                uuid = &((struct xfs_da3_blkinfo *)blk)->uuid;
@@ -919,7 +930,7 @@ xlog_recover_buf_commit_pass2(
         * the verifier will be reset to match whatever recover turns that
         * buffer into.
         */
-       lsn = xlog_recover_get_buf_lsn(mp, bp);
+       lsn = xlog_recover_get_buf_lsn(mp, bp, buf_f);
        if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
                trace_xfs_log_recover_buf_skip(log, buf_f);
                xlog_recover_validate_buf_type(mp, bp, buf_f, NULLCOMMITLSN);
index cc3cfb1..3dfbdcd 100644 (file)
@@ -1302,7 +1302,7 @@ xfs_file_llseek(
  *
  * mmap_lock (MM)
  *   sb_start_pagefault(vfs, freeze)
- *     i_mmaplock (XFS - truncate serialisation)
+ *     invalidate_lock (vfs/XFS_MMAPLOCK - truncate serialisation)
  *       page_lock (MM)
  *         i_lock (XFS - extent map serialisation)
  */
@@ -1323,24 +1323,27 @@ __xfs_filemap_fault(
                file_update_time(vmf->vma->vm_file);
        }
 
-       xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
        if (IS_DAX(inode)) {
                pfn_t pfn;
 
+               xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
                ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL,
                                (write_fault && !vmf->cow_page) ?
                                 &xfs_direct_write_iomap_ops :
                                 &xfs_read_iomap_ops);
                if (ret & VM_FAULT_NEEDDSYNC)
                        ret = dax_finish_sync_fault(vmf, pe_size, pfn);
+               xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
        } else {
-               if (write_fault)
+               if (write_fault) {
+                       xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
                        ret = iomap_page_mkwrite(vmf,
                                        &xfs_buffered_write_iomap_ops);
-               else
+                       xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
+               } else {
                        ret = filemap_fault(vmf);
+               }
        }
-       xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
 
        if (write_fault)
                sb_end_pagefault(inode->i_sb);
index 990b72a..f00145e 100644 (file)
@@ -132,7 +132,7 @@ xfs_ilock_attr_map_shared(
 
 /*
  * In addition to i_rwsem in the VFS inode, the xfs inode contains 2
- * multi-reader locks: i_mmap_lock and the i_lock.  This routine allows
+ * multi-reader locks: invalidate_lock and the i_lock.  This routine allows
  * various combinations of the locks to be obtained.
  *
  * The 3 locks should always be ordered so that the IO lock is obtained first,
@@ -140,23 +140,23 @@ xfs_ilock_attr_map_shared(
  *
  * Basic locking order:
  *
- * i_rwsem -> i_mmap_lock -> page_lock -> i_ilock
+ * i_rwsem -> invalidate_lock -> page_lock -> i_ilock
  *
  * mmap_lock locking order:
  *
  * i_rwsem -> page lock -> mmap_lock
- * mmap_lock -> i_mmap_lock -> page_lock
+ * mmap_lock -> invalidate_lock -> page_lock
  *
  * The difference in mmap_lock locking order mean that we cannot hold the
- * i_mmap_lock over syscall based read(2)/write(2) based IO. These IO paths can
- * fault in pages during copy in/out (for buffered IO) or require the mmap_lock
- * in get_user_pages() to map the user pages into the kernel address space for
- * direct IO. Similarly the i_rwsem cannot be taken inside a page fault because
- * page faults already hold the mmap_lock.
+ * invalidate_lock over syscall based read(2)/write(2) based IO. These IO paths
+ * can fault in pages during copy in/out (for buffered IO) or require the
+ * mmap_lock in get_user_pages() to map the user pages into the kernel address
+ * space for direct IO. Similarly the i_rwsem cannot be taken inside a page
+ * fault because page faults already hold the mmap_lock.
  *
  * Hence to serialise fully against both syscall and mmap based IO, we need to
- * take both the i_rwsem and the i_mmap_lock. These locks should *only* be both
- * taken in places where we need to invalidate the page cache in a race
+ * take both the i_rwsem and the invalidate_lock. These locks should *only* be
+ * both taken in places where we need to invalidate the page cache in a race
  * free manner (e.g. truncate, hole punch and other extent manipulation
  * functions).
  */
@@ -188,10 +188,13 @@ xfs_ilock(
                                 XFS_IOLOCK_DEP(lock_flags));
        }
 
-       if (lock_flags & XFS_MMAPLOCK_EXCL)
-               mrupdate_nested(&ip->i_mmaplock, XFS_MMAPLOCK_DEP(lock_flags));
-       else if (lock_flags & XFS_MMAPLOCK_SHARED)
-               mraccess_nested(&ip->i_mmaplock, XFS_MMAPLOCK_DEP(lock_flags));
+       if (lock_flags & XFS_MMAPLOCK_EXCL) {
+               down_write_nested(&VFS_I(ip)->i_mapping->invalidate_lock,
+                                 XFS_MMAPLOCK_DEP(lock_flags));
+       } else if (lock_flags & XFS_MMAPLOCK_SHARED) {
+               down_read_nested(&VFS_I(ip)->i_mapping->invalidate_lock,
+                                XFS_MMAPLOCK_DEP(lock_flags));
+       }
 
        if (lock_flags & XFS_ILOCK_EXCL)
                mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
@@ -240,10 +243,10 @@ xfs_ilock_nowait(
        }
 
        if (lock_flags & XFS_MMAPLOCK_EXCL) {
-               if (!mrtryupdate(&ip->i_mmaplock))
+               if (!down_write_trylock(&VFS_I(ip)->i_mapping->invalidate_lock))
                        goto out_undo_iolock;
        } else if (lock_flags & XFS_MMAPLOCK_SHARED) {
-               if (!mrtryaccess(&ip->i_mmaplock))
+               if (!down_read_trylock(&VFS_I(ip)->i_mapping->invalidate_lock))
                        goto out_undo_iolock;
        }
 
@@ -258,9 +261,9 @@ xfs_ilock_nowait(
 
 out_undo_mmaplock:
        if (lock_flags & XFS_MMAPLOCK_EXCL)
-               mrunlock_excl(&ip->i_mmaplock);
+               up_write(&VFS_I(ip)->i_mapping->invalidate_lock);
        else if (lock_flags & XFS_MMAPLOCK_SHARED)
-               mrunlock_shared(&ip->i_mmaplock);
+               up_read(&VFS_I(ip)->i_mapping->invalidate_lock);
 out_undo_iolock:
        if (lock_flags & XFS_IOLOCK_EXCL)
                up_write(&VFS_I(ip)->i_rwsem);
@@ -307,9 +310,9 @@ xfs_iunlock(
                up_read(&VFS_I(ip)->i_rwsem);
 
        if (lock_flags & XFS_MMAPLOCK_EXCL)
-               mrunlock_excl(&ip->i_mmaplock);
+               up_write(&VFS_I(ip)->i_mapping->invalidate_lock);
        else if (lock_flags & XFS_MMAPLOCK_SHARED)
-               mrunlock_shared(&ip->i_mmaplock);
+               up_read(&VFS_I(ip)->i_mapping->invalidate_lock);
 
        if (lock_flags & XFS_ILOCK_EXCL)
                mrunlock_excl(&ip->i_lock);
@@ -335,7 +338,7 @@ xfs_ilock_demote(
        if (lock_flags & XFS_ILOCK_EXCL)
                mrdemote(&ip->i_lock);
        if (lock_flags & XFS_MMAPLOCK_EXCL)
-               mrdemote(&ip->i_mmaplock);
+               downgrade_write(&VFS_I(ip)->i_mapping->invalidate_lock);
        if (lock_flags & XFS_IOLOCK_EXCL)
                downgrade_write(&VFS_I(ip)->i_rwsem);
 
@@ -343,9 +346,29 @@ xfs_ilock_demote(
 }
 
 #if defined(DEBUG) || defined(XFS_WARN)
-int
+static inline bool
+__xfs_rwsem_islocked(
+       struct rw_semaphore     *rwsem,
+       bool                    shared)
+{
+       if (!debug_locks)
+               return rwsem_is_locked(rwsem);
+
+       if (!shared)
+               return lockdep_is_held_type(rwsem, 0);
+
+       /*
+        * We are checking that the lock is held at least in shared
+        * mode but don't care that it might be held exclusively
+        * (i.e. shared | excl). Hence we check if the lock is held
+        * in any mode rather than an explicit shared mode.
+        */
+       return lockdep_is_held_type(rwsem, -1);
+}
+
+bool
 xfs_isilocked(
-       xfs_inode_t             *ip,
+       struct xfs_inode        *ip,
        uint                    lock_flags)
 {
        if (lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) {
@@ -355,20 +378,17 @@ xfs_isilocked(
        }
 
        if (lock_flags & (XFS_MMAPLOCK_EXCL|XFS_MMAPLOCK_SHARED)) {
-               if (!(lock_flags & XFS_MMAPLOCK_SHARED))
-                       return !!ip->i_mmaplock.mr_writer;
-               return rwsem_is_locked(&ip->i_mmaplock.mr_lock);
+               return __xfs_rwsem_islocked(&VFS_I(ip)->i_rwsem,
+                               (lock_flags & XFS_IOLOCK_SHARED));
        }
 
-       if (lock_flags & (XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED)) {
-               if (!(lock_flags & XFS_IOLOCK_SHARED))
-                       return !debug_locks ||
-                               lockdep_is_held_type(&VFS_I(ip)->i_rwsem, 0);
-               return rwsem_is_locked(&VFS_I(ip)->i_rwsem);
+       if (lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) {
+               return __xfs_rwsem_islocked(&VFS_I(ip)->i_rwsem,
+                               (lock_flags & XFS_IOLOCK_SHARED));
        }
 
        ASSERT(0);
-       return 0;
+       return false;
 }
 #endif
 
@@ -532,12 +552,10 @@ again:
 }
 
 /*
- * xfs_lock_two_inodes() can only be used to lock one type of lock at a time -
- * the mmaplock or the ilock, but not more than one type at a time. If we lock
- * more than one at a time, lockdep will report false positives saying we have
- * violated locking orders.  The iolock must be double-locked separately since
- * we use i_rwsem for that.  We now support taking one lock EXCL and the other
- * SHARED.
+ * xfs_lock_two_inodes() can only be used to lock ilock. The iolock and
+ * mmaplock must be double-locked separately since we use i_rwsem and
+ * invalidate_lock for that. We now support taking one lock EXCL and the
+ * other SHARED.
  */
 void
 xfs_lock_two_inodes(
@@ -555,15 +573,8 @@ xfs_lock_two_inodes(
        ASSERT(hweight32(ip1_mode) == 1);
        ASSERT(!(ip0_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
        ASSERT(!(ip1_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
-       ASSERT(!(ip0_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
-              !(ip0_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
-       ASSERT(!(ip1_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
-              !(ip1_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
-       ASSERT(!(ip1_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
-              !(ip0_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
-       ASSERT(!(ip0_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
-              !(ip1_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
-
+       ASSERT(!(ip0_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)));
+       ASSERT(!(ip1_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)));
        ASSERT(ip0->i_ino != ip1->i_ino);
 
        if (ip0->i_ino > ip1->i_ino) {
@@ -3741,11 +3752,8 @@ xfs_ilock2_io_mmap(
        ret = xfs_iolock_two_inodes_and_break_layout(VFS_I(ip1), VFS_I(ip2));
        if (ret)
                return ret;
-       if (ip1 == ip2)
-               xfs_ilock(ip1, XFS_MMAPLOCK_EXCL);
-       else
-               xfs_lock_two_inodes(ip1, XFS_MMAPLOCK_EXCL,
-                                   ip2, XFS_MMAPLOCK_EXCL);
+       filemap_invalidate_lock_two(VFS_I(ip1)->i_mapping,
+                                   VFS_I(ip2)->i_mapping);
        return 0;
 }
 
@@ -3755,12 +3763,9 @@ xfs_iunlock2_io_mmap(
        struct xfs_inode        *ip1,
        struct xfs_inode        *ip2)
 {
-       bool                    same_inode = (ip1 == ip2);
-
-       xfs_iunlock(ip2, XFS_MMAPLOCK_EXCL);
-       if (!same_inode)
-               xfs_iunlock(ip1, XFS_MMAPLOCK_EXCL);
+       filemap_invalidate_unlock_two(VFS_I(ip1)->i_mapping,
+                                     VFS_I(ip2)->i_mapping);
        inode_unlock(VFS_I(ip2));
-       if (!same_inode)
+       if (ip1 != ip2)
                inode_unlock(VFS_I(ip1));
 }
index 4b6703d..e0ae905 100644 (file)
@@ -40,7 +40,6 @@ typedef struct xfs_inode {
        /* Transaction and locking information. */
        struct xfs_inode_log_item *i_itemp;     /* logging information */
        mrlock_t                i_lock;         /* inode lock */
-       mrlock_t                i_mmaplock;     /* inode mmap IO lock */
        atomic_t                i_pincount;     /* inode pin count */
 
        /*
@@ -410,7 +409,7 @@ void                xfs_ilock(xfs_inode_t *, uint);
 int            xfs_ilock_nowait(xfs_inode_t *, uint);
 void           xfs_iunlock(xfs_inode_t *, uint);
 void           xfs_ilock_demote(xfs_inode_t *, uint);
-int            xfs_isilocked(xfs_inode_t *, uint);
+bool           xfs_isilocked(struct xfs_inode *, uint);
 uint           xfs_ilock_data_map_shared(struct xfs_inode *);
 uint           xfs_ilock_attr_map_shared(struct xfs_inode *);
 
index 7b79518..e0072a6 100644 (file)
@@ -145,7 +145,8 @@ xfs_log_dinode_to_disk_ts(
 STATIC void
 xfs_log_dinode_to_disk(
        struct xfs_log_dinode   *from,
-       struct xfs_dinode       *to)
+       struct xfs_dinode       *to,
+       xfs_lsn_t               lsn)
 {
        to->di_magic = cpu_to_be16(from->di_magic);
        to->di_mode = cpu_to_be16(from->di_mode);
@@ -182,7 +183,7 @@ xfs_log_dinode_to_disk(
                to->di_flags2 = cpu_to_be64(from->di_flags2);
                to->di_cowextsize = cpu_to_be32(from->di_cowextsize);
                to->di_ino = cpu_to_be64(from->di_ino);
-               to->di_lsn = cpu_to_be64(from->di_lsn);
+               to->di_lsn = cpu_to_be64(lsn);
                memcpy(to->di_pad2, from->di_pad2, sizeof(to->di_pad2));
                uuid_copy(&to->di_uuid, &from->di_uuid);
                to->di_flushiter = 0;
@@ -261,16 +262,25 @@ xlog_recover_inode_commit_pass2(
        }
 
        /*
-        * If the inode has an LSN in it, recover the inode only if it's less
-        * than the lsn of the transaction we are replaying. Note: we still
-        * need to replay an owner change even though the inode is more recent
-        * than the transaction as there is no guarantee that all the btree
-        * blocks are more recent than this transaction, too.
+        * If the inode has an LSN in it, recover the inode only if the on-disk
+        * inode's LSN is older than the lsn of the transaction we are
+        * replaying. We can have multiple checkpoints with the same start LSN,
+        * so the current LSN being equal to the on-disk LSN doesn't necessarily
+        * mean that the on-disk inode is more recent than the change being
+        * replayed.
+        *
+        * We must check the current_lsn against the on-disk inode
+        * here because we can't trust the log dinode to contain a valid LSN
+        * (see comment below before replaying the log dinode for details).
+        *
+        * Note: we still need to replay an owner change even though the inode
+        * is more recent than the transaction as there is no guarantee that all
+        * the btree blocks are more recent than this transaction, too.
         */
        if (dip->di_version >= 3) {
                xfs_lsn_t       lsn = be64_to_cpu(dip->di_lsn);
 
-               if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
+               if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) > 0) {
                        trace_xfs_log_recover_inode_skip(log, in_f);
                        error = 0;
                        goto out_owner_change;
@@ -368,8 +378,17 @@ xlog_recover_inode_commit_pass2(
                goto out_release;
        }
 
-       /* recover the log dinode inode into the on disk inode */
-       xfs_log_dinode_to_disk(ldip, dip);
+       /*
+        * Recover the log dinode inode into the on disk inode.
+        *
+        * The LSN in the log dinode is garbage - it can be zero or reflect
+        * stale in-memory runtime state that isn't coherent with the changes
+        * logged in this transaction or the changes written to the on-disk
+        * inode.  Hence we write the current LSN into the inode because that
+        * matches what xfs_iflush() would write into the inode when flushing
+        * the changes in this transaction.
+        */
+       xfs_log_dinode_to_disk(ldip, dip, current_lsn);
 
        fields = in_f->ilf_fields;
        if (fields & XFS_ILOG_DEV)
index 36fa265..60ac5fd 100644 (file)
@@ -78,13 +78,12 @@ xlog_verify_iclog(
 STATIC void
 xlog_verify_tail_lsn(
        struct xlog             *log,
-       struct xlog_in_core     *iclog,
-       xfs_lsn_t               tail_lsn);
+       struct xlog_in_core     *iclog);
 #else
 #define xlog_verify_dest_ptr(a,b)
 #define xlog_verify_grant_tail(a)
 #define xlog_verify_iclog(a,b,c)
-#define xlog_verify_tail_lsn(a,b,c)
+#define xlog_verify_tail_lsn(a,b)
 #endif
 
 STATIC int
@@ -487,51 +486,80 @@ out_error:
        return error;
 }
 
-static bool
-__xlog_state_release_iclog(
-       struct xlog             *log,
-       struct xlog_in_core     *iclog)
-{
-       lockdep_assert_held(&log->l_icloglock);
-
-       if (iclog->ic_state == XLOG_STATE_WANT_SYNC) {
-               /* update tail before writing to iclog */
-               xfs_lsn_t tail_lsn = xlog_assign_tail_lsn(log->l_mp);
-
-               iclog->ic_state = XLOG_STATE_SYNCING;
-               iclog->ic_header.h_tail_lsn = cpu_to_be64(tail_lsn);
-               xlog_verify_tail_lsn(log, iclog, tail_lsn);
-               /* cycle incremented when incrementing curr_block */
-               trace_xlog_iclog_syncing(iclog, _RET_IP_);
-               return true;
-       }
-
-       ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
-       return false;
-}
-
 /*
  * Flush iclog to disk if this is the last reference to the given iclog and the
  * it is in the WANT_SYNC state.
+ *
+ * If the caller passes in a non-zero @old_tail_lsn and the current log tail
+ * does not match, there may be metadata on disk that must be persisted before
+ * this iclog is written.  To satisfy that requirement, set the
+ * XLOG_ICL_NEED_FLUSH flag as a condition for writing this iclog with the new
+ * log tail value.
+ *
+ * If XLOG_ICL_NEED_FUA is already set on the iclog, we need to ensure that the
+ * log tail is updated correctly. NEED_FUA indicates that the iclog will be
+ * written to stable storage, and implies that a commit record is contained
+ * within the iclog. We need to ensure that the log tail does not move beyond
+ * the tail that the first commit record in the iclog ordered against, otherwise
+ * correct recovery of that checkpoint becomes dependent on future operations
+ * performed on this iclog.
+ *
+ * Hence if NEED_FUA is set and the current iclog tail lsn is empty, write the
+ * current tail into iclog. Once the iclog tail is set, future operations must
+ * not modify it, otherwise they potentially violate ordering constraints for
+ * the checkpoint commit that wrote the initial tail lsn value. The tail lsn in
+ * the iclog will get zeroed on activation of the iclog after sync, so we
+ * always capture the tail lsn on the iclog on the first NEED_FUA release
+ * regardless of the number of active reference counts on this iclog.
  */
+
 int
 xlog_state_release_iclog(
        struct xlog             *log,
-       struct xlog_in_core     *iclog)
+       struct xlog_in_core     *iclog,
+       xfs_lsn_t               old_tail_lsn)
 {
+       xfs_lsn_t               tail_lsn;
        lockdep_assert_held(&log->l_icloglock);
 
        trace_xlog_iclog_release(iclog, _RET_IP_);
        if (iclog->ic_state == XLOG_STATE_IOERROR)
                return -EIO;
 
-       if (atomic_dec_and_test(&iclog->ic_refcnt) &&
-           __xlog_state_release_iclog(log, iclog)) {
-               spin_unlock(&log->l_icloglock);
-               xlog_sync(log, iclog);
-               spin_lock(&log->l_icloglock);
+       /*
+        * Grabbing the current log tail needs to be atomic w.r.t. the writing
+        * of the tail LSN into the iclog so we guarantee that the log tail does
+        * not move between deciding if a cache flush is required and writing
+        * the LSN into the iclog below.
+        */
+       if (old_tail_lsn || iclog->ic_state == XLOG_STATE_WANT_SYNC) {
+               tail_lsn = xlog_assign_tail_lsn(log->l_mp);
+
+               if (old_tail_lsn && tail_lsn != old_tail_lsn)
+                       iclog->ic_flags |= XLOG_ICL_NEED_FLUSH;
+
+               if ((iclog->ic_flags & XLOG_ICL_NEED_FUA) &&
+                   !iclog->ic_header.h_tail_lsn)
+                       iclog->ic_header.h_tail_lsn = cpu_to_be64(tail_lsn);
        }
 
+       if (!atomic_dec_and_test(&iclog->ic_refcnt))
+               return 0;
+
+       if (iclog->ic_state != XLOG_STATE_WANT_SYNC) {
+               ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
+               return 0;
+       }
+
+       iclog->ic_state = XLOG_STATE_SYNCING;
+       if (!iclog->ic_header.h_tail_lsn)
+               iclog->ic_header.h_tail_lsn = cpu_to_be64(tail_lsn);
+       xlog_verify_tail_lsn(log, iclog);
+       trace_xlog_iclog_syncing(iclog, _RET_IP_);
+
+       spin_unlock(&log->l_icloglock);
+       xlog_sync(log, iclog);
+       spin_lock(&log->l_icloglock);
        return 0;
 }
 
@@ -773,6 +801,21 @@ xfs_log_mount_cancel(
        xfs_log_unmount(mp);
 }
 
+/*
+ * Flush out the iclog to disk ensuring that device caches are flushed and
+ * the iclog hits stable storage before any completion waiters are woken.
+ */
+static inline int
+xlog_force_iclog(
+       struct xlog_in_core     *iclog)
+{
+       atomic_inc(&iclog->ic_refcnt);
+       iclog->ic_flags |= XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA;
+       if (iclog->ic_state == XLOG_STATE_ACTIVE)
+               xlog_state_switch_iclogs(iclog->ic_log, iclog, 0);
+       return xlog_state_release_iclog(iclog->ic_log, iclog, 0);
+}
+
 /*
  * Wait for the iclog and all prior iclogs to be written disk as required by the
  * log force state machine. Waiting on ic_force_wait ensures iclog completions
@@ -827,13 +870,6 @@ xlog_write_unmount_record(
        /* account for space used by record data */
        ticket->t_curr_res -= sizeof(ulf);
 
-       /*
-        * For external log devices, we need to flush the data device cache
-        * first to ensure all metadata writeback is on stable storage before we
-        * stamp the tail LSN into the unmount record.
-        */
-       if (log->l_targ != log->l_mp->m_ddev_targp)
-               blkdev_issue_flush(log->l_targ->bt_bdev);
        return xlog_write(log, &vec, ticket, NULL, NULL, XLOG_UNMOUNT_TRANS);
 }
 
@@ -865,18 +901,7 @@ out_err:
 
        spin_lock(&log->l_icloglock);
        iclog = log->l_iclog;
-       atomic_inc(&iclog->ic_refcnt);
-       if (iclog->ic_state == XLOG_STATE_ACTIVE)
-               xlog_state_switch_iclogs(log, iclog, 0);
-       else
-               ASSERT(iclog->ic_state == XLOG_STATE_WANT_SYNC ||
-                      iclog->ic_state == XLOG_STATE_IOERROR);
-       /*
-        * Ensure the journal is fully flushed and on stable storage once the
-        * iclog containing the unmount record is written.
-        */
-       iclog->ic_flags |= (XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA);
-       error = xlog_state_release_iclog(log, iclog);
+       error = xlog_force_iclog(iclog);
        xlog_wait_on_iclog(iclog);
 
        if (tic) {
@@ -1796,10 +1821,20 @@ xlog_write_iclog(
         * metadata writeback and causing priority inversions.
         */
        iclog->ic_bio.bi_opf = REQ_OP_WRITE | REQ_META | REQ_SYNC | REQ_IDLE;
-       if (iclog->ic_flags & XLOG_ICL_NEED_FLUSH)
+       if (iclog->ic_flags & XLOG_ICL_NEED_FLUSH) {
                iclog->ic_bio.bi_opf |= REQ_PREFLUSH;
+               /*
+                * For external log devices, we also need to flush the data
+                * device cache first to ensure all metadata writeback covered
+                * by the LSN in this iclog is on stable storage. This is slow,
+                * but it *must* complete before we issue the external log IO.
+                */
+               if (log->l_targ != log->l_mp->m_ddev_targp)
+                       blkdev_issue_flush(log->l_mp->m_ddev_targp->bt_bdev);
+       }
        if (iclog->ic_flags & XLOG_ICL_NEED_FUA)
                iclog->ic_bio.bi_opf |= REQ_FUA;
+
        iclog->ic_flags &= ~(XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA);
 
        if (xlog_map_iclog_data(&iclog->ic_bio, iclog->ic_data, count)) {
@@ -2310,7 +2345,7 @@ xlog_write_copy_finish(
        return 0;
 
 release_iclog:
-       error = xlog_state_release_iclog(log, iclog);
+       error = xlog_state_release_iclog(log, iclog, 0);
        spin_unlock(&log->l_icloglock);
        return error;
 }
@@ -2529,7 +2564,7 @@ next_lv:
                ASSERT(optype & XLOG_COMMIT_TRANS);
                *commit_iclog = iclog;
        } else {
-               error = xlog_state_release_iclog(log, iclog);
+               error = xlog_state_release_iclog(log, iclog, 0);
        }
        spin_unlock(&log->l_icloglock);
 
@@ -2567,6 +2602,7 @@ xlog_state_activate_iclog(
        memset(iclog->ic_header.h_cycle_data, 0,
                sizeof(iclog->ic_header.h_cycle_data));
        iclog->ic_header.h_lsn = 0;
+       iclog->ic_header.h_tail_lsn = 0;
 }
 
 /*
@@ -2967,7 +3003,7 @@ restart:
                 * reference to the iclog.
                 */
                if (!atomic_add_unless(&iclog->ic_refcnt, -1, 1))
-                       error = xlog_state_release_iclog(log, iclog);
+                       error = xlog_state_release_iclog(log, iclog, 0);
                spin_unlock(&log->l_icloglock);
                if (error)
                        return error;
@@ -3131,6 +3167,35 @@ xlog_state_switch_iclogs(
        log->l_iclog = iclog->ic_next;
 }
 
+/*
+ * Force the iclog to disk and check if the iclog has been completed before
+ * xlog_force_iclog() returns. This can happen on synchronous (e.g.
+ * pmem) or fast async storage because we drop the icloglock to issue the IO.
+ * If completion has already occurred, tell the caller so that it can avoid an
+ * unnecessary wait on the iclog.
+ */
+static int
+xlog_force_and_check_iclog(
+       struct xlog_in_core     *iclog,
+       bool                    *completed)
+{
+       xfs_lsn_t               lsn = be64_to_cpu(iclog->ic_header.h_lsn);
+       int                     error;
+
+       *completed = false;
+       error = xlog_force_iclog(iclog);
+       if (error)
+               return error;
+
+       /*
+        * If the iclog has already been completed and reused the header LSN
+        * will have been rewritten by completion
+        */
+       if (be64_to_cpu(iclog->ic_header.h_lsn) != lsn)
+               *completed = true;
+       return 0;
+}
+
 /*
  * Write out all data in the in-core log as of this exact moment in time.
  *
@@ -3165,7 +3230,6 @@ xfs_log_force(
 {
        struct xlog             *log = mp->m_log;
        struct xlog_in_core     *iclog;
-       xfs_lsn_t               lsn;
 
        XFS_STATS_INC(mp, xs_log_force);
        trace_xfs_log_force(mp, 0, _RET_IP_);
@@ -3193,39 +3257,33 @@ xfs_log_force(
                iclog = iclog->ic_prev;
        } else if (iclog->ic_state == XLOG_STATE_ACTIVE) {
                if (atomic_read(&iclog->ic_refcnt) == 0) {
-                       /*
-                        * We are the only one with access to this iclog.
-                        *
-                        * Flush it out now.  There should be a roundoff of zero
-                        * to show that someone has already taken care of the
-                        * roundoff from the previous sync.
-                        */
-                       atomic_inc(&iclog->ic_refcnt);
-                       lsn = be64_to_cpu(iclog->ic_header.h_lsn);
-                       xlog_state_switch_iclogs(log, iclog, 0);
-                       if (xlog_state_release_iclog(log, iclog))
+                       /* We have exclusive access to this iclog. */
+                       bool    completed;
+
+                       if (xlog_force_and_check_iclog(iclog, &completed))
                                goto out_error;
 
-                       if (be64_to_cpu(iclog->ic_header.h_lsn) != lsn)
+                       if (completed)
                                goto out_unlock;
                } else {
                        /*
-                        * Someone else is writing to this iclog.
-                        *
-                        * Use its call to flush out the data.  However, the
-                        * other thread may not force out this LR, so we mark
-                        * it WANT_SYNC.
+                        * Someone else is still writing to this iclog, so we
+                        * need to ensure that when they release the iclog it
+                        * gets synced immediately as we may be waiting on it.
                         */
                        xlog_state_switch_iclogs(log, iclog, 0);
                }
-       } else {
-               /*
-                * If the head iclog is not active nor dirty, we just attach
-                * ourselves to the head and go to sleep if necessary.
-                */
-               ;
        }
 
+       /*
+        * The iclog we are about to wait on may contain the checkpoint pushed
+        * by the above xlog_cil_force() call, but it may not have been pushed
+        * to disk yet. Like the ACTIVE case above, we need to make sure caches
+        * are flushed when this iclog is written.
+        */
+       if (iclog->ic_state == XLOG_STATE_WANT_SYNC)
+               iclog->ic_flags |= XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA;
+
        if (flags & XFS_LOG_SYNC)
                return xlog_wait_on_iclog(iclog);
 out_unlock:
@@ -3245,6 +3303,7 @@ xlog_force_lsn(
        bool                    already_slept)
 {
        struct xlog_in_core     *iclog;
+       bool                    completed;
 
        spin_lock(&log->l_icloglock);
        iclog = log->l_iclog;
@@ -3258,7 +3317,8 @@ xlog_force_lsn(
                        goto out_unlock;
        }
 
-       if (iclog->ic_state == XLOG_STATE_ACTIVE) {
+       switch (iclog->ic_state) {
+       case XLOG_STATE_ACTIVE:
                /*
                 * We sleep here if we haven't already slept (e.g. this is the
                 * first time we've looked at the correct iclog buf) and the
@@ -3281,12 +3341,31 @@ xlog_force_lsn(
                                        &log->l_icloglock);
                        return -EAGAIN;
                }
-               atomic_inc(&iclog->ic_refcnt);
-               xlog_state_switch_iclogs(log, iclog, 0);
-               if (xlog_state_release_iclog(log, iclog))
+               if (xlog_force_and_check_iclog(iclog, &completed))
                        goto out_error;
                if (log_flushed)
                        *log_flushed = 1;
+               if (completed)
+                       goto out_unlock;
+               break;
+       case XLOG_STATE_WANT_SYNC:
+               /*
+                * This iclog may contain the checkpoint pushed by the
+                * xlog_cil_force_seq() call, but there are other writers still
+                * accessing it so it hasn't been pushed to disk yet. Like the
+                * ACTIVE case above, we need to make sure caches are flushed
+                * when this iclog is written.
+                */
+               iclog->ic_flags |= XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA;
+               break;
+       default:
+               /*
+                * The entire checkpoint was written by the CIL force and is on
+                * its way to disk already. It will be stable when it
+                * completes, so we don't need to manipulate caches here at all.
+                * We just need to wait for completion if necessary.
+                */
+               break;
        }
 
        if (flags & XFS_LOG_SYNC)
@@ -3559,10 +3638,10 @@ xlog_verify_grant_tail(
 STATIC void
 xlog_verify_tail_lsn(
        struct xlog             *log,
-       struct xlog_in_core     *iclog,
-       xfs_lsn_t               tail_lsn)
+       struct xlog_in_core     *iclog)
 {
-    int blocks;
+       xfs_lsn_t       tail_lsn = be64_to_cpu(iclog->ic_header.h_tail_lsn);
+       int             blocks;
 
     if (CYCLE_LSN(tail_lsn) == log->l_prev_cycle) {
        blocks =
index b128aaa..4c44bc3 100644 (file)
@@ -654,8 +654,9 @@ xlog_cil_push_work(
        struct xfs_trans_header thdr;
        struct xfs_log_iovec    lhdr;
        struct xfs_log_vec      lvhdr = { NULL };
+       xfs_lsn_t               preflush_tail_lsn;
        xfs_lsn_t               commit_lsn;
-       xfs_lsn_t               push_seq;
+       xfs_csn_t               push_seq;
        struct bio              bio;
        DECLARE_COMPLETION_ONSTACK(bdev_flush);
 
@@ -730,7 +731,15 @@ xlog_cil_push_work(
         * because we hold the flush lock exclusively. Hence we can now issue
         * a cache flush to ensure all the completed metadata in the journal we
         * are about to overwrite is on stable storage.
+        *
+        * Because we are issuing this cache flush before we've written the
+        * tail lsn to the iclog, we can have metadata IO completions move the
+        * tail forwards between the completion of this flush and the iclog
+        * being written. In this case, we need to re-issue the cache flush
+        * before the iclog write. To detect whether the log tail moves, sample
+        * the tail LSN *before* we issue the flush.
         */
+       preflush_tail_lsn = atomic64_read(&log->l_tail_lsn);
        xfs_flush_bdev_async(&bio, log->l_mp->m_ddev_targp->bt_bdev,
                                &bdev_flush);
 
@@ -941,7 +950,7 @@ restart:
         * storage.
         */
        commit_iclog->ic_flags |= XLOG_ICL_NEED_FUA;
-       xlog_state_release_iclog(log, commit_iclog);
+       xlog_state_release_iclog(log, commit_iclog, preflush_tail_lsn);
        spin_unlock(&log->l_icloglock);
        return;
 
index 4c41bbf..f3e79a4 100644 (file)
@@ -59,6 +59,16 @@ enum xlog_iclog_state {
        { XLOG_STATE_DIRTY,     "XLOG_STATE_DIRTY" }, \
        { XLOG_STATE_IOERROR,   "XLOG_STATE_IOERROR" }
 
+/*
+ * In core log flags
+ */
+#define XLOG_ICL_NEED_FLUSH    (1 << 0)        /* iclog needs REQ_PREFLUSH */
+#define XLOG_ICL_NEED_FUA      (1 << 1)        /* iclog needs REQ_FUA */
+
+#define XLOG_ICL_STRINGS \
+       { XLOG_ICL_NEED_FLUSH,  "XLOG_ICL_NEED_FLUSH" }, \
+       { XLOG_ICL_NEED_FUA,    "XLOG_ICL_NEED_FUA" }
+
 
 /*
  * Log ticket flags
@@ -143,9 +153,6 @@ enum xlog_iclog_state {
 
 #define XLOG_COVER_OPS         5
 
-#define XLOG_ICL_NEED_FLUSH    (1 << 0)        /* iclog needs REQ_PREFLUSH */
-#define XLOG_ICL_NEED_FUA      (1 << 1)        /* iclog needs REQ_FUA */
-
 /* Ticket reservation region accounting */ 
 #define XLOG_TIC_LEN_MAX       15
 
@@ -497,7 +504,8 @@ int xlog_commit_record(struct xlog *log, struct xlog_ticket *ticket,
 void   xfs_log_ticket_ungrant(struct xlog *log, struct xlog_ticket *ticket);
 void   xfs_log_ticket_regrant(struct xlog *log, struct xlog_ticket *ticket);
 
-int xlog_state_release_iclog(struct xlog *log, struct xlog_in_core *iclog);
+int xlog_state_release_iclog(struct xlog *log, struct xlog_in_core *iclog,
+               xfs_lsn_t log_tail_lsn);
 
 /*
  * When we crack an atomic LSN, we sample it first so that the value will not
index 2c9e26a..102cbd6 100644 (file)
@@ -709,8 +709,6 @@ xfs_fs_inode_init_once(
        atomic_set(&ip->i_pincount, 0);
        spin_lock_init(&ip->i_flags_lock);
 
-       mrlock_init(&ip->i_mmaplock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
-                    "xfsino", ip->i_ino);
        mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
                     "xfsino", ip->i_ino);
 }
index f9d8d60..1926029 100644 (file)
@@ -3944,6 +3944,7 @@ DECLARE_EVENT_CLASS(xlog_iclog_class,
                __field(uint32_t, state)
                __field(int32_t, refcount)
                __field(uint32_t, offset)
+               __field(uint32_t, flags)
                __field(unsigned long long, lsn)
                __field(unsigned long, caller_ip)
        ),
@@ -3952,15 +3953,17 @@ DECLARE_EVENT_CLASS(xlog_iclog_class,
                __entry->state = iclog->ic_state;
                __entry->refcount = atomic_read(&iclog->ic_refcnt);
                __entry->offset = iclog->ic_offset;
+               __entry->flags = iclog->ic_flags;
                __entry->lsn = be64_to_cpu(iclog->ic_header.h_lsn);
                __entry->caller_ip = caller_ip;
        ),
-       TP_printk("dev %d:%d state %s refcnt %d offset %u lsn 0x%llx caller %pS",
+       TP_printk("dev %d:%d state %s refcnt %d offset %u lsn 0x%llx flags %s caller %pS",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  __print_symbolic(__entry->state, XLOG_STATE_STRINGS),
                  __entry->refcount,
                  __entry->offset,
                  __entry->lsn,
+                 __print_flags(__entry->flags, "|", XLOG_ICL_STRINGS),
                  (char *)__entry->caller_ip)
 
 );
index 70055d4..ddc346a 100644 (file)
@@ -462,7 +462,7 @@ static int zonefs_file_truncate(struct inode *inode, loff_t isize)
        inode_dio_wait(inode);
 
        /* Serialize against page faults */
-       down_write(&zi->i_mmap_sem);
+       filemap_invalidate_lock(inode->i_mapping);
 
        /* Serialize against zonefs_iomap_begin() */
        mutex_lock(&zi->i_truncate_mutex);
@@ -500,7 +500,7 @@ static int zonefs_file_truncate(struct inode *inode, loff_t isize)
 
 unlock:
        mutex_unlock(&zi->i_truncate_mutex);
-       up_write(&zi->i_mmap_sem);
+       filemap_invalidate_unlock(inode->i_mapping);
 
        return ret;
 }
@@ -575,18 +575,6 @@ static int zonefs_file_fsync(struct file *file, loff_t start, loff_t end,
        return ret;
 }
 
-static vm_fault_t zonefs_filemap_fault(struct vm_fault *vmf)
-{
-       struct zonefs_inode_info *zi = ZONEFS_I(file_inode(vmf->vma->vm_file));
-       vm_fault_t ret;
-
-       down_read(&zi->i_mmap_sem);
-       ret = filemap_fault(vmf);
-       up_read(&zi->i_mmap_sem);
-
-       return ret;
-}
-
 static vm_fault_t zonefs_filemap_page_mkwrite(struct vm_fault *vmf)
 {
        struct inode *inode = file_inode(vmf->vma->vm_file);
@@ -607,16 +595,16 @@ static vm_fault_t zonefs_filemap_page_mkwrite(struct vm_fault *vmf)
        file_update_time(vmf->vma->vm_file);
 
        /* Serialize against truncates */
-       down_read(&zi->i_mmap_sem);
+       filemap_invalidate_lock_shared(inode->i_mapping);
        ret = iomap_page_mkwrite(vmf, &zonefs_iomap_ops);
-       up_read(&zi->i_mmap_sem);
+       filemap_invalidate_unlock_shared(inode->i_mapping);
 
        sb_end_pagefault(inode->i_sb);
        return ret;
 }
 
 static const struct vm_operations_struct zonefs_file_vm_ops = {
-       .fault          = zonefs_filemap_fault,
+       .fault          = filemap_fault,
        .map_pages      = filemap_map_pages,
        .page_mkwrite   = zonefs_filemap_page_mkwrite,
 };
@@ -1155,7 +1143,6 @@ static struct inode *zonefs_alloc_inode(struct super_block *sb)
 
        inode_init_once(&zi->i_vnode);
        mutex_init(&zi->i_truncate_mutex);
-       init_rwsem(&zi->i_mmap_sem);
        zi->i_wr_refcnt = 0;
 
        return &zi->i_vnode;
index 5114190..7b14790 100644 (file)
@@ -70,12 +70,11 @@ struct zonefs_inode_info {
         * and changes to the inode private data, and in particular changes to
         * a sequential file size on completion of direct IO writes.
         * Serialization of mmap read IOs with truncate and syscall IO
-        * operations is done with i_mmap_sem in addition to i_truncate_mutex.
-        * Only zonefs_seq_file_truncate() takes both lock (i_mmap_sem first,
-        * i_truncate_mutex second).
+        * operations is done with invalidate_lock in addition to
+        * i_truncate_mutex.  Only zonefs_seq_file_truncate() takes both lock
+        * (invalidate_lock first, i_truncate_mutex second).
         */
        struct mutex            i_truncate_mutex;
-       struct rw_semaphore     i_mmap_sem;
 
        /* guarded by i_truncate_mutex */
        unsigned int            i_wr_refcnt;
diff --git a/include/asm-generic/atomic-instrumented.h b/include/asm-generic/atomic-instrumented.h
deleted file mode 100644 (file)
index bc45af5..0000000
+++ /dev/null
@@ -1,1337 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-// Generated by scripts/atomic/gen-atomic-instrumented.sh
-// DO NOT MODIFY THIS FILE DIRECTLY
-
-/*
- * This file provides wrappers with KASAN instrumentation for atomic operations.
- * To use this functionality an arch's atomic.h file needs to define all
- * atomic operations with arch_ prefix (e.g. arch_atomic_read()) and include
- * this file at the end. This file provides atomic_read() that forwards to
- * arch_atomic_read() for actual atomic operation.
- * Note: if an arch atomic operation is implemented by means of other atomic
- * operations (e.g. atomic_read()/atomic_cmpxchg() loop), then it needs to use
- * arch_ variants (i.e. arch_atomic_read()/arch_atomic_cmpxchg()) to avoid
- * double instrumentation.
- */
-#ifndef _ASM_GENERIC_ATOMIC_INSTRUMENTED_H
-#define _ASM_GENERIC_ATOMIC_INSTRUMENTED_H
-
-#include <linux/build_bug.h>
-#include <linux/compiler.h>
-#include <linux/instrumented.h>
-
-static __always_inline int
-atomic_read(const atomic_t *v)
-{
-       instrument_atomic_read(v, sizeof(*v));
-       return arch_atomic_read(v);
-}
-
-static __always_inline int
-atomic_read_acquire(const atomic_t *v)
-{
-       instrument_atomic_read(v, sizeof(*v));
-       return arch_atomic_read_acquire(v);
-}
-
-static __always_inline void
-atomic_set(atomic_t *v, int i)
-{
-       instrument_atomic_write(v, sizeof(*v));
-       arch_atomic_set(v, i);
-}
-
-static __always_inline void
-atomic_set_release(atomic_t *v, int i)
-{
-       instrument_atomic_write(v, sizeof(*v));
-       arch_atomic_set_release(v, i);
-}
-
-static __always_inline void
-atomic_add(int i, atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       arch_atomic_add(i, v);
-}
-
-static __always_inline int
-atomic_add_return(int i, atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_add_return(i, v);
-}
-
-static __always_inline int
-atomic_add_return_acquire(int i, atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_add_return_acquire(i, v);
-}
-
-static __always_inline int
-atomic_add_return_release(int i, atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_add_return_release(i, v);
-}
-
-static __always_inline int
-atomic_add_return_relaxed(int i, atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_add_return_relaxed(i, v);
-}
-
-static __always_inline int
-atomic_fetch_add(int i, atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_fetch_add(i, v);
-}
-
-static __always_inline int
-atomic_fetch_add_acquire(int i, atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_fetch_add_acquire(i, v);
-}
-
-static __always_inline int
-atomic_fetch_add_release(int i, atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_fetch_add_release(i, v);
-}
-
-static __always_inline int
-atomic_fetch_add_relaxed(int i, atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_fetch_add_relaxed(i, v);
-}
-
-static __always_inline void
-atomic_sub(int i, atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       arch_atomic_sub(i, v);
-}
-
-static __always_inline int
-atomic_sub_return(int i, atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_sub_return(i, v);
-}
-
-static __always_inline int
-atomic_sub_return_acquire(int i, atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_sub_return_acquire(i, v);
-}
-
-static __always_inline int
-atomic_sub_return_release(int i, atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_sub_return_release(i, v);
-}
-
-static __always_inline int
-atomic_sub_return_relaxed(int i, atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_sub_return_relaxed(i, v);
-}
-
-static __always_inline int
-atomic_fetch_sub(int i, atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_fetch_sub(i, v);
-}
-
-static __always_inline int
-atomic_fetch_sub_acquire(int i, atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_fetch_sub_acquire(i, v);
-}
-
-static __always_inline int
-atomic_fetch_sub_release(int i, atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_fetch_sub_release(i, v);
-}
-
-static __always_inline int
-atomic_fetch_sub_relaxed(int i, atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_fetch_sub_relaxed(i, v);
-}
-
-static __always_inline void
-atomic_inc(atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       arch_atomic_inc(v);
-}
-
-static __always_inline int
-atomic_inc_return(atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_inc_return(v);
-}
-
-static __always_inline int
-atomic_inc_return_acquire(atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_inc_return_acquire(v);
-}
-
-static __always_inline int
-atomic_inc_return_release(atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_inc_return_release(v);
-}
-
-static __always_inline int
-atomic_inc_return_relaxed(atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_inc_return_relaxed(v);
-}
-
-static __always_inline int
-atomic_fetch_inc(atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_fetch_inc(v);
-}
-
-static __always_inline int
-atomic_fetch_inc_acquire(atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_fetch_inc_acquire(v);
-}
-
-static __always_inline int
-atomic_fetch_inc_release(atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_fetch_inc_release(v);
-}
-
-static __always_inline int
-atomic_fetch_inc_relaxed(atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_fetch_inc_relaxed(v);
-}
-
-static __always_inline void
-atomic_dec(atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       arch_atomic_dec(v);
-}
-
-static __always_inline int
-atomic_dec_return(atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_dec_return(v);
-}
-
-static __always_inline int
-atomic_dec_return_acquire(atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_dec_return_acquire(v);
-}
-
-static __always_inline int
-atomic_dec_return_release(atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_dec_return_release(v);
-}
-
-static __always_inline int
-atomic_dec_return_relaxed(atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_dec_return_relaxed(v);
-}
-
-static __always_inline int
-atomic_fetch_dec(atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_fetch_dec(v);
-}
-
-static __always_inline int
-atomic_fetch_dec_acquire(atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_fetch_dec_acquire(v);
-}
-
-static __always_inline int
-atomic_fetch_dec_release(atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_fetch_dec_release(v);
-}
-
-static __always_inline int
-atomic_fetch_dec_relaxed(atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_fetch_dec_relaxed(v);
-}
-
-static __always_inline void
-atomic_and(int i, atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       arch_atomic_and(i, v);
-}
-
-static __always_inline int
-atomic_fetch_and(int i, atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_fetch_and(i, v);
-}
-
-static __always_inline int
-atomic_fetch_and_acquire(int i, atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_fetch_and_acquire(i, v);
-}
-
-static __always_inline int
-atomic_fetch_and_release(int i, atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_fetch_and_release(i, v);
-}
-
-static __always_inline int
-atomic_fetch_and_relaxed(int i, atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_fetch_and_relaxed(i, v);
-}
-
-static __always_inline void
-atomic_andnot(int i, atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       arch_atomic_andnot(i, v);
-}
-
-static __always_inline int
-atomic_fetch_andnot(int i, atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_fetch_andnot(i, v);
-}
-
-static __always_inline int
-atomic_fetch_andnot_acquire(int i, atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_fetch_andnot_acquire(i, v);
-}
-
-static __always_inline int
-atomic_fetch_andnot_release(int i, atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_fetch_andnot_release(i, v);
-}
-
-static __always_inline int
-atomic_fetch_andnot_relaxed(int i, atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_fetch_andnot_relaxed(i, v);
-}
-
-static __always_inline void
-atomic_or(int i, atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       arch_atomic_or(i, v);
-}
-
-static __always_inline int
-atomic_fetch_or(int i, atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_fetch_or(i, v);
-}
-
-static __always_inline int
-atomic_fetch_or_acquire(int i, atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_fetch_or_acquire(i, v);
-}
-
-static __always_inline int
-atomic_fetch_or_release(int i, atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_fetch_or_release(i, v);
-}
-
-static __always_inline int
-atomic_fetch_or_relaxed(int i, atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_fetch_or_relaxed(i, v);
-}
-
-static __always_inline void
-atomic_xor(int i, atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       arch_atomic_xor(i, v);
-}
-
-static __always_inline int
-atomic_fetch_xor(int i, atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_fetch_xor(i, v);
-}
-
-static __always_inline int
-atomic_fetch_xor_acquire(int i, atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_fetch_xor_acquire(i, v);
-}
-
-static __always_inline int
-atomic_fetch_xor_release(int i, atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_fetch_xor_release(i, v);
-}
-
-static __always_inline int
-atomic_fetch_xor_relaxed(int i, atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_fetch_xor_relaxed(i, v);
-}
-
-static __always_inline int
-atomic_xchg(atomic_t *v, int i)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_xchg(v, i);
-}
-
-static __always_inline int
-atomic_xchg_acquire(atomic_t *v, int i)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_xchg_acquire(v, i);
-}
-
-static __always_inline int
-atomic_xchg_release(atomic_t *v, int i)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_xchg_release(v, i);
-}
-
-static __always_inline int
-atomic_xchg_relaxed(atomic_t *v, int i)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_xchg_relaxed(v, i);
-}
-
-static __always_inline int
-atomic_cmpxchg(atomic_t *v, int old, int new)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_cmpxchg(v, old, new);
-}
-
-static __always_inline int
-atomic_cmpxchg_acquire(atomic_t *v, int old, int new)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_cmpxchg_acquire(v, old, new);
-}
-
-static __always_inline int
-atomic_cmpxchg_release(atomic_t *v, int old, int new)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_cmpxchg_release(v, old, new);
-}
-
-static __always_inline int
-atomic_cmpxchg_relaxed(atomic_t *v, int old, int new)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_cmpxchg_relaxed(v, old, new);
-}
-
-static __always_inline bool
-atomic_try_cmpxchg(atomic_t *v, int *old, int new)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       instrument_atomic_read_write(old, sizeof(*old));
-       return arch_atomic_try_cmpxchg(v, old, new);
-}
-
-static __always_inline bool
-atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       instrument_atomic_read_write(old, sizeof(*old));
-       return arch_atomic_try_cmpxchg_acquire(v, old, new);
-}
-
-static __always_inline bool
-atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       instrument_atomic_read_write(old, sizeof(*old));
-       return arch_atomic_try_cmpxchg_release(v, old, new);
-}
-
-static __always_inline bool
-atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       instrument_atomic_read_write(old, sizeof(*old));
-       return arch_atomic_try_cmpxchg_relaxed(v, old, new);
-}
-
-static __always_inline bool
-atomic_sub_and_test(int i, atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_sub_and_test(i, v);
-}
-
-static __always_inline bool
-atomic_dec_and_test(atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_dec_and_test(v);
-}
-
-static __always_inline bool
-atomic_inc_and_test(atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_inc_and_test(v);
-}
-
-static __always_inline bool
-atomic_add_negative(int i, atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_add_negative(i, v);
-}
-
-static __always_inline int
-atomic_fetch_add_unless(atomic_t *v, int a, int u)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_fetch_add_unless(v, a, u);
-}
-
-static __always_inline bool
-atomic_add_unless(atomic_t *v, int a, int u)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_add_unless(v, a, u);
-}
-
-static __always_inline bool
-atomic_inc_not_zero(atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_inc_not_zero(v);
-}
-
-static __always_inline bool
-atomic_inc_unless_negative(atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_inc_unless_negative(v);
-}
-
-static __always_inline bool
-atomic_dec_unless_positive(atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_dec_unless_positive(v);
-}
-
-static __always_inline int
-atomic_dec_if_positive(atomic_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic_dec_if_positive(v);
-}
-
-static __always_inline s64
-atomic64_read(const atomic64_t *v)
-{
-       instrument_atomic_read(v, sizeof(*v));
-       return arch_atomic64_read(v);
-}
-
-static __always_inline s64
-atomic64_read_acquire(const atomic64_t *v)
-{
-       instrument_atomic_read(v, sizeof(*v));
-       return arch_atomic64_read_acquire(v);
-}
-
-static __always_inline void
-atomic64_set(atomic64_t *v, s64 i)
-{
-       instrument_atomic_write(v, sizeof(*v));
-       arch_atomic64_set(v, i);
-}
-
-static __always_inline void
-atomic64_set_release(atomic64_t *v, s64 i)
-{
-       instrument_atomic_write(v, sizeof(*v));
-       arch_atomic64_set_release(v, i);
-}
-
-static __always_inline void
-atomic64_add(s64 i, atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       arch_atomic64_add(i, v);
-}
-
-static __always_inline s64
-atomic64_add_return(s64 i, atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_add_return(i, v);
-}
-
-static __always_inline s64
-atomic64_add_return_acquire(s64 i, atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_add_return_acquire(i, v);
-}
-
-static __always_inline s64
-atomic64_add_return_release(s64 i, atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_add_return_release(i, v);
-}
-
-static __always_inline s64
-atomic64_add_return_relaxed(s64 i, atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_add_return_relaxed(i, v);
-}
-
-static __always_inline s64
-atomic64_fetch_add(s64 i, atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_fetch_add(i, v);
-}
-
-static __always_inline s64
-atomic64_fetch_add_acquire(s64 i, atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_fetch_add_acquire(i, v);
-}
-
-static __always_inline s64
-atomic64_fetch_add_release(s64 i, atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_fetch_add_release(i, v);
-}
-
-static __always_inline s64
-atomic64_fetch_add_relaxed(s64 i, atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_fetch_add_relaxed(i, v);
-}
-
-static __always_inline void
-atomic64_sub(s64 i, atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       arch_atomic64_sub(i, v);
-}
-
-static __always_inline s64
-atomic64_sub_return(s64 i, atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_sub_return(i, v);
-}
-
-static __always_inline s64
-atomic64_sub_return_acquire(s64 i, atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_sub_return_acquire(i, v);
-}
-
-static __always_inline s64
-atomic64_sub_return_release(s64 i, atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_sub_return_release(i, v);
-}
-
-static __always_inline s64
-atomic64_sub_return_relaxed(s64 i, atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_sub_return_relaxed(i, v);
-}
-
-static __always_inline s64
-atomic64_fetch_sub(s64 i, atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_fetch_sub(i, v);
-}
-
-static __always_inline s64
-atomic64_fetch_sub_acquire(s64 i, atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_fetch_sub_acquire(i, v);
-}
-
-static __always_inline s64
-atomic64_fetch_sub_release(s64 i, atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_fetch_sub_release(i, v);
-}
-
-static __always_inline s64
-atomic64_fetch_sub_relaxed(s64 i, atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_fetch_sub_relaxed(i, v);
-}
-
-static __always_inline void
-atomic64_inc(atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       arch_atomic64_inc(v);
-}
-
-static __always_inline s64
-atomic64_inc_return(atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_inc_return(v);
-}
-
-static __always_inline s64
-atomic64_inc_return_acquire(atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_inc_return_acquire(v);
-}
-
-static __always_inline s64
-atomic64_inc_return_release(atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_inc_return_release(v);
-}
-
-static __always_inline s64
-atomic64_inc_return_relaxed(atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_inc_return_relaxed(v);
-}
-
-static __always_inline s64
-atomic64_fetch_inc(atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_fetch_inc(v);
-}
-
-static __always_inline s64
-atomic64_fetch_inc_acquire(atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_fetch_inc_acquire(v);
-}
-
-static __always_inline s64
-atomic64_fetch_inc_release(atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_fetch_inc_release(v);
-}
-
-static __always_inline s64
-atomic64_fetch_inc_relaxed(atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_fetch_inc_relaxed(v);
-}
-
-static __always_inline void
-atomic64_dec(atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       arch_atomic64_dec(v);
-}
-
-static __always_inline s64
-atomic64_dec_return(atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_dec_return(v);
-}
-
-static __always_inline s64
-atomic64_dec_return_acquire(atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_dec_return_acquire(v);
-}
-
-static __always_inline s64
-atomic64_dec_return_release(atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_dec_return_release(v);
-}
-
-static __always_inline s64
-atomic64_dec_return_relaxed(atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_dec_return_relaxed(v);
-}
-
-static __always_inline s64
-atomic64_fetch_dec(atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_fetch_dec(v);
-}
-
-static __always_inline s64
-atomic64_fetch_dec_acquire(atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_fetch_dec_acquire(v);
-}
-
-static __always_inline s64
-atomic64_fetch_dec_release(atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_fetch_dec_release(v);
-}
-
-static __always_inline s64
-atomic64_fetch_dec_relaxed(atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_fetch_dec_relaxed(v);
-}
-
-static __always_inline void
-atomic64_and(s64 i, atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       arch_atomic64_and(i, v);
-}
-
-static __always_inline s64
-atomic64_fetch_and(s64 i, atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_fetch_and(i, v);
-}
-
-static __always_inline s64
-atomic64_fetch_and_acquire(s64 i, atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_fetch_and_acquire(i, v);
-}
-
-static __always_inline s64
-atomic64_fetch_and_release(s64 i, atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_fetch_and_release(i, v);
-}
-
-static __always_inline s64
-atomic64_fetch_and_relaxed(s64 i, atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_fetch_and_relaxed(i, v);
-}
-
-static __always_inline void
-atomic64_andnot(s64 i, atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       arch_atomic64_andnot(i, v);
-}
-
-static __always_inline s64
-atomic64_fetch_andnot(s64 i, atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_fetch_andnot(i, v);
-}
-
-static __always_inline s64
-atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_fetch_andnot_acquire(i, v);
-}
-
-static __always_inline s64
-atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_fetch_andnot_release(i, v);
-}
-
-static __always_inline s64
-atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_fetch_andnot_relaxed(i, v);
-}
-
-static __always_inline void
-atomic64_or(s64 i, atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       arch_atomic64_or(i, v);
-}
-
-static __always_inline s64
-atomic64_fetch_or(s64 i, atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_fetch_or(i, v);
-}
-
-static __always_inline s64
-atomic64_fetch_or_acquire(s64 i, atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_fetch_or_acquire(i, v);
-}
-
-static __always_inline s64
-atomic64_fetch_or_release(s64 i, atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_fetch_or_release(i, v);
-}
-
-static __always_inline s64
-atomic64_fetch_or_relaxed(s64 i, atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_fetch_or_relaxed(i, v);
-}
-
-static __always_inline void
-atomic64_xor(s64 i, atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       arch_atomic64_xor(i, v);
-}
-
-static __always_inline s64
-atomic64_fetch_xor(s64 i, atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_fetch_xor(i, v);
-}
-
-static __always_inline s64
-atomic64_fetch_xor_acquire(s64 i, atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_fetch_xor_acquire(i, v);
-}
-
-static __always_inline s64
-atomic64_fetch_xor_release(s64 i, atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_fetch_xor_release(i, v);
-}
-
-static __always_inline s64
-atomic64_fetch_xor_relaxed(s64 i, atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_fetch_xor_relaxed(i, v);
-}
-
-static __always_inline s64
-atomic64_xchg(atomic64_t *v, s64 i)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_xchg(v, i);
-}
-
-static __always_inline s64
-atomic64_xchg_acquire(atomic64_t *v, s64 i)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_xchg_acquire(v, i);
-}
-
-static __always_inline s64
-atomic64_xchg_release(atomic64_t *v, s64 i)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_xchg_release(v, i);
-}
-
-static __always_inline s64
-atomic64_xchg_relaxed(atomic64_t *v, s64 i)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_xchg_relaxed(v, i);
-}
-
-static __always_inline s64
-atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_cmpxchg(v, old, new);
-}
-
-static __always_inline s64
-atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_cmpxchg_acquire(v, old, new);
-}
-
-static __always_inline s64
-atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_cmpxchg_release(v, old, new);
-}
-
-static __always_inline s64
-atomic64_cmpxchg_relaxed(atomic64_t *v, s64 old, s64 new)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_cmpxchg_relaxed(v, old, new);
-}
-
-static __always_inline bool
-atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       instrument_atomic_read_write(old, sizeof(*old));
-       return arch_atomic64_try_cmpxchg(v, old, new);
-}
-
-static __always_inline bool
-atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       instrument_atomic_read_write(old, sizeof(*old));
-       return arch_atomic64_try_cmpxchg_acquire(v, old, new);
-}
-
-static __always_inline bool
-atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       instrument_atomic_read_write(old, sizeof(*old));
-       return arch_atomic64_try_cmpxchg_release(v, old, new);
-}
-
-static __always_inline bool
-atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       instrument_atomic_read_write(old, sizeof(*old));
-       return arch_atomic64_try_cmpxchg_relaxed(v, old, new);
-}
-
-static __always_inline bool
-atomic64_sub_and_test(s64 i, atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_sub_and_test(i, v);
-}
-
-static __always_inline bool
-atomic64_dec_and_test(atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_dec_and_test(v);
-}
-
-static __always_inline bool
-atomic64_inc_and_test(atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_inc_and_test(v);
-}
-
-static __always_inline bool
-atomic64_add_negative(s64 i, atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_add_negative(i, v);
-}
-
-static __always_inline s64
-atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_fetch_add_unless(v, a, u);
-}
-
-static __always_inline bool
-atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_add_unless(v, a, u);
-}
-
-static __always_inline bool
-atomic64_inc_not_zero(atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_inc_not_zero(v);
-}
-
-static __always_inline bool
-atomic64_inc_unless_negative(atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_inc_unless_negative(v);
-}
-
-static __always_inline bool
-atomic64_dec_unless_positive(atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_dec_unless_positive(v);
-}
-
-static __always_inline s64
-atomic64_dec_if_positive(atomic64_t *v)
-{
-       instrument_atomic_read_write(v, sizeof(*v));
-       return arch_atomic64_dec_if_positive(v);
-}
-
-#define xchg(ptr, ...) \
-({ \
-       typeof(ptr) __ai_ptr = (ptr); \
-       instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
-       arch_xchg(__ai_ptr, __VA_ARGS__); \
-})
-
-#define xchg_acquire(ptr, ...) \
-({ \
-       typeof(ptr) __ai_ptr = (ptr); \
-       instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
-       arch_xchg_acquire(__ai_ptr, __VA_ARGS__); \
-})
-
-#define xchg_release(ptr, ...) \
-({ \
-       typeof(ptr) __ai_ptr = (ptr); \
-       instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
-       arch_xchg_release(__ai_ptr, __VA_ARGS__); \
-})
-
-#define xchg_relaxed(ptr, ...) \
-({ \
-       typeof(ptr) __ai_ptr = (ptr); \
-       instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
-       arch_xchg_relaxed(__ai_ptr, __VA_ARGS__); \
-})
-
-#define cmpxchg(ptr, ...) \
-({ \
-       typeof(ptr) __ai_ptr = (ptr); \
-       instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
-       arch_cmpxchg(__ai_ptr, __VA_ARGS__); \
-})
-
-#define cmpxchg_acquire(ptr, ...) \
-({ \
-       typeof(ptr) __ai_ptr = (ptr); \
-       instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
-       arch_cmpxchg_acquire(__ai_ptr, __VA_ARGS__); \
-})
-
-#define cmpxchg_release(ptr, ...) \
-({ \
-       typeof(ptr) __ai_ptr = (ptr); \
-       instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
-       arch_cmpxchg_release(__ai_ptr, __VA_ARGS__); \
-})
-
-#define cmpxchg_relaxed(ptr, ...) \
-({ \
-       typeof(ptr) __ai_ptr = (ptr); \
-       instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
-       arch_cmpxchg_relaxed(__ai_ptr, __VA_ARGS__); \
-})
-
-#define cmpxchg64(ptr, ...) \
-({ \
-       typeof(ptr) __ai_ptr = (ptr); \
-       instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
-       arch_cmpxchg64(__ai_ptr, __VA_ARGS__); \
-})
-
-#define cmpxchg64_acquire(ptr, ...) \
-({ \
-       typeof(ptr) __ai_ptr = (ptr); \
-       instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
-       arch_cmpxchg64_acquire(__ai_ptr, __VA_ARGS__); \
-})
-
-#define cmpxchg64_release(ptr, ...) \
-({ \
-       typeof(ptr) __ai_ptr = (ptr); \
-       instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
-       arch_cmpxchg64_release(__ai_ptr, __VA_ARGS__); \
-})
-
-#define cmpxchg64_relaxed(ptr, ...) \
-({ \
-       typeof(ptr) __ai_ptr = (ptr); \
-       instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
-       arch_cmpxchg64_relaxed(__ai_ptr, __VA_ARGS__); \
-})
-
-#define try_cmpxchg(ptr, oldp, ...) \
-({ \
-       typeof(ptr) __ai_ptr = (ptr); \
-       typeof(oldp) __ai_oldp = (oldp); \
-       instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
-       instrument_atomic_write(__ai_oldp, sizeof(*__ai_oldp)); \
-       arch_try_cmpxchg(__ai_ptr, __ai_oldp, __VA_ARGS__); \
-})
-
-#define try_cmpxchg_acquire(ptr, oldp, ...) \
-({ \
-       typeof(ptr) __ai_ptr = (ptr); \
-       typeof(oldp) __ai_oldp = (oldp); \
-       instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
-       instrument_atomic_write(__ai_oldp, sizeof(*__ai_oldp)); \
-       arch_try_cmpxchg_acquire(__ai_ptr, __ai_oldp, __VA_ARGS__); \
-})
-
-#define try_cmpxchg_release(ptr, oldp, ...) \
-({ \
-       typeof(ptr) __ai_ptr = (ptr); \
-       typeof(oldp) __ai_oldp = (oldp); \
-       instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
-       instrument_atomic_write(__ai_oldp, sizeof(*__ai_oldp)); \
-       arch_try_cmpxchg_release(__ai_ptr, __ai_oldp, __VA_ARGS__); \
-})
-
-#define try_cmpxchg_relaxed(ptr, oldp, ...) \
-({ \
-       typeof(ptr) __ai_ptr = (ptr); \
-       typeof(oldp) __ai_oldp = (oldp); \
-       instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
-       instrument_atomic_write(__ai_oldp, sizeof(*__ai_oldp)); \
-       arch_try_cmpxchg_relaxed(__ai_ptr, __ai_oldp, __VA_ARGS__); \
-})
-
-#define cmpxchg_local(ptr, ...) \
-({ \
-       typeof(ptr) __ai_ptr = (ptr); \
-       instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
-       arch_cmpxchg_local(__ai_ptr, __VA_ARGS__); \
-})
-
-#define cmpxchg64_local(ptr, ...) \
-({ \
-       typeof(ptr) __ai_ptr = (ptr); \
-       instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
-       arch_cmpxchg64_local(__ai_ptr, __VA_ARGS__); \
-})
-
-#define sync_cmpxchg(ptr, ...) \
-({ \
-       typeof(ptr) __ai_ptr = (ptr); \
-       instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
-       arch_sync_cmpxchg(__ai_ptr, __VA_ARGS__); \
-})
-
-#define cmpxchg_double(ptr, ...) \
-({ \
-       typeof(ptr) __ai_ptr = (ptr); \
-       instrument_atomic_write(__ai_ptr, 2 * sizeof(*__ai_ptr)); \
-       arch_cmpxchg_double(__ai_ptr, __VA_ARGS__); \
-})
-
-
-#define cmpxchg_double_local(ptr, ...) \
-({ \
-       typeof(ptr) __ai_ptr = (ptr); \
-       instrument_atomic_write(__ai_ptr, 2 * sizeof(*__ai_ptr)); \
-       arch_cmpxchg_double_local(__ai_ptr, __VA_ARGS__); \
-})
-
-#endif /* _ASM_GENERIC_ATOMIC_INSTRUMENTED_H */
-// 1d7c3a25aca5c7fb031c307be4c3d24c7b48fcd5
diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
deleted file mode 100644 (file)
index 073cf40..0000000
+++ /dev/null
@@ -1,1014 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-// Generated by scripts/atomic/gen-atomic-long.sh
-// DO NOT MODIFY THIS FILE DIRECTLY
-
-#ifndef _ASM_GENERIC_ATOMIC_LONG_H
-#define _ASM_GENERIC_ATOMIC_LONG_H
-
-#include <linux/compiler.h>
-#include <asm/types.h>
-
-#ifdef CONFIG_64BIT
-typedef atomic64_t atomic_long_t;
-#define ATOMIC_LONG_INIT(i)            ATOMIC64_INIT(i)
-#define atomic_long_cond_read_acquire  atomic64_cond_read_acquire
-#define atomic_long_cond_read_relaxed  atomic64_cond_read_relaxed
-#else
-typedef atomic_t atomic_long_t;
-#define ATOMIC_LONG_INIT(i)            ATOMIC_INIT(i)
-#define atomic_long_cond_read_acquire  atomic_cond_read_acquire
-#define atomic_long_cond_read_relaxed  atomic_cond_read_relaxed
-#endif
-
-#ifdef CONFIG_64BIT
-
-static __always_inline long
-atomic_long_read(const atomic_long_t *v)
-{
-       return atomic64_read(v);
-}
-
-static __always_inline long
-atomic_long_read_acquire(const atomic_long_t *v)
-{
-       return atomic64_read_acquire(v);
-}
-
-static __always_inline void
-atomic_long_set(atomic_long_t *v, long i)
-{
-       atomic64_set(v, i);
-}
-
-static __always_inline void
-atomic_long_set_release(atomic_long_t *v, long i)
-{
-       atomic64_set_release(v, i);
-}
-
-static __always_inline void
-atomic_long_add(long i, atomic_long_t *v)
-{
-       atomic64_add(i, v);
-}
-
-static __always_inline long
-atomic_long_add_return(long i, atomic_long_t *v)
-{
-       return atomic64_add_return(i, v);
-}
-
-static __always_inline long
-atomic_long_add_return_acquire(long i, atomic_long_t *v)
-{
-       return atomic64_add_return_acquire(i, v);
-}
-
-static __always_inline long
-atomic_long_add_return_release(long i, atomic_long_t *v)
-{
-       return atomic64_add_return_release(i, v);
-}
-
-static __always_inline long
-atomic_long_add_return_relaxed(long i, atomic_long_t *v)
-{
-       return atomic64_add_return_relaxed(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_add(long i, atomic_long_t *v)
-{
-       return atomic64_fetch_add(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_add_acquire(long i, atomic_long_t *v)
-{
-       return atomic64_fetch_add_acquire(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_add_release(long i, atomic_long_t *v)
-{
-       return atomic64_fetch_add_release(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_add_relaxed(long i, atomic_long_t *v)
-{
-       return atomic64_fetch_add_relaxed(i, v);
-}
-
-static __always_inline void
-atomic_long_sub(long i, atomic_long_t *v)
-{
-       atomic64_sub(i, v);
-}
-
-static __always_inline long
-atomic_long_sub_return(long i, atomic_long_t *v)
-{
-       return atomic64_sub_return(i, v);
-}
-
-static __always_inline long
-atomic_long_sub_return_acquire(long i, atomic_long_t *v)
-{
-       return atomic64_sub_return_acquire(i, v);
-}
-
-static __always_inline long
-atomic_long_sub_return_release(long i, atomic_long_t *v)
-{
-       return atomic64_sub_return_release(i, v);
-}
-
-static __always_inline long
-atomic_long_sub_return_relaxed(long i, atomic_long_t *v)
-{
-       return atomic64_sub_return_relaxed(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_sub(long i, atomic_long_t *v)
-{
-       return atomic64_fetch_sub(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_sub_acquire(long i, atomic_long_t *v)
-{
-       return atomic64_fetch_sub_acquire(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_sub_release(long i, atomic_long_t *v)
-{
-       return atomic64_fetch_sub_release(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v)
-{
-       return atomic64_fetch_sub_relaxed(i, v);
-}
-
-static __always_inline void
-atomic_long_inc(atomic_long_t *v)
-{
-       atomic64_inc(v);
-}
-
-static __always_inline long
-atomic_long_inc_return(atomic_long_t *v)
-{
-       return atomic64_inc_return(v);
-}
-
-static __always_inline long
-atomic_long_inc_return_acquire(atomic_long_t *v)
-{
-       return atomic64_inc_return_acquire(v);
-}
-
-static __always_inline long
-atomic_long_inc_return_release(atomic_long_t *v)
-{
-       return atomic64_inc_return_release(v);
-}
-
-static __always_inline long
-atomic_long_inc_return_relaxed(atomic_long_t *v)
-{
-       return atomic64_inc_return_relaxed(v);
-}
-
-static __always_inline long
-atomic_long_fetch_inc(atomic_long_t *v)
-{
-       return atomic64_fetch_inc(v);
-}
-
-static __always_inline long
-atomic_long_fetch_inc_acquire(atomic_long_t *v)
-{
-       return atomic64_fetch_inc_acquire(v);
-}
-
-static __always_inline long
-atomic_long_fetch_inc_release(atomic_long_t *v)
-{
-       return atomic64_fetch_inc_release(v);
-}
-
-static __always_inline long
-atomic_long_fetch_inc_relaxed(atomic_long_t *v)
-{
-       return atomic64_fetch_inc_relaxed(v);
-}
-
-static __always_inline void
-atomic_long_dec(atomic_long_t *v)
-{
-       atomic64_dec(v);
-}
-
-static __always_inline long
-atomic_long_dec_return(atomic_long_t *v)
-{
-       return atomic64_dec_return(v);
-}
-
-static __always_inline long
-atomic_long_dec_return_acquire(atomic_long_t *v)
-{
-       return atomic64_dec_return_acquire(v);
-}
-
-static __always_inline long
-atomic_long_dec_return_release(atomic_long_t *v)
-{
-       return atomic64_dec_return_release(v);
-}
-
-static __always_inline long
-atomic_long_dec_return_relaxed(atomic_long_t *v)
-{
-       return atomic64_dec_return_relaxed(v);
-}
-
-static __always_inline long
-atomic_long_fetch_dec(atomic_long_t *v)
-{
-       return atomic64_fetch_dec(v);
-}
-
-static __always_inline long
-atomic_long_fetch_dec_acquire(atomic_long_t *v)
-{
-       return atomic64_fetch_dec_acquire(v);
-}
-
-static __always_inline long
-atomic_long_fetch_dec_release(atomic_long_t *v)
-{
-       return atomic64_fetch_dec_release(v);
-}
-
-static __always_inline long
-atomic_long_fetch_dec_relaxed(atomic_long_t *v)
-{
-       return atomic64_fetch_dec_relaxed(v);
-}
-
-static __always_inline void
-atomic_long_and(long i, atomic_long_t *v)
-{
-       atomic64_and(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_and(long i, atomic_long_t *v)
-{
-       return atomic64_fetch_and(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_and_acquire(long i, atomic_long_t *v)
-{
-       return atomic64_fetch_and_acquire(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_and_release(long i, atomic_long_t *v)
-{
-       return atomic64_fetch_and_release(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_and_relaxed(long i, atomic_long_t *v)
-{
-       return atomic64_fetch_and_relaxed(i, v);
-}
-
-static __always_inline void
-atomic_long_andnot(long i, atomic_long_t *v)
-{
-       atomic64_andnot(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_andnot(long i, atomic_long_t *v)
-{
-       return atomic64_fetch_andnot(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v)
-{
-       return atomic64_fetch_andnot_acquire(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_andnot_release(long i, atomic_long_t *v)
-{
-       return atomic64_fetch_andnot_release(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v)
-{
-       return atomic64_fetch_andnot_relaxed(i, v);
-}
-
-static __always_inline void
-atomic_long_or(long i, atomic_long_t *v)
-{
-       atomic64_or(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_or(long i, atomic_long_t *v)
-{
-       return atomic64_fetch_or(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_or_acquire(long i, atomic_long_t *v)
-{
-       return atomic64_fetch_or_acquire(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_or_release(long i, atomic_long_t *v)
-{
-       return atomic64_fetch_or_release(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_or_relaxed(long i, atomic_long_t *v)
-{
-       return atomic64_fetch_or_relaxed(i, v);
-}
-
-static __always_inline void
-atomic_long_xor(long i, atomic_long_t *v)
-{
-       atomic64_xor(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_xor(long i, atomic_long_t *v)
-{
-       return atomic64_fetch_xor(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_xor_acquire(long i, atomic_long_t *v)
-{
-       return atomic64_fetch_xor_acquire(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_xor_release(long i, atomic_long_t *v)
-{
-       return atomic64_fetch_xor_release(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v)
-{
-       return atomic64_fetch_xor_relaxed(i, v);
-}
-
-static __always_inline long
-atomic_long_xchg(atomic_long_t *v, long i)
-{
-       return atomic64_xchg(v, i);
-}
-
-static __always_inline long
-atomic_long_xchg_acquire(atomic_long_t *v, long i)
-{
-       return atomic64_xchg_acquire(v, i);
-}
-
-static __always_inline long
-atomic_long_xchg_release(atomic_long_t *v, long i)
-{
-       return atomic64_xchg_release(v, i);
-}
-
-static __always_inline long
-atomic_long_xchg_relaxed(atomic_long_t *v, long i)
-{
-       return atomic64_xchg_relaxed(v, i);
-}
-
-static __always_inline long
-atomic_long_cmpxchg(atomic_long_t *v, long old, long new)
-{
-       return atomic64_cmpxchg(v, old, new);
-}
-
-static __always_inline long
-atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new)
-{
-       return atomic64_cmpxchg_acquire(v, old, new);
-}
-
-static __always_inline long
-atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new)
-{
-       return atomic64_cmpxchg_release(v, old, new);
-}
-
-static __always_inline long
-atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new)
-{
-       return atomic64_cmpxchg_relaxed(v, old, new);
-}
-
-static __always_inline bool
-atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new)
-{
-       return atomic64_try_cmpxchg(v, (s64 *)old, new);
-}
-
-static __always_inline bool
-atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new)
-{
-       return atomic64_try_cmpxchg_acquire(v, (s64 *)old, new);
-}
-
-static __always_inline bool
-atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new)
-{
-       return atomic64_try_cmpxchg_release(v, (s64 *)old, new);
-}
-
-static __always_inline bool
-atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new)
-{
-       return atomic64_try_cmpxchg_relaxed(v, (s64 *)old, new);
-}
-
-static __always_inline bool
-atomic_long_sub_and_test(long i, atomic_long_t *v)
-{
-       return atomic64_sub_and_test(i, v);
-}
-
-static __always_inline bool
-atomic_long_dec_and_test(atomic_long_t *v)
-{
-       return atomic64_dec_and_test(v);
-}
-
-static __always_inline bool
-atomic_long_inc_and_test(atomic_long_t *v)
-{
-       return atomic64_inc_and_test(v);
-}
-
-static __always_inline bool
-atomic_long_add_negative(long i, atomic_long_t *v)
-{
-       return atomic64_add_negative(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u)
-{
-       return atomic64_fetch_add_unless(v, a, u);
-}
-
-static __always_inline bool
-atomic_long_add_unless(atomic_long_t *v, long a, long u)
-{
-       return atomic64_add_unless(v, a, u);
-}
-
-static __always_inline bool
-atomic_long_inc_not_zero(atomic_long_t *v)
-{
-       return atomic64_inc_not_zero(v);
-}
-
-static __always_inline bool
-atomic_long_inc_unless_negative(atomic_long_t *v)
-{
-       return atomic64_inc_unless_negative(v);
-}
-
-static __always_inline bool
-atomic_long_dec_unless_positive(atomic_long_t *v)
-{
-       return atomic64_dec_unless_positive(v);
-}
-
-static __always_inline long
-atomic_long_dec_if_positive(atomic_long_t *v)
-{
-       return atomic64_dec_if_positive(v);
-}
-
-#else /* CONFIG_64BIT */
-
-static __always_inline long
-atomic_long_read(const atomic_long_t *v)
-{
-       return atomic_read(v);
-}
-
-static __always_inline long
-atomic_long_read_acquire(const atomic_long_t *v)
-{
-       return atomic_read_acquire(v);
-}
-
-static __always_inline void
-atomic_long_set(atomic_long_t *v, long i)
-{
-       atomic_set(v, i);
-}
-
-static __always_inline void
-atomic_long_set_release(atomic_long_t *v, long i)
-{
-       atomic_set_release(v, i);
-}
-
-static __always_inline void
-atomic_long_add(long i, atomic_long_t *v)
-{
-       atomic_add(i, v);
-}
-
-static __always_inline long
-atomic_long_add_return(long i, atomic_long_t *v)
-{
-       return atomic_add_return(i, v);
-}
-
-static __always_inline long
-atomic_long_add_return_acquire(long i, atomic_long_t *v)
-{
-       return atomic_add_return_acquire(i, v);
-}
-
-static __always_inline long
-atomic_long_add_return_release(long i, atomic_long_t *v)
-{
-       return atomic_add_return_release(i, v);
-}
-
-static __always_inline long
-atomic_long_add_return_relaxed(long i, atomic_long_t *v)
-{
-       return atomic_add_return_relaxed(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_add(long i, atomic_long_t *v)
-{
-       return atomic_fetch_add(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_add_acquire(long i, atomic_long_t *v)
-{
-       return atomic_fetch_add_acquire(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_add_release(long i, atomic_long_t *v)
-{
-       return atomic_fetch_add_release(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_add_relaxed(long i, atomic_long_t *v)
-{
-       return atomic_fetch_add_relaxed(i, v);
-}
-
-static __always_inline void
-atomic_long_sub(long i, atomic_long_t *v)
-{
-       atomic_sub(i, v);
-}
-
-static __always_inline long
-atomic_long_sub_return(long i, atomic_long_t *v)
-{
-       return atomic_sub_return(i, v);
-}
-
-static __always_inline long
-atomic_long_sub_return_acquire(long i, atomic_long_t *v)
-{
-       return atomic_sub_return_acquire(i, v);
-}
-
-static __always_inline long
-atomic_long_sub_return_release(long i, atomic_long_t *v)
-{
-       return atomic_sub_return_release(i, v);
-}
-
-static __always_inline long
-atomic_long_sub_return_relaxed(long i, atomic_long_t *v)
-{
-       return atomic_sub_return_relaxed(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_sub(long i, atomic_long_t *v)
-{
-       return atomic_fetch_sub(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_sub_acquire(long i, atomic_long_t *v)
-{
-       return atomic_fetch_sub_acquire(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_sub_release(long i, atomic_long_t *v)
-{
-       return atomic_fetch_sub_release(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v)
-{
-       return atomic_fetch_sub_relaxed(i, v);
-}
-
-static __always_inline void
-atomic_long_inc(atomic_long_t *v)
-{
-       atomic_inc(v);
-}
-
-static __always_inline long
-atomic_long_inc_return(atomic_long_t *v)
-{
-       return atomic_inc_return(v);
-}
-
-static __always_inline long
-atomic_long_inc_return_acquire(atomic_long_t *v)
-{
-       return atomic_inc_return_acquire(v);
-}
-
-static __always_inline long
-atomic_long_inc_return_release(atomic_long_t *v)
-{
-       return atomic_inc_return_release(v);
-}
-
-static __always_inline long
-atomic_long_inc_return_relaxed(atomic_long_t *v)
-{
-       return atomic_inc_return_relaxed(v);
-}
-
-static __always_inline long
-atomic_long_fetch_inc(atomic_long_t *v)
-{
-       return atomic_fetch_inc(v);
-}
-
-static __always_inline long
-atomic_long_fetch_inc_acquire(atomic_long_t *v)
-{
-       return atomic_fetch_inc_acquire(v);
-}
-
-static __always_inline long
-atomic_long_fetch_inc_release(atomic_long_t *v)
-{
-       return atomic_fetch_inc_release(v);
-}
-
-static __always_inline long
-atomic_long_fetch_inc_relaxed(atomic_long_t *v)
-{
-       return atomic_fetch_inc_relaxed(v);
-}
-
-static __always_inline void
-atomic_long_dec(atomic_long_t *v)
-{
-       atomic_dec(v);
-}
-
-static __always_inline long
-atomic_long_dec_return(atomic_long_t *v)
-{
-       return atomic_dec_return(v);
-}
-
-static __always_inline long
-atomic_long_dec_return_acquire(atomic_long_t *v)
-{
-       return atomic_dec_return_acquire(v);
-}
-
-static __always_inline long
-atomic_long_dec_return_release(atomic_long_t *v)
-{
-       return atomic_dec_return_release(v);
-}
-
-static __always_inline long
-atomic_long_dec_return_relaxed(atomic_long_t *v)
-{
-       return atomic_dec_return_relaxed(v);
-}
-
-static __always_inline long
-atomic_long_fetch_dec(atomic_long_t *v)
-{
-       return atomic_fetch_dec(v);
-}
-
-static __always_inline long
-atomic_long_fetch_dec_acquire(atomic_long_t *v)
-{
-       return atomic_fetch_dec_acquire(v);
-}
-
-static __always_inline long
-atomic_long_fetch_dec_release(atomic_long_t *v)
-{
-       return atomic_fetch_dec_release(v);
-}
-
-static __always_inline long
-atomic_long_fetch_dec_relaxed(atomic_long_t *v)
-{
-       return atomic_fetch_dec_relaxed(v);
-}
-
-static __always_inline void
-atomic_long_and(long i, atomic_long_t *v)
-{
-       atomic_and(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_and(long i, atomic_long_t *v)
-{
-       return atomic_fetch_and(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_and_acquire(long i, atomic_long_t *v)
-{
-       return atomic_fetch_and_acquire(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_and_release(long i, atomic_long_t *v)
-{
-       return atomic_fetch_and_release(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_and_relaxed(long i, atomic_long_t *v)
-{
-       return atomic_fetch_and_relaxed(i, v);
-}
-
-static __always_inline void
-atomic_long_andnot(long i, atomic_long_t *v)
-{
-       atomic_andnot(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_andnot(long i, atomic_long_t *v)
-{
-       return atomic_fetch_andnot(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v)
-{
-       return atomic_fetch_andnot_acquire(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_andnot_release(long i, atomic_long_t *v)
-{
-       return atomic_fetch_andnot_release(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v)
-{
-       return atomic_fetch_andnot_relaxed(i, v);
-}
-
-static __always_inline void
-atomic_long_or(long i, atomic_long_t *v)
-{
-       atomic_or(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_or(long i, atomic_long_t *v)
-{
-       return atomic_fetch_or(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_or_acquire(long i, atomic_long_t *v)
-{
-       return atomic_fetch_or_acquire(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_or_release(long i, atomic_long_t *v)
-{
-       return atomic_fetch_or_release(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_or_relaxed(long i, atomic_long_t *v)
-{
-       return atomic_fetch_or_relaxed(i, v);
-}
-
-static __always_inline void
-atomic_long_xor(long i, atomic_long_t *v)
-{
-       atomic_xor(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_xor(long i, atomic_long_t *v)
-{
-       return atomic_fetch_xor(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_xor_acquire(long i, atomic_long_t *v)
-{
-       return atomic_fetch_xor_acquire(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_xor_release(long i, atomic_long_t *v)
-{
-       return atomic_fetch_xor_release(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v)
-{
-       return atomic_fetch_xor_relaxed(i, v);
-}
-
-static __always_inline long
-atomic_long_xchg(atomic_long_t *v, long i)
-{
-       return atomic_xchg(v, i);
-}
-
-static __always_inline long
-atomic_long_xchg_acquire(atomic_long_t *v, long i)
-{
-       return atomic_xchg_acquire(v, i);
-}
-
-static __always_inline long
-atomic_long_xchg_release(atomic_long_t *v, long i)
-{
-       return atomic_xchg_release(v, i);
-}
-
-static __always_inline long
-atomic_long_xchg_relaxed(atomic_long_t *v, long i)
-{
-       return atomic_xchg_relaxed(v, i);
-}
-
-static __always_inline long
-atomic_long_cmpxchg(atomic_long_t *v, long old, long new)
-{
-       return atomic_cmpxchg(v, old, new);
-}
-
-static __always_inline long
-atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new)
-{
-       return atomic_cmpxchg_acquire(v, old, new);
-}
-
-static __always_inline long
-atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new)
-{
-       return atomic_cmpxchg_release(v, old, new);
-}
-
-static __always_inline long
-atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new)
-{
-       return atomic_cmpxchg_relaxed(v, old, new);
-}
-
-static __always_inline bool
-atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new)
-{
-       return atomic_try_cmpxchg(v, (int *)old, new);
-}
-
-static __always_inline bool
-atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new)
-{
-       return atomic_try_cmpxchg_acquire(v, (int *)old, new);
-}
-
-static __always_inline bool
-atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new)
-{
-       return atomic_try_cmpxchg_release(v, (int *)old, new);
-}
-
-static __always_inline bool
-atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new)
-{
-       return atomic_try_cmpxchg_relaxed(v, (int *)old, new);
-}
-
-static __always_inline bool
-atomic_long_sub_and_test(long i, atomic_long_t *v)
-{
-       return atomic_sub_and_test(i, v);
-}
-
-static __always_inline bool
-atomic_long_dec_and_test(atomic_long_t *v)
-{
-       return atomic_dec_and_test(v);
-}
-
-static __always_inline bool
-atomic_long_inc_and_test(atomic_long_t *v)
-{
-       return atomic_inc_and_test(v);
-}
-
-static __always_inline bool
-atomic_long_add_negative(long i, atomic_long_t *v)
-{
-       return atomic_add_negative(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u)
-{
-       return atomic_fetch_add_unless(v, a, u);
-}
-
-static __always_inline bool
-atomic_long_add_unless(atomic_long_t *v, long a, long u)
-{
-       return atomic_add_unless(v, a, u);
-}
-
-static __always_inline bool
-atomic_long_inc_not_zero(atomic_long_t *v)
-{
-       return atomic_inc_not_zero(v);
-}
-
-static __always_inline bool
-atomic_long_inc_unless_negative(atomic_long_t *v)
-{
-       return atomic_inc_unless_negative(v);
-}
-
-static __always_inline bool
-atomic_long_dec_unless_positive(atomic_long_t *v)
-{
-       return atomic_dec_unless_positive(v);
-}
-
-static __always_inline long
-atomic_long_dec_if_positive(atomic_long_t *v)
-{
-       return atomic_dec_if_positive(v);
-}
-
-#endif /* CONFIG_64BIT */
-#endif /* _ASM_GENERIC_ATOMIC_LONG_H */
-// a624200981f552b2c6be4f32fe44da8289f30d87
index 0e7316a..3096f08 100644 (file)
  * See Documentation/atomic_bitops.txt for details.
  */
 
-static __always_inline void set_bit(unsigned int nr, volatile unsigned long *p)
+static __always_inline void
+arch_set_bit(unsigned int nr, volatile unsigned long *p)
 {
        p += BIT_WORD(nr);
-       atomic_long_or(BIT_MASK(nr), (atomic_long_t *)p);
+       arch_atomic_long_or(BIT_MASK(nr), (atomic_long_t *)p);
 }
 
-static __always_inline void clear_bit(unsigned int nr, volatile unsigned long *p)
+static __always_inline void
+arch_clear_bit(unsigned int nr, volatile unsigned long *p)
 {
        p += BIT_WORD(nr);
-       atomic_long_andnot(BIT_MASK(nr), (atomic_long_t *)p);
+       arch_atomic_long_andnot(BIT_MASK(nr), (atomic_long_t *)p);
 }
 
-static __always_inline void change_bit(unsigned int nr, volatile unsigned long *p)
+static __always_inline void
+arch_change_bit(unsigned int nr, volatile unsigned long *p)
 {
        p += BIT_WORD(nr);
-       atomic_long_xor(BIT_MASK(nr), (atomic_long_t *)p);
+       arch_atomic_long_xor(BIT_MASK(nr), (atomic_long_t *)p);
 }
 
-static inline int test_and_set_bit(unsigned int nr, volatile unsigned long *p)
+static __always_inline int
+arch_test_and_set_bit(unsigned int nr, volatile unsigned long *p)
 {
        long old;
        unsigned long mask = BIT_MASK(nr);
@@ -38,11 +42,12 @@ static inline int test_and_set_bit(unsigned int nr, volatile unsigned long *p)
        if (READ_ONCE(*p) & mask)
                return 1;
 
-       old = atomic_long_fetch_or(mask, (atomic_long_t *)p);
+       old = arch_atomic_long_fetch_or(mask, (atomic_long_t *)p);
        return !!(old & mask);
 }
 
-static inline int test_and_clear_bit(unsigned int nr, volatile unsigned long *p)
+static __always_inline int
+arch_test_and_clear_bit(unsigned int nr, volatile unsigned long *p)
 {
        long old;
        unsigned long mask = BIT_MASK(nr);
@@ -51,18 +56,21 @@ static inline int test_and_clear_bit(unsigned int nr, volatile unsigned long *p)
        if (!(READ_ONCE(*p) & mask))
                return 0;
 
-       old = atomic_long_fetch_andnot(mask, (atomic_long_t *)p);
+       old = arch_atomic_long_fetch_andnot(mask, (atomic_long_t *)p);
        return !!(old & mask);
 }
 
-static inline int test_and_change_bit(unsigned int nr, volatile unsigned long *p)
+static __always_inline int
+arch_test_and_change_bit(unsigned int nr, volatile unsigned long *p)
 {
        long old;
        unsigned long mask = BIT_MASK(nr);
 
        p += BIT_WORD(nr);
-       old = atomic_long_fetch_xor(mask, (atomic_long_t *)p);
+       old = arch_atomic_long_fetch_xor(mask, (atomic_long_t *)p);
        return !!(old & mask);
 }
 
+#include <asm-generic/bitops/instrumented-atomic.h>
+
 #endif /* _ASM_GENERIC_BITOPS_ATOMIC_H */
index 3ae0213..630f2f6 100644 (file)
@@ -7,7 +7,7 @@
 #include <asm/barrier.h>
 
 /**
- * test_and_set_bit_lock - Set a bit and return its old value, for lock
+ * arch_test_and_set_bit_lock - Set a bit and return its old value, for lock
  * @nr: Bit to set
  * @addr: Address to count from
  *
@@ -15,8 +15,8 @@
  * the returned value is 0.
  * It can be used to implement bit locks.
  */
-static inline int test_and_set_bit_lock(unsigned int nr,
-                                       volatile unsigned long *p)
+static __always_inline int
+arch_test_and_set_bit_lock(unsigned int nr, volatile unsigned long *p)
 {
        long old;
        unsigned long mask = BIT_MASK(nr);
@@ -25,26 +25,27 @@ static inline int test_and_set_bit_lock(unsigned int nr,
        if (READ_ONCE(*p) & mask)
                return 1;
 
-       old = atomic_long_fetch_or_acquire(mask, (atomic_long_t *)p);
+       old = arch_atomic_long_fetch_or_acquire(mask, (atomic_long_t *)p);
        return !!(old & mask);
 }
 
 
 /**
- * clear_bit_unlock - Clear a bit in memory, for unlock
+ * arch_clear_bit_unlock - Clear a bit in memory, for unlock
  * @nr: the bit to set
  * @addr: the address to start counting from
  *
  * This operation is atomic and provides release barrier semantics.
  */
-static inline void clear_bit_unlock(unsigned int nr, volatile unsigned long *p)
+static __always_inline void
+arch_clear_bit_unlock(unsigned int nr, volatile unsigned long *p)
 {
        p += BIT_WORD(nr);
-       atomic_long_fetch_andnot_release(BIT_MASK(nr), (atomic_long_t *)p);
+       arch_atomic_long_fetch_andnot_release(BIT_MASK(nr), (atomic_long_t *)p);
 }
 
 /**
- * __clear_bit_unlock - Clear a bit in memory, for unlock
+ * arch___clear_bit_unlock - Clear a bit in memory, for unlock
  * @nr: the bit to set
  * @addr: the address to start counting from
  *
@@ -54,38 +55,40 @@ static inline void clear_bit_unlock(unsigned int nr, volatile unsigned long *p)
  *
  * See for example x86's implementation.
  */
-static inline void __clear_bit_unlock(unsigned int nr,
-                                     volatile unsigned long *p)
+static inline void
+arch___clear_bit_unlock(unsigned int nr, volatile unsigned long *p)
 {
        unsigned long old;
 
        p += BIT_WORD(nr);
        old = READ_ONCE(*p);
        old &= ~BIT_MASK(nr);
-       atomic_long_set_release((atomic_long_t *)p, old);
+       arch_atomic_long_set_release((atomic_long_t *)p, old);
 }
 
 /**
- * clear_bit_unlock_is_negative_byte - Clear a bit in memory and test if bottom
- *                                     byte is negative, for unlock.
+ * arch_clear_bit_unlock_is_negative_byte - Clear a bit in memory and test if bottom
+ *                                          byte is negative, for unlock.
  * @nr: the bit to clear
  * @addr: the address to start counting from
  *
  * This is a bit of a one-trick-pony for the filemap code, which clears
  * PG_locked and tests PG_waiters,
  */
-#ifndef clear_bit_unlock_is_negative_byte
-static inline bool clear_bit_unlock_is_negative_byte(unsigned int nr,
-                                                    volatile unsigned long *p)
+#ifndef arch_clear_bit_unlock_is_negative_byte
+static inline bool arch_clear_bit_unlock_is_negative_byte(unsigned int nr,
+                                                         volatile unsigned long *p)
 {
        long old;
        unsigned long mask = BIT_MASK(nr);
 
        p += BIT_WORD(nr);
-       old = atomic_long_fetch_andnot_release(mask, (atomic_long_t *)p);
+       old = arch_atomic_long_fetch_andnot_release(mask, (atomic_long_t *)p);
        return !!(old & BIT(7));
 }
-#define clear_bit_unlock_is_negative_byte clear_bit_unlock_is_negative_byte
+#define arch_clear_bit_unlock_is_negative_byte arch_clear_bit_unlock_is_negative_byte
 #endif
 
+#include <asm-generic/bitops/instrumented-lock.h>
+
 #endif /* _ASM_GENERIC_BITOPS_LOCK_H_ */
index 7e10c4b..365377f 100644 (file)
@@ -5,7 +5,7 @@
 #include <asm/types.h>
 
 /**
- * __set_bit - Set a bit in memory
+ * arch___set_bit - Set a bit in memory
  * @nr: the bit to set
  * @addr: the address to start counting from
  *
  * If it's called on the same region of memory simultaneously, the effect
  * may be that only one operation succeeds.
  */
-static inline void __set_bit(int nr, volatile unsigned long *addr)
+static __always_inline void
+arch___set_bit(int nr, volatile unsigned long *addr)
 {
        unsigned long mask = BIT_MASK(nr);
        unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
 
        *p  |= mask;
 }
+#define __set_bit arch___set_bit
 
-static inline void __clear_bit(int nr, volatile unsigned long *addr)
+static __always_inline void
+arch___clear_bit(int nr, volatile unsigned long *addr)
 {
        unsigned long mask = BIT_MASK(nr);
        unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
 
        *p &= ~mask;
 }
+#define __clear_bit arch___clear_bit
 
 /**
- * __change_bit - Toggle a bit in memory
+ * arch___change_bit - Toggle a bit in memory
  * @nr: the bit to change
  * @addr: the address to start counting from
  *
@@ -38,16 +42,18 @@ static inline void __clear_bit(int nr, volatile unsigned long *addr)
  * If it's called on the same region of memory simultaneously, the effect
  * may be that only one operation succeeds.
  */
-static inline void __change_bit(int nr, volatile unsigned long *addr)
+static __always_inline
+void arch___change_bit(int nr, volatile unsigned long *addr)
 {
        unsigned long mask = BIT_MASK(nr);
        unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
 
        *p ^= mask;
 }
+#define __change_bit arch___change_bit
 
 /**
- * __test_and_set_bit - Set a bit and return its old value
+ * arch___test_and_set_bit - Set a bit and return its old value
  * @nr: Bit to set
  * @addr: Address to count from
  *
@@ -55,7 +61,8 @@ static inline void __change_bit(int nr, volatile unsigned long *addr)
  * If two examples of this operation race, one can appear to succeed
  * but actually fail.  You must protect multiple accesses with a lock.
  */
-static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
+static __always_inline int
+arch___test_and_set_bit(int nr, volatile unsigned long *addr)
 {
        unsigned long mask = BIT_MASK(nr);
        unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
@@ -64,9 +71,10 @@ static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
        *p = old | mask;
        return (old & mask) != 0;
 }
+#define __test_and_set_bit arch___test_and_set_bit
 
 /**
- * __test_and_clear_bit - Clear a bit and return its old value
+ * arch___test_and_clear_bit - Clear a bit and return its old value
  * @nr: Bit to clear
  * @addr: Address to count from
  *
@@ -74,7 +82,8 @@ static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
  * If two examples of this operation race, one can appear to succeed
  * but actually fail.  You must protect multiple accesses with a lock.
  */
-static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
+static __always_inline int
+arch___test_and_clear_bit(int nr, volatile unsigned long *addr)
 {
        unsigned long mask = BIT_MASK(nr);
        unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
@@ -83,10 +92,11 @@ static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
        *p = old & ~mask;
        return (old & mask) != 0;
 }
+#define __test_and_clear_bit arch___test_and_clear_bit
 
 /* WARNING: non atomic and it can be reordered! */
-static inline int __test_and_change_bit(int nr,
-                                           volatile unsigned long *addr)
+static __always_inline int
+arch___test_and_change_bit(int nr, volatile unsigned long *addr)
 {
        unsigned long mask = BIT_MASK(nr);
        unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
@@ -95,15 +105,18 @@ static inline int __test_and_change_bit(int nr,
        *p = old ^ mask;
        return (old & mask) != 0;
 }
+#define __test_and_change_bit arch___test_and_change_bit
 
 /**
- * test_bit - Determine whether a bit is set
+ * arch_test_bit - Determine whether a bit is set
  * @nr: bit number to test
  * @addr: Address to start counting from
  */
-static inline int test_bit(int nr, const volatile unsigned long *addr)
+static __always_inline int
+arch_test_bit(int nr, const volatile unsigned long *addr)
 {
        return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
 }
+#define test_bit arch_test_bit
 
 #endif /* _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ */
index 1732541..62669b3 100644 (file)
                NOINSTR_TEXT                                            \
                *(.text..refcount)                                      \
                *(.ref.text)                                            \
+               *(.text.asan.* .text.tsan.*)                            \
                TEXT_CFI_JT                                             \
        MEM_KEEP(init.text*)                                            \
        MEM_KEEP(exit.text*)                                            \
index 47accec..f603325 100644 (file)
@@ -38,9 +38,9 @@ extern void public_key_free(struct public_key *key);
 struct public_key_signature {
        struct asymmetric_key_id *auth_ids[2];
        u8 *s;                  /* Signature */
-       u32 s_size;             /* Number of bytes in signature */
        u8 *digest;
-       u8 digest_size;         /* Number of bytes in digest */
+       u32 s_size;             /* Number of bytes in signature */
+       u32 digest_size;        /* Number of bytes in digest */
        const char *pkey_algo;
        const char *hash_algo;
        const char *encoding;
index 7afd730..709f286 100644 (file)
@@ -3,6 +3,7 @@
 /*
  * Common values for the SM4 algorithm
  * Copyright (C) 2018 ARM Limited or its affiliates.
+ * Copyright (c) 2021 Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
  */
 
 #ifndef _CRYPTO_SM4_H
 #define SM4_BLOCK_SIZE 16
 #define SM4_RKEY_WORDS 32
 
-struct crypto_sm4_ctx {
+struct sm4_ctx {
        u32 rkey_enc[SM4_RKEY_WORDS];
        u32 rkey_dec[SM4_RKEY_WORDS];
 };
 
-int crypto_sm4_set_key(struct crypto_tfm *tfm, const u8 *in_key,
-                      unsigned int key_len);
-int crypto_sm4_expand_key(struct crypto_sm4_ctx *ctx, const u8 *in_key,
+/**
+ * sm4_expandkey - Expands the SM4 key as described in GB/T 32907-2016
+ * @ctx:       The location where the computed key will be stored.
+ * @in_key:    The supplied key.
+ * @key_len:   The length of the supplied key.
+ *
+ * Returns 0 on success. The function fails only if an invalid key size (or
+ * pointer) is supplied.
+ */
+int sm4_expandkey(struct sm4_ctx *ctx, const u8 *in_key,
                          unsigned int key_len);
 
-void crypto_sm4_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in);
-void crypto_sm4_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in);
+/**
+ * sm4_crypt_block - Encrypt or decrypt a single SM4 block
+ * @rk:                The rkey_enc for encrypt or rkey_dec for decrypt
+ * @out:       Buffer to store output data
+ * @in:        Buffer containing the input data
+ */
+void sm4_crypt_block(const u32 *rk, u8 *out, const u8 *in);
 
 #endif
index 063791b..d7aa42c 100644 (file)
 #define OST_CLK_PERCPU_TIMER2  3
 #define OST_CLK_PERCPU_TIMER3  4
 
+#define OST_CLK_EVENT_TIMER            1
+
+#define OST_CLK_EVENT_TIMER0   0
+#define OST_CLK_EVENT_TIMER1   1
+#define OST_CLK_EVENT_TIMER2   2
+#define OST_CLK_EVENT_TIMER3   3
+#define OST_CLK_EVENT_TIMER4   4
+#define OST_CLK_EVENT_TIMER5   5
+#define OST_CLK_EVENT_TIMER6   6
+#define OST_CLK_EVENT_TIMER7   7
+#define OST_CLK_EVENT_TIMER8   8
+#define OST_CLK_EVENT_TIMER9   9
+#define OST_CLK_EVENT_TIMER10  10
+#define OST_CLK_EVENT_TIMER11  11
+#define OST_CLK_EVENT_TIMER12  12
+#define OST_CLK_EVENT_TIMER13  13
+#define OST_CLK_EVENT_TIMER14  14
+#define OST_CLK_EVENT_TIMER15  15
+
 #endif /* __DT_BINDINGS_CLOCK_INGENIC_OST_H__ */
index d918bf3..3205699 100644 (file)
@@ -16,4 +16,8 @@
 #define SMB3XX_CHG_ENABLE_PIN_ACTIVE_LOW       1
 #define SMB3XX_CHG_ENABLE_PIN_ACTIVE_HIGH      2
 
+/* Polarity of INOK signal */
+#define SMB3XX_SYSOK_INOK_ACTIVE_LOW           0
+#define SMB3XX_SYSOK_INOK_ACTIVE_HIGH          1
+
 #endif
diff --git a/include/linux/atomic-arch-fallback.h b/include/linux/atomic-arch-fallback.h
deleted file mode 100644 (file)
index a3dba31..0000000
+++ /dev/null
@@ -1,2361 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-// Generated by scripts/atomic/gen-atomic-fallback.sh
-// DO NOT MODIFY THIS FILE DIRECTLY
-
-#ifndef _LINUX_ATOMIC_FALLBACK_H
-#define _LINUX_ATOMIC_FALLBACK_H
-
-#include <linux/compiler.h>
-
-#ifndef arch_xchg_relaxed
-#define arch_xchg_acquire arch_xchg
-#define arch_xchg_release arch_xchg
-#define arch_xchg_relaxed arch_xchg
-#else /* arch_xchg_relaxed */
-
-#ifndef arch_xchg_acquire
-#define arch_xchg_acquire(...) \
-       __atomic_op_acquire(arch_xchg, __VA_ARGS__)
-#endif
-
-#ifndef arch_xchg_release
-#define arch_xchg_release(...) \
-       __atomic_op_release(arch_xchg, __VA_ARGS__)
-#endif
-
-#ifndef arch_xchg
-#define arch_xchg(...) \
-       __atomic_op_fence(arch_xchg, __VA_ARGS__)
-#endif
-
-#endif /* arch_xchg_relaxed */
-
-#ifndef arch_cmpxchg_relaxed
-#define arch_cmpxchg_acquire arch_cmpxchg
-#define arch_cmpxchg_release arch_cmpxchg
-#define arch_cmpxchg_relaxed arch_cmpxchg
-#else /* arch_cmpxchg_relaxed */
-
-#ifndef arch_cmpxchg_acquire
-#define arch_cmpxchg_acquire(...) \
-       __atomic_op_acquire(arch_cmpxchg, __VA_ARGS__)
-#endif
-
-#ifndef arch_cmpxchg_release
-#define arch_cmpxchg_release(...) \
-       __atomic_op_release(arch_cmpxchg, __VA_ARGS__)
-#endif
-
-#ifndef arch_cmpxchg
-#define arch_cmpxchg(...) \
-       __atomic_op_fence(arch_cmpxchg, __VA_ARGS__)
-#endif
-
-#endif /* arch_cmpxchg_relaxed */
-
-#ifndef arch_cmpxchg64_relaxed
-#define arch_cmpxchg64_acquire arch_cmpxchg64
-#define arch_cmpxchg64_release arch_cmpxchg64
-#define arch_cmpxchg64_relaxed arch_cmpxchg64
-#else /* arch_cmpxchg64_relaxed */
-
-#ifndef arch_cmpxchg64_acquire
-#define arch_cmpxchg64_acquire(...) \
-       __atomic_op_acquire(arch_cmpxchg64, __VA_ARGS__)
-#endif
-
-#ifndef arch_cmpxchg64_release
-#define arch_cmpxchg64_release(...) \
-       __atomic_op_release(arch_cmpxchg64, __VA_ARGS__)
-#endif
-
-#ifndef arch_cmpxchg64
-#define arch_cmpxchg64(...) \
-       __atomic_op_fence(arch_cmpxchg64, __VA_ARGS__)
-#endif
-
-#endif /* arch_cmpxchg64_relaxed */
-
-#ifndef arch_try_cmpxchg_relaxed
-#ifdef arch_try_cmpxchg
-#define arch_try_cmpxchg_acquire arch_try_cmpxchg
-#define arch_try_cmpxchg_release arch_try_cmpxchg
-#define arch_try_cmpxchg_relaxed arch_try_cmpxchg
-#endif /* arch_try_cmpxchg */
-
-#ifndef arch_try_cmpxchg
-#define arch_try_cmpxchg(_ptr, _oldp, _new) \
-({ \
-       typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
-       ___r = arch_cmpxchg((_ptr), ___o, (_new)); \
-       if (unlikely(___r != ___o)) \
-               *___op = ___r; \
-       likely(___r == ___o); \
-})
-#endif /* arch_try_cmpxchg */
-
-#ifndef arch_try_cmpxchg_acquire
-#define arch_try_cmpxchg_acquire(_ptr, _oldp, _new) \
-({ \
-       typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
-       ___r = arch_cmpxchg_acquire((_ptr), ___o, (_new)); \
-       if (unlikely(___r != ___o)) \
-               *___op = ___r; \
-       likely(___r == ___o); \
-})
-#endif /* arch_try_cmpxchg_acquire */
-
-#ifndef arch_try_cmpxchg_release
-#define arch_try_cmpxchg_release(_ptr, _oldp, _new) \
-({ \
-       typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
-       ___r = arch_cmpxchg_release((_ptr), ___o, (_new)); \
-       if (unlikely(___r != ___o)) \
-               *___op = ___r; \
-       likely(___r == ___o); \
-})
-#endif /* arch_try_cmpxchg_release */
-
-#ifndef arch_try_cmpxchg_relaxed
-#define arch_try_cmpxchg_relaxed(_ptr, _oldp, _new) \
-({ \
-       typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
-       ___r = arch_cmpxchg_relaxed((_ptr), ___o, (_new)); \
-       if (unlikely(___r != ___o)) \
-               *___op = ___r; \
-       likely(___r == ___o); \
-})
-#endif /* arch_try_cmpxchg_relaxed */
-
-#else /* arch_try_cmpxchg_relaxed */
-
-#ifndef arch_try_cmpxchg_acquire
-#define arch_try_cmpxchg_acquire(...) \
-       __atomic_op_acquire(arch_try_cmpxchg, __VA_ARGS__)
-#endif
-
-#ifndef arch_try_cmpxchg_release
-#define arch_try_cmpxchg_release(...) \
-       __atomic_op_release(arch_try_cmpxchg, __VA_ARGS__)
-#endif
-
-#ifndef arch_try_cmpxchg
-#define arch_try_cmpxchg(...) \
-       __atomic_op_fence(arch_try_cmpxchg, __VA_ARGS__)
-#endif
-
-#endif /* arch_try_cmpxchg_relaxed */
-
-#ifndef arch_atomic_read_acquire
-static __always_inline int
-arch_atomic_read_acquire(const atomic_t *v)
-{
-       return smp_load_acquire(&(v)->counter);
-}
-#define arch_atomic_read_acquire arch_atomic_read_acquire
-#endif
-
-#ifndef arch_atomic_set_release
-static __always_inline void
-arch_atomic_set_release(atomic_t *v, int i)
-{
-       smp_store_release(&(v)->counter, i);
-}
-#define arch_atomic_set_release arch_atomic_set_release
-#endif
-
-#ifndef arch_atomic_add_return_relaxed
-#define arch_atomic_add_return_acquire arch_atomic_add_return
-#define arch_atomic_add_return_release arch_atomic_add_return
-#define arch_atomic_add_return_relaxed arch_atomic_add_return
-#else /* arch_atomic_add_return_relaxed */
-
-#ifndef arch_atomic_add_return_acquire
-static __always_inline int
-arch_atomic_add_return_acquire(int i, atomic_t *v)
-{
-       int ret = arch_atomic_add_return_relaxed(i, v);
-       __atomic_acquire_fence();
-       return ret;
-}
-#define arch_atomic_add_return_acquire arch_atomic_add_return_acquire
-#endif
-
-#ifndef arch_atomic_add_return_release
-static __always_inline int
-arch_atomic_add_return_release(int i, atomic_t *v)
-{
-       __atomic_release_fence();
-       return arch_atomic_add_return_relaxed(i, v);
-}
-#define arch_atomic_add_return_release arch_atomic_add_return_release
-#endif
-
-#ifndef arch_atomic_add_return
-static __always_inline int
-arch_atomic_add_return(int i, atomic_t *v)
-{
-       int ret;
-       __atomic_pre_full_fence();
-       ret = arch_atomic_add_return_relaxed(i, v);
-       __atomic_post_full_fence();
-       return ret;
-}
-#define arch_atomic_add_return arch_atomic_add_return
-#endif
-
-#endif /* arch_atomic_add_return_relaxed */
-
-#ifndef arch_atomic_fetch_add_relaxed
-#define arch_atomic_fetch_add_acquire arch_atomic_fetch_add
-#define arch_atomic_fetch_add_release arch_atomic_fetch_add
-#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add
-#else /* arch_atomic_fetch_add_relaxed */
-
-#ifndef arch_atomic_fetch_add_acquire
-static __always_inline int
-arch_atomic_fetch_add_acquire(int i, atomic_t *v)
-{
-       int ret = arch_atomic_fetch_add_relaxed(i, v);
-       __atomic_acquire_fence();
-       return ret;
-}
-#define arch_atomic_fetch_add_acquire arch_atomic_fetch_add_acquire
-#endif
-
-#ifndef arch_atomic_fetch_add_release
-static __always_inline int
-arch_atomic_fetch_add_release(int i, atomic_t *v)
-{
-       __atomic_release_fence();
-       return arch_atomic_fetch_add_relaxed(i, v);
-}
-#define arch_atomic_fetch_add_release arch_atomic_fetch_add_release
-#endif
-
-#ifndef arch_atomic_fetch_add
-static __always_inline int
-arch_atomic_fetch_add(int i, atomic_t *v)
-{
-       int ret;
-       __atomic_pre_full_fence();
-       ret = arch_atomic_fetch_add_relaxed(i, v);
-       __atomic_post_full_fence();
-       return ret;
-}
-#define arch_atomic_fetch_add arch_atomic_fetch_add
-#endif
-
-#endif /* arch_atomic_fetch_add_relaxed */
-
-#ifndef arch_atomic_sub_return_relaxed
-#define arch_atomic_sub_return_acquire arch_atomic_sub_return
-#define arch_atomic_sub_return_release arch_atomic_sub_return
-#define arch_atomic_sub_return_relaxed arch_atomic_sub_return
-#else /* arch_atomic_sub_return_relaxed */
-
-#ifndef arch_atomic_sub_return_acquire
-static __always_inline int
-arch_atomic_sub_return_acquire(int i, atomic_t *v)
-{
-       int ret = arch_atomic_sub_return_relaxed(i, v);
-       __atomic_acquire_fence();
-       return ret;
-}
-#define arch_atomic_sub_return_acquire arch_atomic_sub_return_acquire
-#endif
-
-#ifndef arch_atomic_sub_return_release
-static __always_inline int
-arch_atomic_sub_return_release(int i, atomic_t *v)
-{
-       __atomic_release_fence();
-       return arch_atomic_sub_return_relaxed(i, v);
-}
-#define arch_atomic_sub_return_release arch_atomic_sub_return_release
-#endif
-
-#ifndef arch_atomic_sub_return
-static __always_inline int
-arch_atomic_sub_return(int i, atomic_t *v)
-{
-       int ret;
-       __atomic_pre_full_fence();
-       ret = arch_atomic_sub_return_relaxed(i, v);
-       __atomic_post_full_fence();
-       return ret;
-}
-#define arch_atomic_sub_return arch_atomic_sub_return
-#endif
-
-#endif /* arch_atomic_sub_return_relaxed */
-
-#ifndef arch_atomic_fetch_sub_relaxed
-#define arch_atomic_fetch_sub_acquire arch_atomic_fetch_sub
-#define arch_atomic_fetch_sub_release arch_atomic_fetch_sub
-#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub
-#else /* arch_atomic_fetch_sub_relaxed */
-
-#ifndef arch_atomic_fetch_sub_acquire
-static __always_inline int
-arch_atomic_fetch_sub_acquire(int i, atomic_t *v)
-{
-       int ret = arch_atomic_fetch_sub_relaxed(i, v);
-       __atomic_acquire_fence();
-       return ret;
-}
-#define arch_atomic_fetch_sub_acquire arch_atomic_fetch_sub_acquire
-#endif
-
-#ifndef arch_atomic_fetch_sub_release
-static __always_inline int
-arch_atomic_fetch_sub_release(int i, atomic_t *v)
-{
-       __atomic_release_fence();
-       return arch_atomic_fetch_sub_relaxed(i, v);
-}
-#define arch_atomic_fetch_sub_release arch_atomic_fetch_sub_release
-#endif
-
-#ifndef arch_atomic_fetch_sub
-static __always_inline int
-arch_atomic_fetch_sub(int i, atomic_t *v)
-{
-       int ret;
-       __atomic_pre_full_fence();
-       ret = arch_atomic_fetch_sub_relaxed(i, v);
-       __atomic_post_full_fence();
-       return ret;
-}
-#define arch_atomic_fetch_sub arch_atomic_fetch_sub
-#endif
-
-#endif /* arch_atomic_fetch_sub_relaxed */
-
-#ifndef arch_atomic_inc
-static __always_inline void
-arch_atomic_inc(atomic_t *v)
-{
-       arch_atomic_add(1, v);
-}
-#define arch_atomic_inc arch_atomic_inc
-#endif
-
-#ifndef arch_atomic_inc_return_relaxed
-#ifdef arch_atomic_inc_return
-#define arch_atomic_inc_return_acquire arch_atomic_inc_return
-#define arch_atomic_inc_return_release arch_atomic_inc_return
-#define arch_atomic_inc_return_relaxed arch_atomic_inc_return
-#endif /* arch_atomic_inc_return */
-
-#ifndef arch_atomic_inc_return
-static __always_inline int
-arch_atomic_inc_return(atomic_t *v)
-{
-       return arch_atomic_add_return(1, v);
-}
-#define arch_atomic_inc_return arch_atomic_inc_return
-#endif
-
-#ifndef arch_atomic_inc_return_acquire
-static __always_inline int
-arch_atomic_inc_return_acquire(atomic_t *v)
-{
-       return arch_atomic_add_return_acquire(1, v);
-}
-#define arch_atomic_inc_return_acquire arch_atomic_inc_return_acquire
-#endif
-
-#ifndef arch_atomic_inc_return_release
-static __always_inline int
-arch_atomic_inc_return_release(atomic_t *v)
-{
-       return arch_atomic_add_return_release(1, v);
-}
-#define arch_atomic_inc_return_release arch_atomic_inc_return_release
-#endif
-
-#ifndef arch_atomic_inc_return_relaxed
-static __always_inline int
-arch_atomic_inc_return_relaxed(atomic_t *v)
-{
-       return arch_atomic_add_return_relaxed(1, v);
-}
-#define arch_atomic_inc_return_relaxed arch_atomic_inc_return_relaxed
-#endif
-
-#else /* arch_atomic_inc_return_relaxed */
-
-#ifndef arch_atomic_inc_return_acquire
-static __always_inline int
-arch_atomic_inc_return_acquire(atomic_t *v)
-{
-       int ret = arch_atomic_inc_return_relaxed(v);
-       __atomic_acquire_fence();
-       return ret;
-}
-#define arch_atomic_inc_return_acquire arch_atomic_inc_return_acquire
-#endif
-
-#ifndef arch_atomic_inc_return_release
-static __always_inline int
-arch_atomic_inc_return_release(atomic_t *v)
-{
-       __atomic_release_fence();
-       return arch_atomic_inc_return_relaxed(v);
-}
-#define arch_atomic_inc_return_release arch_atomic_inc_return_release
-#endif
-
-#ifndef arch_atomic_inc_return
-static __always_inline int
-arch_atomic_inc_return(atomic_t *v)
-{
-       int ret;
-       __atomic_pre_full_fence();
-       ret = arch_atomic_inc_return_relaxed(v);
-       __atomic_post_full_fence();
-       return ret;
-}
-#define arch_atomic_inc_return arch_atomic_inc_return
-#endif
-
-#endif /* arch_atomic_inc_return_relaxed */
-
-#ifndef arch_atomic_fetch_inc_relaxed
-#ifdef arch_atomic_fetch_inc
-#define arch_atomic_fetch_inc_acquire arch_atomic_fetch_inc
-#define arch_atomic_fetch_inc_release arch_atomic_fetch_inc
-#define arch_atomic_fetch_inc_relaxed arch_atomic_fetch_inc
-#endif /* arch_atomic_fetch_inc */
-
-#ifndef arch_atomic_fetch_inc
-static __always_inline int
-arch_atomic_fetch_inc(atomic_t *v)
-{
-       return arch_atomic_fetch_add(1, v);
-}
-#define arch_atomic_fetch_inc arch_atomic_fetch_inc
-#endif
-
-#ifndef arch_atomic_fetch_inc_acquire
-static __always_inline int
-arch_atomic_fetch_inc_acquire(atomic_t *v)
-{
-       return arch_atomic_fetch_add_acquire(1, v);
-}
-#define arch_atomic_fetch_inc_acquire arch_atomic_fetch_inc_acquire
-#endif
-
-#ifndef arch_atomic_fetch_inc_release
-static __always_inline int
-arch_atomic_fetch_inc_release(atomic_t *v)
-{
-       return arch_atomic_fetch_add_release(1, v);
-}
-#define arch_atomic_fetch_inc_release arch_atomic_fetch_inc_release
-#endif
-
-#ifndef arch_atomic_fetch_inc_relaxed
-static __always_inline int
-arch_atomic_fetch_inc_relaxed(atomic_t *v)
-{
-       return arch_atomic_fetch_add_relaxed(1, v);
-}
-#define arch_atomic_fetch_inc_relaxed arch_atomic_fetch_inc_relaxed
-#endif
-
-#else /* arch_atomic_fetch_inc_relaxed */
-
-#ifndef arch_atomic_fetch_inc_acquire
-static __always_inline int
-arch_atomic_fetch_inc_acquire(atomic_t *v)
-{
-       int ret = arch_atomic_fetch_inc_relaxed(v);
-       __atomic_acquire_fence();
-       return ret;
-}
-#define arch_atomic_fetch_inc_acquire arch_atomic_fetch_inc_acquire
-#endif
-
-#ifndef arch_atomic_fetch_inc_release
-static __always_inline int
-arch_atomic_fetch_inc_release(atomic_t *v)
-{
-       __atomic_release_fence();
-       return arch_atomic_fetch_inc_relaxed(v);
-}
-#define arch_atomic_fetch_inc_release arch_atomic_fetch_inc_release
-#endif
-
-#ifndef arch_atomic_fetch_inc
-static __always_inline int
-arch_atomic_fetch_inc(atomic_t *v)
-{
-       int ret;
-       __atomic_pre_full_fence();
-       ret = arch_atomic_fetch_inc_relaxed(v);
-       __atomic_post_full_fence();
-       return ret;
-}
-#define arch_atomic_fetch_inc arch_atomic_fetch_inc
-#endif
-
-#endif /* arch_atomic_fetch_inc_relaxed */
-
-#ifndef arch_atomic_dec
-static __always_inline void
-arch_atomic_dec(atomic_t *v)
-{
-       arch_atomic_sub(1, v);
-}
-#define arch_atomic_dec arch_atomic_dec
-#endif
-
-#ifndef arch_atomic_dec_return_relaxed
-#ifdef arch_atomic_dec_return
-#define arch_atomic_dec_return_acquire arch_atomic_dec_return
-#define arch_atomic_dec_return_release arch_atomic_dec_return
-#define arch_atomic_dec_return_relaxed arch_atomic_dec_return
-#endif /* arch_atomic_dec_return */
-
-#ifndef arch_atomic_dec_return
-static __always_inline int
-arch_atomic_dec_return(atomic_t *v)
-{
-       return arch_atomic_sub_return(1, v);
-}
-#define arch_atomic_dec_return arch_atomic_dec_return
-#endif
-
-#ifndef arch_atomic_dec_return_acquire
-static __always_inline int
-arch_atomic_dec_return_acquire(atomic_t *v)
-{
-       return arch_atomic_sub_return_acquire(1, v);
-}
-#define arch_atomic_dec_return_acquire arch_atomic_dec_return_acquire
-#endif
-
-#ifndef arch_atomic_dec_return_release
-static __always_inline int
-arch_atomic_dec_return_release(atomic_t *v)
-{
-       return arch_atomic_sub_return_release(1, v);
-}
-#define arch_atomic_dec_return_release arch_atomic_dec_return_release
-#endif
-
-#ifndef arch_atomic_dec_return_relaxed
-static __always_inline int
-arch_atomic_dec_return_relaxed(atomic_t *v)
-{
-       return arch_atomic_sub_return_relaxed(1, v);
-}
-#define arch_atomic_dec_return_relaxed arch_atomic_dec_return_relaxed
-#endif
-
-#else /* arch_atomic_dec_return_relaxed */
-
-#ifndef arch_atomic_dec_return_acquire
-static __always_inline int
-arch_atomic_dec_return_acquire(atomic_t *v)
-{
-       int ret = arch_atomic_dec_return_relaxed(v);
-       __atomic_acquire_fence();
-       return ret;
-}
-#define arch_atomic_dec_return_acquire arch_atomic_dec_return_acquire
-#endif
-
-#ifndef arch_atomic_dec_return_release
-static __always_inline int
-arch_atomic_dec_return_release(atomic_t *v)
-{
-       __atomic_release_fence();
-       return arch_atomic_dec_return_relaxed(v);
-}
-#define arch_atomic_dec_return_release arch_atomic_dec_return_release
-#endif
-
-#ifndef arch_atomic_dec_return
-static __always_inline int
-arch_atomic_dec_return(atomic_t *v)
-{
-       int ret;
-       __atomic_pre_full_fence();
-       ret = arch_atomic_dec_return_relaxed(v);
-       __atomic_post_full_fence();
-       return ret;
-}
-#define arch_atomic_dec_return arch_atomic_dec_return
-#endif
-
-#endif /* arch_atomic_dec_return_relaxed */
-
-#ifndef arch_atomic_fetch_dec_relaxed
-#ifdef arch_atomic_fetch_dec
-#define arch_atomic_fetch_dec_acquire arch_atomic_fetch_dec
-#define arch_atomic_fetch_dec_release arch_atomic_fetch_dec
-#define arch_atomic_fetch_dec_relaxed arch_atomic_fetch_dec
-#endif /* arch_atomic_fetch_dec */
-
-#ifndef arch_atomic_fetch_dec
-static __always_inline int
-arch_atomic_fetch_dec(atomic_t *v)
-{
-       return arch_atomic_fetch_sub(1, v);
-}
-#define arch_atomic_fetch_dec arch_atomic_fetch_dec
-#endif
-
-#ifndef arch_atomic_fetch_dec_acquire
-static __always_inline int
-arch_atomic_fetch_dec_acquire(atomic_t *v)
-{
-       return arch_atomic_fetch_sub_acquire(1, v);
-}
-#define arch_atomic_fetch_dec_acquire arch_atomic_fetch_dec_acquire
-#endif
-
-#ifndef arch_atomic_fetch_dec_release
-static __always_inline int
-arch_atomic_fetch_dec_release(atomic_t *v)
-{
-       return arch_atomic_fetch_sub_release(1, v);
-}
-#define arch_atomic_fetch_dec_release arch_atomic_fetch_dec_release
-#endif
-
-#ifndef arch_atomic_fetch_dec_relaxed
-static __always_inline int
-arch_atomic_fetch_dec_relaxed(atomic_t *v)
-{
-       return arch_atomic_fetch_sub_relaxed(1, v);
-}
-#define arch_atomic_fetch_dec_relaxed arch_atomic_fetch_dec_relaxed
-#endif
-
-#else /* arch_atomic_fetch_dec_relaxed */
-
-#ifndef arch_atomic_fetch_dec_acquire
-static __always_inline int
-arch_atomic_fetch_dec_acquire(atomic_t *v)
-{
-       int ret = arch_atomic_fetch_dec_relaxed(v);
-       __atomic_acquire_fence();
-       return ret;
-}
-#define arch_atomic_fetch_dec_acquire arch_atomic_fetch_dec_acquire
-#endif
-
-#ifndef arch_atomic_fetch_dec_release
-static __always_inline int
-arch_atomic_fetch_dec_release(atomic_t *v)
-{
-       __atomic_release_fence();
-       return arch_atomic_fetch_dec_relaxed(v);
-}
-#define arch_atomic_fetch_dec_release arch_atomic_fetch_dec_release
-#endif
-
-#ifndef arch_atomic_fetch_dec
-static __always_inline int
-arch_atomic_fetch_dec(atomic_t *v)
-{
-       int ret;
-       __atomic_pre_full_fence();
-       ret = arch_atomic_fetch_dec_relaxed(v);
-       __atomic_post_full_fence();
-       return ret;
-}
-#define arch_atomic_fetch_dec arch_atomic_fetch_dec
-#endif
-
-#endif /* arch_atomic_fetch_dec_relaxed */
-
-#ifndef arch_atomic_fetch_and_relaxed
-#define arch_atomic_fetch_and_acquire arch_atomic_fetch_and
-#define arch_atomic_fetch_and_release arch_atomic_fetch_and
-#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and
-#else /* arch_atomic_fetch_and_relaxed */
-
-#ifndef arch_atomic_fetch_and_acquire
-static __always_inline int
-arch_atomic_fetch_and_acquire(int i, atomic_t *v)
-{
-       int ret = arch_atomic_fetch_and_relaxed(i, v);
-       __atomic_acquire_fence();
-       return ret;
-}
-#define arch_atomic_fetch_and_acquire arch_atomic_fetch_and_acquire
-#endif
-
-#ifndef arch_atomic_fetch_and_release
-static __always_inline int
-arch_atomic_fetch_and_release(int i, atomic_t *v)
-{
-       __atomic_release_fence();
-       return arch_atomic_fetch_and_relaxed(i, v);
-}
-#define arch_atomic_fetch_and_release arch_atomic_fetch_and_release
-#endif
-
-#ifndef arch_atomic_fetch_and
-static __always_inline int
-arch_atomic_fetch_and(int i, atomic_t *v)
-{
-       int ret;
-       __atomic_pre_full_fence();
-       ret = arch_atomic_fetch_and_relaxed(i, v);
-       __atomic_post_full_fence();
-       return ret;
-}
-#define arch_atomic_fetch_and arch_atomic_fetch_and
-#endif
-
-#endif /* arch_atomic_fetch_and_relaxed */
-
-#ifndef arch_atomic_andnot
-static __always_inline void
-arch_atomic_andnot(int i, atomic_t *v)
-{
-       arch_atomic_and(~i, v);
-}
-#define arch_atomic_andnot arch_atomic_andnot
-#endif
-
-#ifndef arch_atomic_fetch_andnot_relaxed
-#ifdef arch_atomic_fetch_andnot
-#define arch_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot
-#define arch_atomic_fetch_andnot_release arch_atomic_fetch_andnot
-#define arch_atomic_fetch_andnot_relaxed arch_atomic_fetch_andnot
-#endif /* arch_atomic_fetch_andnot */
-
-#ifndef arch_atomic_fetch_andnot
-static __always_inline int
-arch_atomic_fetch_andnot(int i, atomic_t *v)
-{
-       return arch_atomic_fetch_and(~i, v);
-}
-#define arch_atomic_fetch_andnot arch_atomic_fetch_andnot
-#endif
-
-#ifndef arch_atomic_fetch_andnot_acquire
-static __always_inline int
-arch_atomic_fetch_andnot_acquire(int i, atomic_t *v)
-{
-       return arch_atomic_fetch_and_acquire(~i, v);
-}
-#define arch_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot_acquire
-#endif
-
-#ifndef arch_atomic_fetch_andnot_release
-static __always_inline int
-arch_atomic_fetch_andnot_release(int i, atomic_t *v)
-{
-       return arch_atomic_fetch_and_release(~i, v);
-}
-#define arch_atomic_fetch_andnot_release arch_atomic_fetch_andnot_release
-#endif
-
-#ifndef arch_atomic_fetch_andnot_relaxed
-static __always_inline int
-arch_atomic_fetch_andnot_relaxed(int i, atomic_t *v)
-{
-       return arch_atomic_fetch_and_relaxed(~i, v);
-}
-#define arch_atomic_fetch_andnot_relaxed arch_atomic_fetch_andnot_relaxed
-#endif
-
-#else /* arch_atomic_fetch_andnot_relaxed */
-
-#ifndef arch_atomic_fetch_andnot_acquire
-static __always_inline int
-arch_atomic_fetch_andnot_acquire(int i, atomic_t *v)
-{
-       int ret = arch_atomic_fetch_andnot_relaxed(i, v);
-       __atomic_acquire_fence();
-       return ret;
-}
-#define arch_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot_acquire
-#endif
-
-#ifndef arch_atomic_fetch_andnot_release
-static __always_inline int
-arch_atomic_fetch_andnot_release(int i, atomic_t *v)
-{
-       __atomic_release_fence();
-       return arch_atomic_fetch_andnot_relaxed(i, v);
-}
-#define arch_atomic_fetch_andnot_release arch_atomic_fetch_andnot_release
-#endif
-
-#ifndef arch_atomic_fetch_andnot
-static __always_inline int
-arch_atomic_fetch_andnot(int i, atomic_t *v)
-{
-       int ret;
-       __atomic_pre_full_fence();
-       ret = arch_atomic_fetch_andnot_relaxed(i, v);
-       __atomic_post_full_fence();
-       return ret;
-}
-#define arch_atomic_fetch_andnot arch_atomic_fetch_andnot
-#endif
-
-#endif /* arch_atomic_fetch_andnot_relaxed */
-
-#ifndef arch_atomic_fetch_or_relaxed
-#define arch_atomic_fetch_or_acquire arch_atomic_fetch_or
-#define arch_atomic_fetch_or_release arch_atomic_fetch_or
-#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or
-#else /* arch_atomic_fetch_or_relaxed */
-
-#ifndef arch_atomic_fetch_or_acquire
-static __always_inline int
-arch_atomic_fetch_or_acquire(int i, atomic_t *v)
-{
-       int ret = arch_atomic_fetch_or_relaxed(i, v);
-       __atomic_acquire_fence();
-       return ret;
-}
-#define arch_atomic_fetch_or_acquire arch_atomic_fetch_or_acquire
-#endif
-
-#ifndef arch_atomic_fetch_or_release
-static __always_inline int
-arch_atomic_fetch_or_release(int i, atomic_t *v)
-{
-       __atomic_release_fence();
-       return arch_atomic_fetch_or_relaxed(i, v);
-}
-#define arch_atomic_fetch_or_release arch_atomic_fetch_or_release
-#endif
-
-#ifndef arch_atomic_fetch_or
-static __always_inline int
-arch_atomic_fetch_or(int i, atomic_t *v)
-{
-       int ret;
-       __atomic_pre_full_fence();
-       ret = arch_atomic_fetch_or_relaxed(i, v);
-       __atomic_post_full_fence();
-       return ret;
-}
-#define arch_atomic_fetch_or arch_atomic_fetch_or
-#endif
-
-#endif /* arch_atomic_fetch_or_relaxed */
-
-#ifndef arch_atomic_fetch_xor_relaxed
-#define arch_atomic_fetch_xor_acquire arch_atomic_fetch_xor
-#define arch_atomic_fetch_xor_release arch_atomic_fetch_xor
-#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor
-#else /* arch_atomic_fetch_xor_relaxed */
-
-#ifndef arch_atomic_fetch_xor_acquire
-static __always_inline int
-arch_atomic_fetch_xor_acquire(int i, atomic_t *v)
-{
-       int ret = arch_atomic_fetch_xor_relaxed(i, v);
-       __atomic_acquire_fence();
-       return ret;
-}
-#define arch_atomic_fetch_xor_acquire arch_atomic_fetch_xor_acquire
-#endif
-
-#ifndef arch_atomic_fetch_xor_release
-static __always_inline int
-arch_atomic_fetch_xor_release(int i, atomic_t *v)
-{
-       __atomic_release_fence();
-       return arch_atomic_fetch_xor_relaxed(i, v);
-}
-#define arch_atomic_fetch_xor_release arch_atomic_fetch_xor_release
-#endif
-
-#ifndef arch_atomic_fetch_xor
-static __always_inline int
-arch_atomic_fetch_xor(int i, atomic_t *v)
-{
-       int ret;
-       __atomic_pre_full_fence();
-       ret = arch_atomic_fetch_xor_relaxed(i, v);
-       __atomic_post_full_fence();
-       return ret;
-}
-#define arch_atomic_fetch_xor arch_atomic_fetch_xor
-#endif
-
-#endif /* arch_atomic_fetch_xor_relaxed */
-
-#ifndef arch_atomic_xchg_relaxed
-#define arch_atomic_xchg_acquire arch_atomic_xchg
-#define arch_atomic_xchg_release arch_atomic_xchg
-#define arch_atomic_xchg_relaxed arch_atomic_xchg
-#else /* arch_atomic_xchg_relaxed */
-
-#ifndef arch_atomic_xchg_acquire
-static __always_inline int
-arch_atomic_xchg_acquire(atomic_t *v, int i)
-{
-       int ret = arch_atomic_xchg_relaxed(v, i);
-       __atomic_acquire_fence();
-       return ret;
-}
-#define arch_atomic_xchg_acquire arch_atomic_xchg_acquire
-#endif
-
-#ifndef arch_atomic_xchg_release
-static __always_inline int
-arch_atomic_xchg_release(atomic_t *v, int i)
-{
-       __atomic_release_fence();
-       return arch_atomic_xchg_relaxed(v, i);
-}
-#define arch_atomic_xchg_release arch_atomic_xchg_release
-#endif
-
-#ifndef arch_atomic_xchg
-static __always_inline int
-arch_atomic_xchg(atomic_t *v, int i)
-{
-       int ret;
-       __atomic_pre_full_fence();
-       ret = arch_atomic_xchg_relaxed(v, i);
-       __atomic_post_full_fence();
-       return ret;
-}
-#define arch_atomic_xchg arch_atomic_xchg
-#endif
-
-#endif /* arch_atomic_xchg_relaxed */
-
-#ifndef arch_atomic_cmpxchg_relaxed
-#define arch_atomic_cmpxchg_acquire arch_atomic_cmpxchg
-#define arch_atomic_cmpxchg_release arch_atomic_cmpxchg
-#define arch_atomic_cmpxchg_relaxed arch_atomic_cmpxchg
-#else /* arch_atomic_cmpxchg_relaxed */
-
-#ifndef arch_atomic_cmpxchg_acquire
-static __always_inline int
-arch_atomic_cmpxchg_acquire(atomic_t *v, int old, int new)
-{
-       int ret = arch_atomic_cmpxchg_relaxed(v, old, new);
-       __atomic_acquire_fence();
-       return ret;
-}
-#define arch_atomic_cmpxchg_acquire arch_atomic_cmpxchg_acquire
-#endif
-
-#ifndef arch_atomic_cmpxchg_release
-static __always_inline int
-arch_atomic_cmpxchg_release(atomic_t *v, int old, int new)
-{
-       __atomic_release_fence();
-       return arch_atomic_cmpxchg_relaxed(v, old, new);
-}
-#define arch_atomic_cmpxchg_release arch_atomic_cmpxchg_release
-#endif
-
-#ifndef arch_atomic_cmpxchg
-static __always_inline int
-arch_atomic_cmpxchg(atomic_t *v, int old, int new)
-{
-       int ret;
-       __atomic_pre_full_fence();
-       ret = arch_atomic_cmpxchg_relaxed(v, old, new);
-       __atomic_post_full_fence();
-       return ret;
-}
-#define arch_atomic_cmpxchg arch_atomic_cmpxchg
-#endif
-
-#endif /* arch_atomic_cmpxchg_relaxed */
-
-#ifndef arch_atomic_try_cmpxchg_relaxed
-#ifdef arch_atomic_try_cmpxchg
-#define arch_atomic_try_cmpxchg_acquire arch_atomic_try_cmpxchg
-#define arch_atomic_try_cmpxchg_release arch_atomic_try_cmpxchg
-#define arch_atomic_try_cmpxchg_relaxed arch_atomic_try_cmpxchg
-#endif /* arch_atomic_try_cmpxchg */
-
-#ifndef arch_atomic_try_cmpxchg
-static __always_inline bool
-arch_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
-{
-       int r, o = *old;
-       r = arch_atomic_cmpxchg(v, o, new);
-       if (unlikely(r != o))
-               *old = r;
-       return likely(r == o);
-}
-#define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg
-#endif
-
-#ifndef arch_atomic_try_cmpxchg_acquire
-static __always_inline bool
-arch_atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
-{
-       int r, o = *old;
-       r = arch_atomic_cmpxchg_acquire(v, o, new);
-       if (unlikely(r != o))
-               *old = r;
-       return likely(r == o);
-}
-#define arch_atomic_try_cmpxchg_acquire arch_atomic_try_cmpxchg_acquire
-#endif
-
-#ifndef arch_atomic_try_cmpxchg_release
-static __always_inline bool
-arch_atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
-{
-       int r, o = *old;
-       r = arch_atomic_cmpxchg_release(v, o, new);
-       if (unlikely(r != o))
-               *old = r;
-       return likely(r == o);
-}
-#define arch_atomic_try_cmpxchg_release arch_atomic_try_cmpxchg_release
-#endif
-
-#ifndef arch_atomic_try_cmpxchg_relaxed
-static __always_inline bool
-arch_atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
-{
-       int r, o = *old;
-       r = arch_atomic_cmpxchg_relaxed(v, o, new);
-       if (unlikely(r != o))
-               *old = r;
-       return likely(r == o);
-}
-#define arch_atomic_try_cmpxchg_relaxed arch_atomic_try_cmpxchg_relaxed
-#endif
-
-#else /* arch_atomic_try_cmpxchg_relaxed */
-
-#ifndef arch_atomic_try_cmpxchg_acquire
-static __always_inline bool
-arch_atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
-{
-       bool ret = arch_atomic_try_cmpxchg_relaxed(v, old, new);
-       __atomic_acquire_fence();
-       return ret;
-}
-#define arch_atomic_try_cmpxchg_acquire arch_atomic_try_cmpxchg_acquire
-#endif
-
-#ifndef arch_atomic_try_cmpxchg_release
-static __always_inline bool
-arch_atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
-{
-       __atomic_release_fence();
-       return arch_atomic_try_cmpxchg_relaxed(v, old, new);
-}
-#define arch_atomic_try_cmpxchg_release arch_atomic_try_cmpxchg_release
-#endif
-
-#ifndef arch_atomic_try_cmpxchg
-static __always_inline bool
-arch_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
-{
-       bool ret;
-       __atomic_pre_full_fence();
-       ret = arch_atomic_try_cmpxchg_relaxed(v, old, new);
-       __atomic_post_full_fence();
-       return ret;
-}
-#define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg
-#endif
-
-#endif /* arch_atomic_try_cmpxchg_relaxed */
-
-#ifndef arch_atomic_sub_and_test
-/**
- * arch_atomic_sub_and_test - subtract value from variable and test result
- * @i: integer value to subtract
- * @v: pointer of type atomic_t
- *
- * Atomically subtracts @i from @v and returns
- * true if the result is zero, or false for all
- * other cases.
- */
-static __always_inline bool
-arch_atomic_sub_and_test(int i, atomic_t *v)
-{
-       return arch_atomic_sub_return(i, v) == 0;
-}
-#define arch_atomic_sub_and_test arch_atomic_sub_and_test
-#endif
-
-#ifndef arch_atomic_dec_and_test
-/**
- * arch_atomic_dec_and_test - decrement and test
- * @v: pointer of type atomic_t
- *
- * Atomically decrements @v by 1 and
- * returns true if the result is 0, or false for all other
- * cases.
- */
-static __always_inline bool
-arch_atomic_dec_and_test(atomic_t *v)
-{
-       return arch_atomic_dec_return(v) == 0;
-}
-#define arch_atomic_dec_and_test arch_atomic_dec_and_test
-#endif
-
-#ifndef arch_atomic_inc_and_test
-/**
- * arch_atomic_inc_and_test - increment and test
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-static __always_inline bool
-arch_atomic_inc_and_test(atomic_t *v)
-{
-       return arch_atomic_inc_return(v) == 0;
-}
-#define arch_atomic_inc_and_test arch_atomic_inc_and_test
-#endif
-
-#ifndef arch_atomic_add_negative
-/**
- * arch_atomic_add_negative - add and test if negative
- * @i: integer value to add
- * @v: pointer of type atomic_t
- *
- * Atomically adds @i to @v and returns true
- * if the result is negative, or false when
- * result is greater than or equal to zero.
- */
-static __always_inline bool
-arch_atomic_add_negative(int i, atomic_t *v)
-{
-       return arch_atomic_add_return(i, v) < 0;
-}
-#define arch_atomic_add_negative arch_atomic_add_negative
-#endif
-
-#ifndef arch_atomic_fetch_add_unless
-/**
- * arch_atomic_fetch_add_unless - add unless the number is already a given value
- * @v: pointer of type atomic_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, so long as @v was not already @u.
- * Returns original value of @v
- */
-static __always_inline int
-arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
-{
-       int c = arch_atomic_read(v);
-
-       do {
-               if (unlikely(c == u))
-                       break;
-       } while (!arch_atomic_try_cmpxchg(v, &c, c + a));
-
-       return c;
-}
-#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
-#endif
-
-#ifndef arch_atomic_add_unless
-/**
- * arch_atomic_add_unless - add unless the number is already a given value
- * @v: pointer of type atomic_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, if @v was not already @u.
- * Returns true if the addition was done.
- */
-static __always_inline bool
-arch_atomic_add_unless(atomic_t *v, int a, int u)
-{
-       return arch_atomic_fetch_add_unless(v, a, u) != u;
-}
-#define arch_atomic_add_unless arch_atomic_add_unless
-#endif
-
-#ifndef arch_atomic_inc_not_zero
-/**
- * arch_atomic_inc_not_zero - increment unless the number is zero
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1, if @v is non-zero.
- * Returns true if the increment was done.
- */
-static __always_inline bool
-arch_atomic_inc_not_zero(atomic_t *v)
-{
-       return arch_atomic_add_unless(v, 1, 0);
-}
-#define arch_atomic_inc_not_zero arch_atomic_inc_not_zero
-#endif
-
-#ifndef arch_atomic_inc_unless_negative
-static __always_inline bool
-arch_atomic_inc_unless_negative(atomic_t *v)
-{
-       int c = arch_atomic_read(v);
-
-       do {
-               if (unlikely(c < 0))
-                       return false;
-       } while (!arch_atomic_try_cmpxchg(v, &c, c + 1));
-
-       return true;
-}
-#define arch_atomic_inc_unless_negative arch_atomic_inc_unless_negative
-#endif
-
-#ifndef arch_atomic_dec_unless_positive
-static __always_inline bool
-arch_atomic_dec_unless_positive(atomic_t *v)
-{
-       int c = arch_atomic_read(v);
-
-       do {
-               if (unlikely(c > 0))
-                       return false;
-       } while (!arch_atomic_try_cmpxchg(v, &c, c - 1));
-
-       return true;
-}
-#define arch_atomic_dec_unless_positive arch_atomic_dec_unless_positive
-#endif
-
-#ifndef arch_atomic_dec_if_positive
-static __always_inline int
-arch_atomic_dec_if_positive(atomic_t *v)
-{
-       int dec, c = arch_atomic_read(v);
-
-       do {
-               dec = c - 1;
-               if (unlikely(dec < 0))
-                       break;
-       } while (!arch_atomic_try_cmpxchg(v, &c, dec));
-
-       return dec;
-}
-#define arch_atomic_dec_if_positive arch_atomic_dec_if_positive
-#endif
-
-#ifdef CONFIG_GENERIC_ATOMIC64
-#include <asm-generic/atomic64.h>
-#endif
-
-#ifndef arch_atomic64_read_acquire
-static __always_inline s64
-arch_atomic64_read_acquire(const atomic64_t *v)
-{
-       return smp_load_acquire(&(v)->counter);
-}
-#define arch_atomic64_read_acquire arch_atomic64_read_acquire
-#endif
-
-#ifndef arch_atomic64_set_release
-static __always_inline void
-arch_atomic64_set_release(atomic64_t *v, s64 i)
-{
-       smp_store_release(&(v)->counter, i);
-}
-#define arch_atomic64_set_release arch_atomic64_set_release
-#endif
-
-#ifndef arch_atomic64_add_return_relaxed
-#define arch_atomic64_add_return_acquire arch_atomic64_add_return
-#define arch_atomic64_add_return_release arch_atomic64_add_return
-#define arch_atomic64_add_return_relaxed arch_atomic64_add_return
-#else /* arch_atomic64_add_return_relaxed */
-
-#ifndef arch_atomic64_add_return_acquire
-static __always_inline s64
-arch_atomic64_add_return_acquire(s64 i, atomic64_t *v)
-{
-       s64 ret = arch_atomic64_add_return_relaxed(i, v);
-       __atomic_acquire_fence();
-       return ret;
-}
-#define arch_atomic64_add_return_acquire arch_atomic64_add_return_acquire
-#endif
-
-#ifndef arch_atomic64_add_return_release
-static __always_inline s64
-arch_atomic64_add_return_release(s64 i, atomic64_t *v)
-{
-       __atomic_release_fence();
-       return arch_atomic64_add_return_relaxed(i, v);
-}
-#define arch_atomic64_add_return_release arch_atomic64_add_return_release
-#endif
-
-#ifndef arch_atomic64_add_return
-static __always_inline s64
-arch_atomic64_add_return(s64 i, atomic64_t *v)
-{
-       s64 ret;
-       __atomic_pre_full_fence();
-       ret = arch_atomic64_add_return_relaxed(i, v);
-       __atomic_post_full_fence();
-       return ret;
-}
-#define arch_atomic64_add_return arch_atomic64_add_return
-#endif
-
-#endif /* arch_atomic64_add_return_relaxed */
-
-#ifndef arch_atomic64_fetch_add_relaxed
-#define arch_atomic64_fetch_add_acquire arch_atomic64_fetch_add
-#define arch_atomic64_fetch_add_release arch_atomic64_fetch_add
-#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add
-#else /* arch_atomic64_fetch_add_relaxed */
-
-#ifndef arch_atomic64_fetch_add_acquire
-static __always_inline s64
-arch_atomic64_fetch_add_acquire(s64 i, atomic64_t *v)
-{
-       s64 ret = arch_atomic64_fetch_add_relaxed(i, v);
-       __atomic_acquire_fence();
-       return ret;
-}
-#define arch_atomic64_fetch_add_acquire arch_atomic64_fetch_add_acquire
-#endif
-
-#ifndef arch_atomic64_fetch_add_release
-static __always_inline s64
-arch_atomic64_fetch_add_release(s64 i, atomic64_t *v)
-{
-       __atomic_release_fence();
-       return arch_atomic64_fetch_add_relaxed(i, v);
-}
-#define arch_atomic64_fetch_add_release arch_atomic64_fetch_add_release
-#endif
-
-#ifndef arch_atomic64_fetch_add
-static __always_inline s64
-arch_atomic64_fetch_add(s64 i, atomic64_t *v)
-{
-       s64 ret;
-       __atomic_pre_full_fence();
-       ret = arch_atomic64_fetch_add_relaxed(i, v);
-       __atomic_post_full_fence();
-       return ret;
-}
-#define arch_atomic64_fetch_add arch_atomic64_fetch_add
-#endif
-
-#endif /* arch_atomic64_fetch_add_relaxed */
-
-#ifndef arch_atomic64_sub_return_relaxed
-#define arch_atomic64_sub_return_acquire arch_atomic64_sub_return
-#define arch_atomic64_sub_return_release arch_atomic64_sub_return
-#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return
-#else /* arch_atomic64_sub_return_relaxed */
-
-#ifndef arch_atomic64_sub_return_acquire
-static __always_inline s64
-arch_atomic64_sub_return_acquire(s64 i, atomic64_t *v)
-{
-       s64 ret = arch_atomic64_sub_return_relaxed(i, v);
-       __atomic_acquire_fence();
-       return ret;
-}
-#define arch_atomic64_sub_return_acquire arch_atomic64_sub_return_acquire
-#endif
-
-#ifndef arch_atomic64_sub_return_release
-static __always_inline s64
-arch_atomic64_sub_return_release(s64 i, atomic64_t *v)
-{
-       __atomic_release_fence();
-       return arch_atomic64_sub_return_relaxed(i, v);
-}
-#define arch_atomic64_sub_return_release arch_atomic64_sub_return_release
-#endif
-
-#ifndef arch_atomic64_sub_return
-static __always_inline s64
-arch_atomic64_sub_return(s64 i, atomic64_t *v)
-{
-       s64 ret;
-       __atomic_pre_full_fence();
-       ret = arch_atomic64_sub_return_relaxed(i, v);
-       __atomic_post_full_fence();
-       return ret;
-}
-#define arch_atomic64_sub_return arch_atomic64_sub_return
-#endif
-
-#endif /* arch_atomic64_sub_return_relaxed */
-
-#ifndef arch_atomic64_fetch_sub_relaxed
-#define arch_atomic64_fetch_sub_acquire arch_atomic64_fetch_sub
-#define arch_atomic64_fetch_sub_release arch_atomic64_fetch_sub
-#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub
-#else /* arch_atomic64_fetch_sub_relaxed */
-
-#ifndef arch_atomic64_fetch_sub_acquire
-static __always_inline s64
-arch_atomic64_fetch_sub_acquire(s64 i, atomic64_t *v)
-{
-       s64 ret = arch_atomic64_fetch_sub_relaxed(i, v);
-       __atomic_acquire_fence();
-       return ret;
-}
-#define arch_atomic64_fetch_sub_acquire arch_atomic64_fetch_sub_acquire
-#endif
-
-#ifndef arch_atomic64_fetch_sub_release
-static __always_inline s64
-arch_atomic64_fetch_sub_release(s64 i, atomic64_t *v)
-{
-       __atomic_release_fence();
-       return arch_atomic64_fetch_sub_relaxed(i, v);
-}
-#define arch_atomic64_fetch_sub_release arch_atomic64_fetch_sub_release
-#endif
-
-#ifndef arch_atomic64_fetch_sub
-static __always_inline s64
-arch_atomic64_fetch_sub(s64 i, atomic64_t *v)
-{
-       s64 ret;
-       __atomic_pre_full_fence();
-       ret = arch_atomic64_fetch_sub_relaxed(i, v);
-       __atomic_post_full_fence();
-       return ret;
-}
-#define arch_atomic64_fetch_sub arch_atomic64_fetch_sub
-#endif
-
-#endif /* arch_atomic64_fetch_sub_relaxed */
-
-#ifndef arch_atomic64_inc
-static __always_inline void
-arch_atomic64_inc(atomic64_t *v)
-{
-       arch_atomic64_add(1, v);
-}
-#define arch_atomic64_inc arch_atomic64_inc
-#endif
-
-#ifndef arch_atomic64_inc_return_relaxed
-#ifdef arch_atomic64_inc_return
-#define arch_atomic64_inc_return_acquire arch_atomic64_inc_return
-#define arch_atomic64_inc_return_release arch_atomic64_inc_return
-#define arch_atomic64_inc_return_relaxed arch_atomic64_inc_return
-#endif /* arch_atomic64_inc_return */
-
-#ifndef arch_atomic64_inc_return
-static __always_inline s64
-arch_atomic64_inc_return(atomic64_t *v)
-{
-       return arch_atomic64_add_return(1, v);
-}
-#define arch_atomic64_inc_return arch_atomic64_inc_return
-#endif
-
-#ifndef arch_atomic64_inc_return_acquire
-static __always_inline s64
-arch_atomic64_inc_return_acquire(atomic64_t *v)
-{
-       return arch_atomic64_add_return_acquire(1, v);
-}
-#define arch_atomic64_inc_return_acquire arch_atomic64_inc_return_acquire
-#endif
-
-#ifndef arch_atomic64_inc_return_release
-static __always_inline s64
-arch_atomic64_inc_return_release(atomic64_t *v)
-{
-       return arch_atomic64_add_return_release(1, v);
-}
-#define arch_atomic64_inc_return_release arch_atomic64_inc_return_release
-#endif
-
-#ifndef arch_atomic64_inc_return_relaxed
-static __always_inline s64
-arch_atomic64_inc_return_relaxed(atomic64_t *v)
-{
-       return arch_atomic64_add_return_relaxed(1, v);
-}
-#define arch_atomic64_inc_return_relaxed arch_atomic64_inc_return_relaxed
-#endif
-
-#else /* arch_atomic64_inc_return_relaxed */
-
-#ifndef arch_atomic64_inc_return_acquire
-static __always_inline s64
-arch_atomic64_inc_return_acquire(atomic64_t *v)
-{
-       s64 ret = arch_atomic64_inc_return_relaxed(v);
-       __atomic_acquire_fence();
-       return ret;
-}
-#define arch_atomic64_inc_return_acquire arch_atomic64_inc_return_acquire
-#endif
-
-#ifndef arch_atomic64_inc_return_release
-static __always_inline s64
-arch_atomic64_inc_return_release(atomic64_t *v)
-{
-       __atomic_release_fence();
-       return arch_atomic64_inc_return_relaxed(v);
-}
-#define arch_atomic64_inc_return_release arch_atomic64_inc_return_release
-#endif
-
-#ifndef arch_atomic64_inc_return
-static __always_inline s64
-arch_atomic64_inc_return(atomic64_t *v)
-{
-       s64 ret;
-       __atomic_pre_full_fence();
-       ret = arch_atomic64_inc_return_relaxed(v);
-       __atomic_post_full_fence();
-       return ret;
-}
-#define arch_atomic64_inc_return arch_atomic64_inc_return
-#endif
-
-#endif /* arch_atomic64_inc_return_relaxed */
-
-#ifndef arch_atomic64_fetch_inc_relaxed
-#ifdef arch_atomic64_fetch_inc
-#define arch_atomic64_fetch_inc_acquire arch_atomic64_fetch_inc
-#define arch_atomic64_fetch_inc_release arch_atomic64_fetch_inc
-#define arch_atomic64_fetch_inc_relaxed arch_atomic64_fetch_inc
-#endif /* arch_atomic64_fetch_inc */
-
-#ifndef arch_atomic64_fetch_inc
-static __always_inline s64
-arch_atomic64_fetch_inc(atomic64_t *v)
-{
-       return arch_atomic64_fetch_add(1, v);
-}
-#define arch_atomic64_fetch_inc arch_atomic64_fetch_inc
-#endif
-
-#ifndef arch_atomic64_fetch_inc_acquire
-static __always_inline s64
-arch_atomic64_fetch_inc_acquire(atomic64_t *v)
-{
-       return arch_atomic64_fetch_add_acquire(1, v);
-}
-#define arch_atomic64_fetch_inc_acquire arch_atomic64_fetch_inc_acquire
-#endif
-
-#ifndef arch_atomic64_fetch_inc_release
-static __always_inline s64
-arch_atomic64_fetch_inc_release(atomic64_t *v)
-{
-       return arch_atomic64_fetch_add_release(1, v);
-}
-#define arch_atomic64_fetch_inc_release arch_atomic64_fetch_inc_release
-#endif
-
-#ifndef arch_atomic64_fetch_inc_relaxed
-static __always_inline s64
-arch_atomic64_fetch_inc_relaxed(atomic64_t *v)
-{
-       return arch_atomic64_fetch_add_relaxed(1, v);
-}
-#define arch_atomic64_fetch_inc_relaxed arch_atomic64_fetch_inc_relaxed
-#endif
-
-#else /* arch_atomic64_fetch_inc_relaxed */
-
-#ifndef arch_atomic64_fetch_inc_acquire
-static __always_inline s64
-arch_atomic64_fetch_inc_acquire(atomic64_t *v)
-{
-       s64 ret = arch_atomic64_fetch_inc_relaxed(v);
-       __atomic_acquire_fence();
-       return ret;
-}
-#define arch_atomic64_fetch_inc_acquire arch_atomic64_fetch_inc_acquire
-#endif
-
-#ifndef arch_atomic64_fetch_inc_release
-static __always_inline s64
-arch_atomic64_fetch_inc_release(atomic64_t *v)
-{
-       __atomic_release_fence();
-       return arch_atomic64_fetch_inc_relaxed(v);
-}
-#define arch_atomic64_fetch_inc_release arch_atomic64_fetch_inc_release
-#endif
-
-#ifndef arch_atomic64_fetch_inc
-static __always_inline s64
-arch_atomic64_fetch_inc(atomic64_t *v)
-{
-       s64 ret;
-       __atomic_pre_full_fence();
-       ret = arch_atomic64_fetch_inc_relaxed(v);
-       __atomic_post_full_fence();
-       return ret;
-}
-#define arch_atomic64_fetch_inc arch_atomic64_fetch_inc
-#endif
-
-#endif /* arch_atomic64_fetch_inc_relaxed */
-
-#ifndef arch_atomic64_dec
-static __always_inline void
-arch_atomic64_dec(atomic64_t *v)
-{
-       arch_atomic64_sub(1, v);
-}
-#define arch_atomic64_dec arch_atomic64_dec
-#endif
-
-#ifndef arch_atomic64_dec_return_relaxed
-#ifdef arch_atomic64_dec_return
-#define arch_atomic64_dec_return_acquire arch_atomic64_dec_return
-#define arch_atomic64_dec_return_release arch_atomic64_dec_return
-#define arch_atomic64_dec_return_relaxed arch_atomic64_dec_return
-#endif /* arch_atomic64_dec_return */
-
-#ifndef arch_atomic64_dec_return
-static __always_inline s64
-arch_atomic64_dec_return(atomic64_t *v)
-{
-       return arch_atomic64_sub_return(1, v);
-}
-#define arch_atomic64_dec_return arch_atomic64_dec_return
-#endif
-
-#ifndef arch_atomic64_dec_return_acquire
-static __always_inline s64
-arch_atomic64_dec_return_acquire(atomic64_t *v)
-{
-       return arch_atomic64_sub_return_acquire(1, v);
-}
-#define arch_atomic64_dec_return_acquire arch_atomic64_dec_return_acquire
-#endif
-
-#ifndef arch_atomic64_dec_return_release
-static __always_inline s64
-arch_atomic64_dec_return_release(atomic64_t *v)
-{
-       return arch_atomic64_sub_return_release(1, v);
-}
-#define arch_atomic64_dec_return_release arch_atomic64_dec_return_release
-#endif
-
-#ifndef arch_atomic64_dec_return_relaxed
-static __always_inline s64
-arch_atomic64_dec_return_relaxed(atomic64_t *v)
-{
-       return arch_atomic64_sub_return_relaxed(1, v);
-}
-#define arch_atomic64_dec_return_relaxed arch_atomic64_dec_return_relaxed
-#endif
-
-#else /* arch_atomic64_dec_return_relaxed */
-
-#ifndef arch_atomic64_dec_return_acquire
-static __always_inline s64
-arch_atomic64_dec_return_acquire(atomic64_t *v)
-{
-       s64 ret = arch_atomic64_dec_return_relaxed(v);
-       __atomic_acquire_fence();
-       return ret;
-}
-#define arch_atomic64_dec_return_acquire arch_atomic64_dec_return_acquire
-#endif
-
-#ifndef arch_atomic64_dec_return_release
-static __always_inline s64
-arch_atomic64_dec_return_release(atomic64_t *v)
-{
-       __atomic_release_fence();
-       return arch_atomic64_dec_return_relaxed(v);
-}
-#define arch_atomic64_dec_return_release arch_atomic64_dec_return_release
-#endif
-
-#ifndef arch_atomic64_dec_return
-static __always_inline s64
-arch_atomic64_dec_return(atomic64_t *v)
-{
-       s64 ret;
-       __atomic_pre_full_fence();
-       ret = arch_atomic64_dec_return_relaxed(v);
-       __atomic_post_full_fence();
-       return ret;
-}
-#define arch_atomic64_dec_return arch_atomic64_dec_return
-#endif
-
-#endif /* arch_atomic64_dec_return_relaxed */
-
-#ifndef arch_atomic64_fetch_dec_relaxed
-#ifdef arch_atomic64_fetch_dec
-#define arch_atomic64_fetch_dec_acquire arch_atomic64_fetch_dec
-#define arch_atomic64_fetch_dec_release arch_atomic64_fetch_dec
-#define arch_atomic64_fetch_dec_relaxed arch_atomic64_fetch_dec
-#endif /* arch_atomic64_fetch_dec */
-
-#ifndef arch_atomic64_fetch_dec
-static __always_inline s64
-arch_atomic64_fetch_dec(atomic64_t *v)
-{
-       return arch_atomic64_fetch_sub(1, v);
-}
-#define arch_atomic64_fetch_dec arch_atomic64_fetch_dec
-#endif
-
-#ifndef arch_atomic64_fetch_dec_acquire
-static __always_inline s64
-arch_atomic64_fetch_dec_acquire(atomic64_t *v)
-{
-       return arch_atomic64_fetch_sub_acquire(1, v);
-}
-#define arch_atomic64_fetch_dec_acquire arch_atomic64_fetch_dec_acquire
-#endif
-
-#ifndef arch_atomic64_fetch_dec_release
-static __always_inline s64
-arch_atomic64_fetch_dec_release(atomic64_t *v)
-{
-       return arch_atomic64_fetch_sub_release(1, v);
-}
-#define arch_atomic64_fetch_dec_release arch_atomic64_fetch_dec_release
-#endif
-
-#ifndef arch_atomic64_fetch_dec_relaxed
-static __always_inline s64
-arch_atomic64_fetch_dec_relaxed(atomic64_t *v)
-{
-       return arch_atomic64_fetch_sub_relaxed(1, v);
-}
-#define arch_atomic64_fetch_dec_relaxed arch_atomic64_fetch_dec_relaxed
-#endif
-
-#else /* arch_atomic64_fetch_dec_relaxed */
-
-#ifndef arch_atomic64_fetch_dec_acquire
-static __always_inline s64
-arch_atomic64_fetch_dec_acquire(atomic64_t *v)
-{
-       s64 ret = arch_atomic64_fetch_dec_relaxed(v);
-       __atomic_acquire_fence();
-       return ret;
-}
-#define arch_atomic64_fetch_dec_acquire arch_atomic64_fetch_dec_acquire
-#endif
-
-#ifndef arch_atomic64_fetch_dec_release
-static __always_inline s64
-arch_atomic64_fetch_dec_release(atomic64_t *v)
-{
-       __atomic_release_fence();
-       return arch_atomic64_fetch_dec_relaxed(v);
-}
-#define arch_atomic64_fetch_dec_release arch_atomic64_fetch_dec_release
-#endif
-
-#ifndef arch_atomic64_fetch_dec
-static __always_inline s64
-arch_atomic64_fetch_dec(atomic64_t *v)
-{
-       s64 ret;
-       __atomic_pre_full_fence();
-       ret = arch_atomic64_fetch_dec_relaxed(v);
-       __atomic_post_full_fence();
-       return ret;
-}
-#define arch_atomic64_fetch_dec arch_atomic64_fetch_dec
-#endif
-
-#endif /* arch_atomic64_fetch_dec_relaxed */
-
-#ifndef arch_atomic64_fetch_and_relaxed
-#define arch_atomic64_fetch_and_acquire arch_atomic64_fetch_and
-#define arch_atomic64_fetch_and_release arch_atomic64_fetch_and
-#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and
-#else /* arch_atomic64_fetch_and_relaxed */
-
-#ifndef arch_atomic64_fetch_and_acquire
-static __always_inline s64
-arch_atomic64_fetch_and_acquire(s64 i, atomic64_t *v)
-{
-       s64 ret = arch_atomic64_fetch_and_relaxed(i, v);
-       __atomic_acquire_fence();
-       return ret;
-}
-#define arch_atomic64_fetch_and_acquire arch_atomic64_fetch_and_acquire
-#endif
-
-#ifndef arch_atomic64_fetch_and_release
-static __always_inline s64
-arch_atomic64_fetch_and_release(s64 i, atomic64_t *v)
-{
-       __atomic_release_fence();
-       return arch_atomic64_fetch_and_relaxed(i, v);
-}
-#define arch_atomic64_fetch_and_release arch_atomic64_fetch_and_release
-#endif
-
-#ifndef arch_atomic64_fetch_and
-static __always_inline s64
-arch_atomic64_fetch_and(s64 i, atomic64_t *v)
-{
-       s64 ret;
-       __atomic_pre_full_fence();
-       ret = arch_atomic64_fetch_and_relaxed(i, v);
-       __atomic_post_full_fence();
-       return ret;
-}
-#define arch_atomic64_fetch_and arch_atomic64_fetch_and
-#endif
-
-#endif /* arch_atomic64_fetch_and_relaxed */
-
-#ifndef arch_atomic64_andnot
-static __always_inline void
-arch_atomic64_andnot(s64 i, atomic64_t *v)
-{
-       arch_atomic64_and(~i, v);
-}
-#define arch_atomic64_andnot arch_atomic64_andnot
-#endif
-
-#ifndef arch_atomic64_fetch_andnot_relaxed
-#ifdef arch_atomic64_fetch_andnot
-#define arch_atomic64_fetch_andnot_acquire arch_atomic64_fetch_andnot
-#define arch_atomic64_fetch_andnot_release arch_atomic64_fetch_andnot
-#define arch_atomic64_fetch_andnot_relaxed arch_atomic64_fetch_andnot
-#endif /* arch_atomic64_fetch_andnot */
-
-#ifndef arch_atomic64_fetch_andnot
-static __always_inline s64
-arch_atomic64_fetch_andnot(s64 i, atomic64_t *v)
-{
-       return arch_atomic64_fetch_and(~i, v);
-}
-#define arch_atomic64_fetch_andnot arch_atomic64_fetch_andnot
-#endif
-
-#ifndef arch_atomic64_fetch_andnot_acquire
-static __always_inline s64
-arch_atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
-{
-       return arch_atomic64_fetch_and_acquire(~i, v);
-}
-#define arch_atomic64_fetch_andnot_acquire arch_atomic64_fetch_andnot_acquire
-#endif
-
-#ifndef arch_atomic64_fetch_andnot_release
-static __always_inline s64
-arch_atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
-{
-       return arch_atomic64_fetch_and_release(~i, v);
-}
-#define arch_atomic64_fetch_andnot_release arch_atomic64_fetch_andnot_release
-#endif
-
-#ifndef arch_atomic64_fetch_andnot_relaxed
-static __always_inline s64
-arch_atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v)
-{
-       return arch_atomic64_fetch_and_relaxed(~i, v);
-}
-#define arch_atomic64_fetch_andnot_relaxed arch_atomic64_fetch_andnot_relaxed
-#endif
-
-#else /* arch_atomic64_fetch_andnot_relaxed */
-
-#ifndef arch_atomic64_fetch_andnot_acquire
-static __always_inline s64
-arch_atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
-{
-       s64 ret = arch_atomic64_fetch_andnot_relaxed(i, v);
-       __atomic_acquire_fence();
-       return ret;
-}
-#define arch_atomic64_fetch_andnot_acquire arch_atomic64_fetch_andnot_acquire
-#endif
-
-#ifndef arch_atomic64_fetch_andnot_release
-static __always_inline s64
-arch_atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
-{
-       __atomic_release_fence();
-       return arch_atomic64_fetch_andnot_relaxed(i, v);
-}
-#define arch_atomic64_fetch_andnot_release arch_atomic64_fetch_andnot_release
-#endif
-
-#ifndef arch_atomic64_fetch_andnot
-static __always_inline s64
-arch_atomic64_fetch_andnot(s64 i, atomic64_t *v)
-{
-       s64 ret;
-       __atomic_pre_full_fence();
-       ret = arch_atomic64_fetch_andnot_relaxed(i, v);
-       __atomic_post_full_fence();
-       return ret;
-}
-#define arch_atomic64_fetch_andnot arch_atomic64_fetch_andnot
-#endif
-
-#endif /* arch_atomic64_fetch_andnot_relaxed */
-
-#ifndef arch_atomic64_fetch_or_relaxed
-#define arch_atomic64_fetch_or_acquire arch_atomic64_fetch_or
-#define arch_atomic64_fetch_or_release arch_atomic64_fetch_or
-#define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or
-#else /* arch_atomic64_fetch_or_relaxed */
-
-#ifndef arch_atomic64_fetch_or_acquire
-static __always_inline s64
-arch_atomic64_fetch_or_acquire(s64 i, atomic64_t *v)
-{
-       s64 ret = arch_atomic64_fetch_or_relaxed(i, v);
-       __atomic_acquire_fence();
-       return ret;
-}
-#define arch_atomic64_fetch_or_acquire arch_atomic64_fetch_or_acquire
-#endif
-
-#ifndef arch_atomic64_fetch_or_release
-static __always_inline s64
-arch_atomic64_fetch_or_release(s64 i, atomic64_t *v)
-{
-       __atomic_release_fence();
-       return arch_atomic64_fetch_or_relaxed(i, v);
-}
-#define arch_atomic64_fetch_or_release arch_atomic64_fetch_or_release
-#endif
-
-#ifndef arch_atomic64_fetch_or
-static __always_inline s64
-arch_atomic64_fetch_or(s64 i, atomic64_t *v)
-{
-       s64 ret;
-       __atomic_pre_full_fence();
-       ret = arch_atomic64_fetch_or_relaxed(i, v);
-       __atomic_post_full_fence();
-       return ret;
-}
-#define arch_atomic64_fetch_or arch_atomic64_fetch_or
-#endif
-
-#endif /* arch_atomic64_fetch_or_relaxed */
-
-#ifndef arch_atomic64_fetch_xor_relaxed
-#define arch_atomic64_fetch_xor_acquire arch_atomic64_fetch_xor
-#define arch_atomic64_fetch_xor_release arch_atomic64_fetch_xor
-#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor
-#else /* arch_atomic64_fetch_xor_relaxed */
-
-#ifndef arch_atomic64_fetch_xor_acquire
-static __always_inline s64
-arch_atomic64_fetch_xor_acquire(s64 i, atomic64_t *v)
-{
-       s64 ret = arch_atomic64_fetch_xor_relaxed(i, v);
-       __atomic_acquire_fence();
-       return ret;
-}
-#define arch_atomic64_fetch_xor_acquire arch_atomic64_fetch_xor_acquire
-#endif
-
-#ifndef arch_atomic64_fetch_xor_release
-static __always_inline s64
-arch_atomic64_fetch_xor_release(s64 i, atomic64_t *v)
-{
-       __atomic_release_fence();
-       return arch_atomic64_fetch_xor_relaxed(i, v);
-}
-#define arch_atomic64_fetch_xor_release arch_atomic64_fetch_xor_release
-#endif
-
-#ifndef arch_atomic64_fetch_xor
-static __always_inline s64
-arch_atomic64_fetch_xor(s64 i, atomic64_t *v)
-{
-       s64 ret;
-       __atomic_pre_full_fence();
-       ret = arch_atomic64_fetch_xor_relaxed(i, v);
-       __atomic_post_full_fence();
-       return ret;
-}
-#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor
-#endif
-
-#endif /* arch_atomic64_fetch_xor_relaxed */
-
-#ifndef arch_atomic64_xchg_relaxed
-#define arch_atomic64_xchg_acquire arch_atomic64_xchg
-#define arch_atomic64_xchg_release arch_atomic64_xchg
-#define arch_atomic64_xchg_relaxed arch_atomic64_xchg
-#else /* arch_atomic64_xchg_relaxed */
-
-#ifndef arch_atomic64_xchg_acquire
-static __always_inline s64
-arch_atomic64_xchg_acquire(atomic64_t *v, s64 i)
-{
-       s64 ret = arch_atomic64_xchg_relaxed(v, i);
-       __atomic_acquire_fence();
-       return ret;
-}
-#define arch_atomic64_xchg_acquire arch_atomic64_xchg_acquire
-#endif
-
-#ifndef arch_atomic64_xchg_release
-static __always_inline s64
-arch_atomic64_xchg_release(atomic64_t *v, s64 i)
-{
-       __atomic_release_fence();
-       return arch_atomic64_xchg_relaxed(v, i);
-}
-#define arch_atomic64_xchg_release arch_atomic64_xchg_release
-#endif
-
-#ifndef arch_atomic64_xchg
-static __always_inline s64
-arch_atomic64_xchg(atomic64_t *v, s64 i)
-{
-       s64 ret;
-       __atomic_pre_full_fence();
-       ret = arch_atomic64_xchg_relaxed(v, i);
-       __atomic_post_full_fence();
-       return ret;
-}
-#define arch_atomic64_xchg arch_atomic64_xchg
-#endif
-
-#endif /* arch_atomic64_xchg_relaxed */
-
-#ifndef arch_atomic64_cmpxchg_relaxed
-#define arch_atomic64_cmpxchg_acquire arch_atomic64_cmpxchg
-#define arch_atomic64_cmpxchg_release arch_atomic64_cmpxchg
-#define arch_atomic64_cmpxchg_relaxed arch_atomic64_cmpxchg
-#else /* arch_atomic64_cmpxchg_relaxed */
-
-#ifndef arch_atomic64_cmpxchg_acquire
-static __always_inline s64
-arch_atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new)
-{
-       s64 ret = arch_atomic64_cmpxchg_relaxed(v, old, new);
-       __atomic_acquire_fence();
-       return ret;
-}
-#define arch_atomic64_cmpxchg_acquire arch_atomic64_cmpxchg_acquire
-#endif
-
-#ifndef arch_atomic64_cmpxchg_release
-static __always_inline s64
-arch_atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new)
-{
-       __atomic_release_fence();
-       return arch_atomic64_cmpxchg_relaxed(v, old, new);
-}
-#define arch_atomic64_cmpxchg_release arch_atomic64_cmpxchg_release
-#endif
-
-#ifndef arch_atomic64_cmpxchg
-static __always_inline s64
-arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
-{
-       s64 ret;
-       __atomic_pre_full_fence();
-       ret = arch_atomic64_cmpxchg_relaxed(v, old, new);
-       __atomic_post_full_fence();
-       return ret;
-}
-#define arch_atomic64_cmpxchg arch_atomic64_cmpxchg
-#endif
-
-#endif /* arch_atomic64_cmpxchg_relaxed */
-
-#ifndef arch_atomic64_try_cmpxchg_relaxed
-#ifdef arch_atomic64_try_cmpxchg
-#define arch_atomic64_try_cmpxchg_acquire arch_atomic64_try_cmpxchg
-#define arch_atomic64_try_cmpxchg_release arch_atomic64_try_cmpxchg
-#define arch_atomic64_try_cmpxchg_relaxed arch_atomic64_try_cmpxchg
-#endif /* arch_atomic64_try_cmpxchg */
-
-#ifndef arch_atomic64_try_cmpxchg
-static __always_inline bool
-arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
-{
-       s64 r, o = *old;
-       r = arch_atomic64_cmpxchg(v, o, new);
-       if (unlikely(r != o))
-               *old = r;
-       return likely(r == o);
-}
-#define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg
-#endif
-
-#ifndef arch_atomic64_try_cmpxchg_acquire
-static __always_inline bool
-arch_atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
-{
-       s64 r, o = *old;
-       r = arch_atomic64_cmpxchg_acquire(v, o, new);
-       if (unlikely(r != o))
-               *old = r;
-       return likely(r == o);
-}
-#define arch_atomic64_try_cmpxchg_acquire arch_atomic64_try_cmpxchg_acquire
-#endif
-
-#ifndef arch_atomic64_try_cmpxchg_release
-static __always_inline bool
-arch_atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
-{
-       s64 r, o = *old;
-       r = arch_atomic64_cmpxchg_release(v, o, new);
-       if (unlikely(r != o))
-               *old = r;
-       return likely(r == o);
-}
-#define arch_atomic64_try_cmpxchg_release arch_atomic64_try_cmpxchg_release
-#endif
-
-#ifndef arch_atomic64_try_cmpxchg_relaxed
-static __always_inline bool
-arch_atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
-{
-       s64 r, o = *old;
-       r = arch_atomic64_cmpxchg_relaxed(v, o, new);
-       if (unlikely(r != o))
-               *old = r;
-       return likely(r == o);
-}
-#define arch_atomic64_try_cmpxchg_relaxed arch_atomic64_try_cmpxchg_relaxed
-#endif
-
-#else /* arch_atomic64_try_cmpxchg_relaxed */
-
-#ifndef arch_atomic64_try_cmpxchg_acquire
-static __always_inline bool
-arch_atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
-{
-       bool ret = arch_atomic64_try_cmpxchg_relaxed(v, old, new);
-       __atomic_acquire_fence();
-       return ret;
-}
-#define arch_atomic64_try_cmpxchg_acquire arch_atomic64_try_cmpxchg_acquire
-#endif
-
-#ifndef arch_atomic64_try_cmpxchg_release
-static __always_inline bool
-arch_atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
-{
-       __atomic_release_fence();
-       return arch_atomic64_try_cmpxchg_relaxed(v, old, new);
-}
-#define arch_atomic64_try_cmpxchg_release arch_atomic64_try_cmpxchg_release
-#endif
-
-#ifndef arch_atomic64_try_cmpxchg
-static __always_inline bool
-arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
-{
-       bool ret;
-       __atomic_pre_full_fence();
-       ret = arch_atomic64_try_cmpxchg_relaxed(v, old, new);
-       __atomic_post_full_fence();
-       return ret;
-}
-#define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg
-#endif
-
-#endif /* arch_atomic64_try_cmpxchg_relaxed */
-
-#ifndef arch_atomic64_sub_and_test
-/**
- * arch_atomic64_sub_and_test - subtract value from variable and test result
- * @i: integer value to subtract
- * @v: pointer of type atomic64_t
- *
- * Atomically subtracts @i from @v and returns
- * true if the result is zero, or false for all
- * other cases.
- */
-static __always_inline bool
-arch_atomic64_sub_and_test(s64 i, atomic64_t *v)
-{
-       return arch_atomic64_sub_return(i, v) == 0;
-}
-#define arch_atomic64_sub_and_test arch_atomic64_sub_and_test
-#endif
-
-#ifndef arch_atomic64_dec_and_test
-/**
- * arch_atomic64_dec_and_test - decrement and test
- * @v: pointer of type atomic64_t
- *
- * Atomically decrements @v by 1 and
- * returns true if the result is 0, or false for all other
- * cases.
- */
-static __always_inline bool
-arch_atomic64_dec_and_test(atomic64_t *v)
-{
-       return arch_atomic64_dec_return(v) == 0;
-}
-#define arch_atomic64_dec_and_test arch_atomic64_dec_and_test
-#endif
-
-#ifndef arch_atomic64_inc_and_test
-/**
- * arch_atomic64_inc_and_test - increment and test
- * @v: pointer of type atomic64_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-static __always_inline bool
-arch_atomic64_inc_and_test(atomic64_t *v)
-{
-       return arch_atomic64_inc_return(v) == 0;
-}
-#define arch_atomic64_inc_and_test arch_atomic64_inc_and_test
-#endif
-
-#ifndef arch_atomic64_add_negative
-/**
- * arch_atomic64_add_negative - add and test if negative
- * @i: integer value to add
- * @v: pointer of type atomic64_t
- *
- * Atomically adds @i to @v and returns true
- * if the result is negative, or false when
- * result is greater than or equal to zero.
- */
-static __always_inline bool
-arch_atomic64_add_negative(s64 i, atomic64_t *v)
-{
-       return arch_atomic64_add_return(i, v) < 0;
-}
-#define arch_atomic64_add_negative arch_atomic64_add_negative
-#endif
-
-#ifndef arch_atomic64_fetch_add_unless
-/**
- * arch_atomic64_fetch_add_unless - add unless the number is already a given value
- * @v: pointer of type atomic64_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, so long as @v was not already @u.
- * Returns original value of @v
- */
-static __always_inline s64
-arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
-{
-       s64 c = arch_atomic64_read(v);
-
-       do {
-               if (unlikely(c == u))
-                       break;
-       } while (!arch_atomic64_try_cmpxchg(v, &c, c + a));
-
-       return c;
-}
-#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
-#endif
-
-#ifndef arch_atomic64_add_unless
-/**
- * arch_atomic64_add_unless - add unless the number is already a given value
- * @v: pointer of type atomic64_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, if @v was not already @u.
- * Returns true if the addition was done.
- */
-static __always_inline bool
-arch_atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
-{
-       return arch_atomic64_fetch_add_unless(v, a, u) != u;
-}
-#define arch_atomic64_add_unless arch_atomic64_add_unless
-#endif
-
-#ifndef arch_atomic64_inc_not_zero
-/**
- * arch_atomic64_inc_not_zero - increment unless the number is zero
- * @v: pointer of type atomic64_t
- *
- * Atomically increments @v by 1, if @v is non-zero.
- * Returns true if the increment was done.
- */
-static __always_inline bool
-arch_atomic64_inc_not_zero(atomic64_t *v)
-{
-       return arch_atomic64_add_unless(v, 1, 0);
-}
-#define arch_atomic64_inc_not_zero arch_atomic64_inc_not_zero
-#endif
-
-#ifndef arch_atomic64_inc_unless_negative
-static __always_inline bool
-arch_atomic64_inc_unless_negative(atomic64_t *v)
-{
-       s64 c = arch_atomic64_read(v);
-
-       do {
-               if (unlikely(c < 0))
-                       return false;
-       } while (!arch_atomic64_try_cmpxchg(v, &c, c + 1));
-
-       return true;
-}
-#define arch_atomic64_inc_unless_negative arch_atomic64_inc_unless_negative
-#endif
-
-#ifndef arch_atomic64_dec_unless_positive
-static __always_inline bool
-arch_atomic64_dec_unless_positive(atomic64_t *v)
-{
-       s64 c = arch_atomic64_read(v);
-
-       do {
-               if (unlikely(c > 0))
-                       return false;
-       } while (!arch_atomic64_try_cmpxchg(v, &c, c - 1));
-
-       return true;
-}
-#define arch_atomic64_dec_unless_positive arch_atomic64_dec_unless_positive
-#endif
-
-#ifndef arch_atomic64_dec_if_positive
-static __always_inline s64
-arch_atomic64_dec_if_positive(atomic64_t *v)
-{
-       s64 dec, c = arch_atomic64_read(v);
-
-       do {
-               dec = c - 1;
-               if (unlikely(dec < 0))
-                       break;
-       } while (!arch_atomic64_try_cmpxchg(v, &c, dec));
-
-       return dec;
-}
-#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
-#endif
-
-#endif /* _LINUX_ATOMIC_FALLBACK_H */
-// cca554917d7ea73d5e3e7397dd70c484cad9b2c4
index ed1d3ff..8dd57c3 100644 (file)
@@ -77,9 +77,8 @@
        __ret;                                                          \
 })
 
-#include <linux/atomic-arch-fallback.h>
-#include <asm-generic/atomic-instrumented.h>
-
-#include <asm-generic/atomic-long.h>
+#include <linux/atomic/atomic-arch-fallback.h>
+#include <linux/atomic/atomic-long.h>
+#include <linux/atomic/atomic-instrumented.h>
 
 #endif /* _LINUX_ATOMIC_H */
diff --git a/include/linux/atomic/atomic-arch-fallback.h b/include/linux/atomic/atomic-arch-fallback.h
new file mode 100644 (file)
index 0000000..a3dba31
--- /dev/null
@@ -0,0 +1,2361 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Generated by scripts/atomic/gen-atomic-fallback.sh
+// DO NOT MODIFY THIS FILE DIRECTLY
+
+#ifndef _LINUX_ATOMIC_FALLBACK_H
+#define _LINUX_ATOMIC_FALLBACK_H
+
+#include <linux/compiler.h>
+
+#ifndef arch_xchg_relaxed
+#define arch_xchg_acquire arch_xchg
+#define arch_xchg_release arch_xchg
+#define arch_xchg_relaxed arch_xchg
+#else /* arch_xchg_relaxed */
+
+#ifndef arch_xchg_acquire
+#define arch_xchg_acquire(...) \
+       __atomic_op_acquire(arch_xchg, __VA_ARGS__)
+#endif
+
+#ifndef arch_xchg_release
+#define arch_xchg_release(...) \
+       __atomic_op_release(arch_xchg, __VA_ARGS__)
+#endif
+
+#ifndef arch_xchg
+#define arch_xchg(...) \
+       __atomic_op_fence(arch_xchg, __VA_ARGS__)
+#endif
+
+#endif /* arch_xchg_relaxed */
+
+#ifndef arch_cmpxchg_relaxed
+#define arch_cmpxchg_acquire arch_cmpxchg
+#define arch_cmpxchg_release arch_cmpxchg
+#define arch_cmpxchg_relaxed arch_cmpxchg
+#else /* arch_cmpxchg_relaxed */
+
+#ifndef arch_cmpxchg_acquire
+#define arch_cmpxchg_acquire(...) \
+       __atomic_op_acquire(arch_cmpxchg, __VA_ARGS__)
+#endif
+
+#ifndef arch_cmpxchg_release
+#define arch_cmpxchg_release(...) \
+       __atomic_op_release(arch_cmpxchg, __VA_ARGS__)
+#endif
+
+#ifndef arch_cmpxchg
+#define arch_cmpxchg(...) \
+       __atomic_op_fence(arch_cmpxchg, __VA_ARGS__)
+#endif
+
+#endif /* arch_cmpxchg_relaxed */
+
+#ifndef arch_cmpxchg64_relaxed
+#define arch_cmpxchg64_acquire arch_cmpxchg64
+#define arch_cmpxchg64_release arch_cmpxchg64
+#define arch_cmpxchg64_relaxed arch_cmpxchg64
+#else /* arch_cmpxchg64_relaxed */
+
+#ifndef arch_cmpxchg64_acquire
+#define arch_cmpxchg64_acquire(...) \
+       __atomic_op_acquire(arch_cmpxchg64, __VA_ARGS__)
+#endif
+
+#ifndef arch_cmpxchg64_release
+#define arch_cmpxchg64_release(...) \
+       __atomic_op_release(arch_cmpxchg64, __VA_ARGS__)
+#endif
+
+#ifndef arch_cmpxchg64
+#define arch_cmpxchg64(...) \
+       __atomic_op_fence(arch_cmpxchg64, __VA_ARGS__)
+#endif
+
+#endif /* arch_cmpxchg64_relaxed */
+
+#ifndef arch_try_cmpxchg_relaxed
+#ifdef arch_try_cmpxchg
+#define arch_try_cmpxchg_acquire arch_try_cmpxchg
+#define arch_try_cmpxchg_release arch_try_cmpxchg
+#define arch_try_cmpxchg_relaxed arch_try_cmpxchg
+#endif /* arch_try_cmpxchg */
+
+#ifndef arch_try_cmpxchg
+#define arch_try_cmpxchg(_ptr, _oldp, _new) \
+({ \
+       typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+       ___r = arch_cmpxchg((_ptr), ___o, (_new)); \
+       if (unlikely(___r != ___o)) \
+               *___op = ___r; \
+       likely(___r == ___o); \
+})
+#endif /* arch_try_cmpxchg */
+
+#ifndef arch_try_cmpxchg_acquire
+#define arch_try_cmpxchg_acquire(_ptr, _oldp, _new) \
+({ \
+       typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+       ___r = arch_cmpxchg_acquire((_ptr), ___o, (_new)); \
+       if (unlikely(___r != ___o)) \
+               *___op = ___r; \
+       likely(___r == ___o); \
+})
+#endif /* arch_try_cmpxchg_acquire */
+
+#ifndef arch_try_cmpxchg_release
+#define arch_try_cmpxchg_release(_ptr, _oldp, _new) \
+({ \
+       typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+       ___r = arch_cmpxchg_release((_ptr), ___o, (_new)); \
+       if (unlikely(___r != ___o)) \
+               *___op = ___r; \
+       likely(___r == ___o); \
+})
+#endif /* arch_try_cmpxchg_release */
+
+#ifndef arch_try_cmpxchg_relaxed
+#define arch_try_cmpxchg_relaxed(_ptr, _oldp, _new) \
+({ \
+       typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+       ___r = arch_cmpxchg_relaxed((_ptr), ___o, (_new)); \
+       if (unlikely(___r != ___o)) \
+               *___op = ___r; \
+       likely(___r == ___o); \
+})
+#endif /* arch_try_cmpxchg_relaxed */
+
+#else /* arch_try_cmpxchg_relaxed */
+
+#ifndef arch_try_cmpxchg_acquire
+#define arch_try_cmpxchg_acquire(...) \
+       __atomic_op_acquire(arch_try_cmpxchg, __VA_ARGS__)
+#endif
+
+#ifndef arch_try_cmpxchg_release
+#define arch_try_cmpxchg_release(...) \
+       __atomic_op_release(arch_try_cmpxchg, __VA_ARGS__)
+#endif
+
+#ifndef arch_try_cmpxchg
+#define arch_try_cmpxchg(...) \
+       __atomic_op_fence(arch_try_cmpxchg, __VA_ARGS__)
+#endif
+
+#endif /* arch_try_cmpxchg_relaxed */
+
+#ifndef arch_atomic_read_acquire
+static __always_inline int
+arch_atomic_read_acquire(const atomic_t *v)
+{
+       return smp_load_acquire(&(v)->counter);
+}
+#define arch_atomic_read_acquire arch_atomic_read_acquire
+#endif
+
+#ifndef arch_atomic_set_release
+static __always_inline void
+arch_atomic_set_release(atomic_t *v, int i)
+{
+       smp_store_release(&(v)->counter, i);
+}
+#define arch_atomic_set_release arch_atomic_set_release
+#endif
+
+#ifndef arch_atomic_add_return_relaxed
+#define arch_atomic_add_return_acquire arch_atomic_add_return
+#define arch_atomic_add_return_release arch_atomic_add_return
+#define arch_atomic_add_return_relaxed arch_atomic_add_return
+#else /* arch_atomic_add_return_relaxed */
+
+#ifndef arch_atomic_add_return_acquire
+static __always_inline int
+arch_atomic_add_return_acquire(int i, atomic_t *v)
+{
+       int ret = arch_atomic_add_return_relaxed(i, v);
+       __atomic_acquire_fence();
+       return ret;
+}
+#define arch_atomic_add_return_acquire arch_atomic_add_return_acquire
+#endif
+
+#ifndef arch_atomic_add_return_release
+static __always_inline int
+arch_atomic_add_return_release(int i, atomic_t *v)
+{
+       __atomic_release_fence();
+       return arch_atomic_add_return_relaxed(i, v);
+}
+#define arch_atomic_add_return_release arch_atomic_add_return_release
+#endif
+
+#ifndef arch_atomic_add_return
+static __always_inline int
+arch_atomic_add_return(int i, atomic_t *v)
+{
+       int ret;
+       __atomic_pre_full_fence();
+       ret = arch_atomic_add_return_relaxed(i, v);
+       __atomic_post_full_fence();
+       return ret;
+}
+#define arch_atomic_add_return arch_atomic_add_return
+#endif
+
+#endif /* arch_atomic_add_return_relaxed */
+
+#ifndef arch_atomic_fetch_add_relaxed
+#define arch_atomic_fetch_add_acquire arch_atomic_fetch_add
+#define arch_atomic_fetch_add_release arch_atomic_fetch_add
+#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add
+#else /* arch_atomic_fetch_add_relaxed */
+
+#ifndef arch_atomic_fetch_add_acquire
+static __always_inline int
+arch_atomic_fetch_add_acquire(int i, atomic_t *v)
+{
+       int ret = arch_atomic_fetch_add_relaxed(i, v);
+       __atomic_acquire_fence();
+       return ret;
+}
+#define arch_atomic_fetch_add_acquire arch_atomic_fetch_add_acquire
+#endif
+
+#ifndef arch_atomic_fetch_add_release
+static __always_inline int
+arch_atomic_fetch_add_release(int i, atomic_t *v)
+{
+       __atomic_release_fence();
+       return arch_atomic_fetch_add_relaxed(i, v);
+}
+#define arch_atomic_fetch_add_release arch_atomic_fetch_add_release
+#endif
+
+#ifndef arch_atomic_fetch_add
+static __always_inline int
+arch_atomic_fetch_add(int i, atomic_t *v)
+{
+       int ret;
+       __atomic_pre_full_fence();
+       ret = arch_atomic_fetch_add_relaxed(i, v);
+       __atomic_post_full_fence();
+       return ret;
+}
+#define arch_atomic_fetch_add arch_atomic_fetch_add
+#endif
+
+#endif /* arch_atomic_fetch_add_relaxed */
+
+#ifndef arch_atomic_sub_return_relaxed
+#define arch_atomic_sub_return_acquire arch_atomic_sub_return
+#define arch_atomic_sub_return_release arch_atomic_sub_return
+#define arch_atomic_sub_return_relaxed arch_atomic_sub_return
+#else /* arch_atomic_sub_return_relaxed */
+
+#ifndef arch_atomic_sub_return_acquire
+static __always_inline int
+arch_atomic_sub_return_acquire(int i, atomic_t *v)
+{
+       int ret = arch_atomic_sub_return_relaxed(i, v);
+       __atomic_acquire_fence();
+       return ret;
+}
+#define arch_atomic_sub_return_acquire arch_atomic_sub_return_acquire
+#endif
+
+#ifndef arch_atomic_sub_return_release
+static __always_inline int
+arch_atomic_sub_return_release(int i, atomic_t *v)
+{
+       __atomic_release_fence();
+       return arch_atomic_sub_return_relaxed(i, v);
+}
+#define arch_atomic_sub_return_release arch_atomic_sub_return_release
+#endif
+
+#ifndef arch_atomic_sub_return
+static __always_inline int
+arch_atomic_sub_return(int i, atomic_t *v)
+{
+       int ret;
+       __atomic_pre_full_fence();
+       ret = arch_atomic_sub_return_relaxed(i, v);
+       __atomic_post_full_fence();
+       return ret;
+}
+#define arch_atomic_sub_return arch_atomic_sub_return
+#endif
+
+#endif /* arch_atomic_sub_return_relaxed */
+
+#ifndef arch_atomic_fetch_sub_relaxed
+#define arch_atomic_fetch_sub_acquire arch_atomic_fetch_sub
+#define arch_atomic_fetch_sub_release arch_atomic_fetch_sub
+#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub
+#else /* arch_atomic_fetch_sub_relaxed */
+
+#ifndef arch_atomic_fetch_sub_acquire
+static __always_inline int
+arch_atomic_fetch_sub_acquire(int i, atomic_t *v)
+{
+       int ret = arch_atomic_fetch_sub_relaxed(i, v);
+       __atomic_acquire_fence();
+       return ret;
+}
+#define arch_atomic_fetch_sub_acquire arch_atomic_fetch_sub_acquire
+#endif
+
+#ifndef arch_atomic_fetch_sub_release
+static __always_inline int
+arch_atomic_fetch_sub_release(int i, atomic_t *v)
+{
+       __atomic_release_fence();
+       return arch_atomic_fetch_sub_relaxed(i, v);
+}
+#define arch_atomic_fetch_sub_release arch_atomic_fetch_sub_release
+#endif
+
+#ifndef arch_atomic_fetch_sub
+static __always_inline int
+arch_atomic_fetch_sub(int i, atomic_t *v)
+{
+       int ret;
+       __atomic_pre_full_fence();
+       ret = arch_atomic_fetch_sub_relaxed(i, v);
+       __atomic_post_full_fence();
+       return ret;
+}
+#define arch_atomic_fetch_sub arch_atomic_fetch_sub
+#endif
+
+#endif /* arch_atomic_fetch_sub_relaxed */
+
+#ifndef arch_atomic_inc
+static __always_inline void
+arch_atomic_inc(atomic_t *v)
+{
+       arch_atomic_add(1, v);
+}
+#define arch_atomic_inc arch_atomic_inc
+#endif
+
+#ifndef arch_atomic_inc_return_relaxed
+#ifdef arch_atomic_inc_return
+#define arch_atomic_inc_return_acquire arch_atomic_inc_return
+#define arch_atomic_inc_return_release arch_atomic_inc_return
+#define arch_atomic_inc_return_relaxed arch_atomic_inc_return
+#endif /* arch_atomic_inc_return */
+
+#ifndef arch_atomic_inc_return
+static __always_inline int
+arch_atomic_inc_return(atomic_t *v)
+{
+       return arch_atomic_add_return(1, v);
+}
+#define arch_atomic_inc_return arch_atomic_inc_return
+#endif
+
+#ifndef arch_atomic_inc_return_acquire
+static __always_inline int
+arch_atomic_inc_return_acquire(atomic_t *v)
+{
+       return arch_atomic_add_return_acquire(1, v);
+}
+#define arch_atomic_inc_return_acquire arch_atomic_inc_return_acquire
+#endif
+
+#ifndef arch_atomic_inc_return_release
+static __always_inline int
+arch_atomic_inc_return_release(atomic_t *v)
+{
+       return arch_atomic_add_return_release(1, v);
+}
+#define arch_atomic_inc_return_release arch_atomic_inc_return_release
+#endif
+
+#ifndef arch_atomic_inc_return_relaxed
+static __always_inline int
+arch_atomic_inc_return_relaxed(atomic_t *v)
+{
+       return arch_atomic_add_return_relaxed(1, v);
+}
+#define arch_atomic_inc_return_relaxed arch_atomic_inc_return_relaxed
+#endif
+
+#else /* arch_atomic_inc_return_relaxed */
+
+#ifndef arch_atomic_inc_return_acquire
+static __always_inline int
+arch_atomic_inc_return_acquire(atomic_t *v)
+{
+       int ret = arch_atomic_inc_return_relaxed(v);
+       __atomic_acquire_fence();
+       return ret;
+}
+#define arch_atomic_inc_return_acquire arch_atomic_inc_return_acquire
+#endif
+
+#ifndef arch_atomic_inc_return_release
+static __always_inline int
+arch_atomic_inc_return_release(atomic_t *v)
+{
+       __atomic_release_fence();
+       return arch_atomic_inc_return_relaxed(v);
+}
+#define arch_atomic_inc_return_release arch_atomic_inc_return_release
+#endif
+
+#ifndef arch_atomic_inc_return
+static __always_inline int
+arch_atomic_inc_return(atomic_t *v)
+{
+       int ret;
+       __atomic_pre_full_fence();
+       ret = arch_atomic_inc_return_relaxed(v);
+       __atomic_post_full_fence();
+       return ret;
+}
+#define arch_atomic_inc_return arch_atomic_inc_return
+#endif
+
+#endif /* arch_atomic_inc_return_relaxed */
+
+#ifndef arch_atomic_fetch_inc_relaxed
+#ifdef arch_atomic_fetch_inc
+#define arch_atomic_fetch_inc_acquire arch_atomic_fetch_inc
+#define arch_atomic_fetch_inc_release arch_atomic_fetch_inc
+#define arch_atomic_fetch_inc_relaxed arch_atomic_fetch_inc
+#endif /* arch_atomic_fetch_inc */
+
+#ifndef arch_atomic_fetch_inc
+static __always_inline int
+arch_atomic_fetch_inc(atomic_t *v)
+{
+       return arch_atomic_fetch_add(1, v);
+}
+#define arch_atomic_fetch_inc arch_atomic_fetch_inc
+#endif
+
+#ifndef arch_atomic_fetch_inc_acquire
+static __always_inline int
+arch_atomic_fetch_inc_acquire(atomic_t *v)
+{
+       return arch_atomic_fetch_add_acquire(1, v);
+}
+#define arch_atomic_fetch_inc_acquire arch_atomic_fetch_inc_acquire
+#endif
+
+#ifndef arch_atomic_fetch_inc_release
+static __always_inline int
+arch_atomic_fetch_inc_release(atomic_t *v)
+{
+       return arch_atomic_fetch_add_release(1, v);
+}
+#define arch_atomic_fetch_inc_release arch_atomic_fetch_inc_release
+#endif
+
+#ifndef arch_atomic_fetch_inc_relaxed
+static __always_inline int
+arch_atomic_fetch_inc_relaxed(atomic_t *v)
+{
+       return arch_atomic_fetch_add_relaxed(1, v);
+}
+#define arch_atomic_fetch_inc_relaxed arch_atomic_fetch_inc_relaxed
+#endif
+
+#else /* arch_atomic_fetch_inc_relaxed */
+
+#ifndef arch_atomic_fetch_inc_acquire
+static __always_inline int
+arch_atomic_fetch_inc_acquire(atomic_t *v)
+{
+       int ret = arch_atomic_fetch_inc_relaxed(v);
+       __atomic_acquire_fence();
+       return ret;
+}
+#define arch_atomic_fetch_inc_acquire arch_atomic_fetch_inc_acquire
+#endif
+
+#ifndef arch_atomic_fetch_inc_release
+static __always_inline int
+arch_atomic_fetch_inc_release(atomic_t *v)
+{
+       __atomic_release_fence();
+       return arch_atomic_fetch_inc_relaxed(v);
+}
+#define arch_atomic_fetch_inc_release arch_atomic_fetch_inc_release
+#endif
+
+#ifndef arch_atomic_fetch_inc
+static __always_inline int
+arch_atomic_fetch_inc(atomic_t *v)
+{
+       int ret;
+       __atomic_pre_full_fence();
+       ret = arch_atomic_fetch_inc_relaxed(v);
+       __atomic_post_full_fence();
+       return ret;
+}
+#define arch_atomic_fetch_inc arch_atomic_fetch_inc
+#endif
+
+#endif /* arch_atomic_fetch_inc_relaxed */
+
+#ifndef arch_atomic_dec
+static __always_inline void
+arch_atomic_dec(atomic_t *v)
+{
+       arch_atomic_sub(1, v);
+}
+#define arch_atomic_dec arch_atomic_dec
+#endif
+
+#ifndef arch_atomic_dec_return_relaxed
+#ifdef arch_atomic_dec_return
+#define arch_atomic_dec_return_acquire arch_atomic_dec_return
+#define arch_atomic_dec_return_release arch_atomic_dec_return
+#define arch_atomic_dec_return_relaxed arch_atomic_dec_return
+#endif /* arch_atomic_dec_return */
+
+#ifndef arch_atomic_dec_return
+static __always_inline int
+arch_atomic_dec_return(atomic_t *v)
+{
+       return arch_atomic_sub_return(1, v);
+}
+#define arch_atomic_dec_return arch_atomic_dec_return
+#endif
+
+#ifndef arch_atomic_dec_return_acquire
+static __always_inline int
+arch_atomic_dec_return_acquire(atomic_t *v)
+{
+       return arch_atomic_sub_return_acquire(1, v);
+}
+#define arch_atomic_dec_return_acquire arch_atomic_dec_return_acquire
+#endif
+
+#ifndef arch_atomic_dec_return_release
+static __always_inline int
+arch_atomic_dec_return_release(atomic_t *v)
+{
+       return arch_atomic_sub_return_release(1, v);
+}
+#define arch_atomic_dec_return_release arch_atomic_dec_return_release
+#endif
+
+#ifndef arch_atomic_dec_return_relaxed
+static __always_inline int
+arch_atomic_dec_return_relaxed(atomic_t *v)
+{
+       return arch_atomic_sub_return_relaxed(1, v);
+}
+#define arch_atomic_dec_return_relaxed arch_atomic_dec_return_relaxed
+#endif
+
+#else /* arch_atomic_dec_return_relaxed */
+
+#ifndef arch_atomic_dec_return_acquire
+static __always_inline int
+arch_atomic_dec_return_acquire(atomic_t *v)
+{
+       int ret = arch_atomic_dec_return_relaxed(v);
+       __atomic_acquire_fence();
+       return ret;
+}
+#define arch_atomic_dec_return_acquire arch_atomic_dec_return_acquire
+#endif
+
+#ifndef arch_atomic_dec_return_release
+static __always_inline int
+arch_atomic_dec_return_release(atomic_t *v)
+{
+       __atomic_release_fence();
+       return arch_atomic_dec_return_relaxed(v);
+}
+#define arch_atomic_dec_return_release arch_atomic_dec_return_release
+#endif
+
+#ifndef arch_atomic_dec_return
+static __always_inline int
+arch_atomic_dec_return(atomic_t *v)
+{
+       int ret;
+       __atomic_pre_full_fence();
+       ret = arch_atomic_dec_return_relaxed(v);
+       __atomic_post_full_fence();
+       return ret;
+}
+#define arch_atomic_dec_return arch_atomic_dec_return
+#endif
+
+#endif /* arch_atomic_dec_return_relaxed */
+
+#ifndef arch_atomic_fetch_dec_relaxed
+#ifdef arch_atomic_fetch_dec
+#define arch_atomic_fetch_dec_acquire arch_atomic_fetch_dec
+#define arch_atomic_fetch_dec_release arch_atomic_fetch_dec
+#define arch_atomic_fetch_dec_relaxed arch_atomic_fetch_dec
+#endif /* arch_atomic_fetch_dec */
+
+#ifndef arch_atomic_fetch_dec
+static __always_inline int
+arch_atomic_fetch_dec(atomic_t *v)
+{
+       return arch_atomic_fetch_sub(1, v);
+}
+#define arch_atomic_fetch_dec arch_atomic_fetch_dec
+#endif
+
+#ifndef arch_atomic_fetch_dec_acquire
+static __always_inline int
+arch_atomic_fetch_dec_acquire(atomic_t *v)
+{
+       return arch_atomic_fetch_sub_acquire(1, v);
+}
+#define arch_atomic_fetch_dec_acquire arch_atomic_fetch_dec_acquire
+#endif
+
+#ifndef arch_atomic_fetch_dec_release
+static __always_inline int
+arch_atomic_fetch_dec_release(atomic_t *v)
+{
+       return arch_atomic_fetch_sub_release(1, v);
+}
+#define arch_atomic_fetch_dec_release arch_atomic_fetch_dec_release
+#endif
+
+#ifndef arch_atomic_fetch_dec_relaxed
+static __always_inline int
+arch_atomic_fetch_dec_relaxed(atomic_t *v)
+{
+       return arch_atomic_fetch_sub_relaxed(1, v);
+}
+#define arch_atomic_fetch_dec_relaxed arch_atomic_fetch_dec_relaxed
+#endif
+
+#else /* arch_atomic_fetch_dec_relaxed */
+
+#ifndef arch_atomic_fetch_dec_acquire
+static __always_inline int
+arch_atomic_fetch_dec_acquire(atomic_t *v)
+{
+       int ret = arch_atomic_fetch_dec_relaxed(v);
+       __atomic_acquire_fence();
+       return ret;
+}
+#define arch_atomic_fetch_dec_acquire arch_atomic_fetch_dec_acquire
+#endif
+
+#ifndef arch_atomic_fetch_dec_release
+static __always_inline int
+arch_atomic_fetch_dec_release(atomic_t *v)
+{
+       __atomic_release_fence();
+       return arch_atomic_fetch_dec_relaxed(v);
+}
+#define arch_atomic_fetch_dec_release arch_atomic_fetch_dec_release
+#endif
+
+#ifndef arch_atomic_fetch_dec
+static __always_inline int
+arch_atomic_fetch_dec(atomic_t *v)
+{
+       int ret;
+       __atomic_pre_full_fence();
+       ret = arch_atomic_fetch_dec_relaxed(v);
+       __atomic_post_full_fence();
+       return ret;
+}
+#define arch_atomic_fetch_dec arch_atomic_fetch_dec
+#endif
+
+#endif /* arch_atomic_fetch_dec_relaxed */
+
+#ifndef arch_atomic_fetch_and_relaxed
+#define arch_atomic_fetch_and_acquire arch_atomic_fetch_and
+#define arch_atomic_fetch_and_release arch_atomic_fetch_and
+#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and
+#else /* arch_atomic_fetch_and_relaxed */
+
+#ifndef arch_atomic_fetch_and_acquire
+static __always_inline int
+arch_atomic_fetch_and_acquire(int i, atomic_t *v)
+{
+       int ret = arch_atomic_fetch_and_relaxed(i, v);
+       __atomic_acquire_fence();
+       return ret;
+}
+#define arch_atomic_fetch_and_acquire arch_atomic_fetch_and_acquire
+#endif
+
+#ifndef arch_atomic_fetch_and_release
+static __always_inline int
+arch_atomic_fetch_and_release(int i, atomic_t *v)
+{
+       __atomic_release_fence();
+       return arch_atomic_fetch_and_relaxed(i, v);
+}
+#define arch_atomic_fetch_and_release arch_atomic_fetch_and_release
+#endif
+
+#ifndef arch_atomic_fetch_and
+static __always_inline int
+arch_atomic_fetch_and(int i, atomic_t *v)
+{
+       int ret;
+       __atomic_pre_full_fence();
+       ret = arch_atomic_fetch_and_relaxed(i, v);
+       __atomic_post_full_fence();
+       return ret;
+}
+#define arch_atomic_fetch_and arch_atomic_fetch_and
+#endif
+
+#endif /* arch_atomic_fetch_and_relaxed */
+
+#ifndef arch_atomic_andnot
+static __always_inline void
+arch_atomic_andnot(int i, atomic_t *v)
+{
+       arch_atomic_and(~i, v);
+}
+#define arch_atomic_andnot arch_atomic_andnot
+#endif
+
+#ifndef arch_atomic_fetch_andnot_relaxed
+#ifdef arch_atomic_fetch_andnot
+#define arch_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot
+#define arch_atomic_fetch_andnot_release arch_atomic_fetch_andnot
+#define arch_atomic_fetch_andnot_relaxed arch_atomic_fetch_andnot
+#endif /* arch_atomic_fetch_andnot */
+
+#ifndef arch_atomic_fetch_andnot
+static __always_inline int
+arch_atomic_fetch_andnot(int i, atomic_t *v)
+{
+       return arch_atomic_fetch_and(~i, v);
+}
+#define arch_atomic_fetch_andnot arch_atomic_fetch_andnot
+#endif
+
+#ifndef arch_atomic_fetch_andnot_acquire
+static __always_inline int
+arch_atomic_fetch_andnot_acquire(int i, atomic_t *v)
+{
+       return arch_atomic_fetch_and_acquire(~i, v);
+}
+#define arch_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot_acquire
+#endif
+
+#ifndef arch_atomic_fetch_andnot_release
+static __always_inline int
+arch_atomic_fetch_andnot_release(int i, atomic_t *v)
+{
+       return arch_atomic_fetch_and_release(~i, v);
+}
+#define arch_atomic_fetch_andnot_release arch_atomic_fetch_andnot_release
+#endif
+
+#ifndef arch_atomic_fetch_andnot_relaxed
+static __always_inline int
+arch_atomic_fetch_andnot_relaxed(int i, atomic_t *v)
+{
+       return arch_atomic_fetch_and_relaxed(~i, v);
+}
+#define arch_atomic_fetch_andnot_relaxed arch_atomic_fetch_andnot_relaxed
+#endif
+
+#else /* arch_atomic_fetch_andnot_relaxed */
+
+#ifndef arch_atomic_fetch_andnot_acquire
+static __always_inline int
+arch_atomic_fetch_andnot_acquire(int i, atomic_t *v)
+{
+       int ret = arch_atomic_fetch_andnot_relaxed(i, v);
+       __atomic_acquire_fence();
+       return ret;
+}
+#define arch_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot_acquire
+#endif
+
+#ifndef arch_atomic_fetch_andnot_release
+static __always_inline int
+arch_atomic_fetch_andnot_release(int i, atomic_t *v)
+{
+       __atomic_release_fence();
+       return arch_atomic_fetch_andnot_relaxed(i, v);
+}
+#define arch_atomic_fetch_andnot_release arch_atomic_fetch_andnot_release
+#endif
+
+#ifndef arch_atomic_fetch_andnot
+static __always_inline int
+arch_atomic_fetch_andnot(int i, atomic_t *v)
+{
+       int ret;
+       __atomic_pre_full_fence();
+       ret = arch_atomic_fetch_andnot_relaxed(i, v);
+       __atomic_post_full_fence();
+       return ret;
+}
+#define arch_atomic_fetch_andnot arch_atomic_fetch_andnot
+#endif
+
+#endif /* arch_atomic_fetch_andnot_relaxed */
+
+#ifndef arch_atomic_fetch_or_relaxed
+#define arch_atomic_fetch_or_acquire arch_atomic_fetch_or
+#define arch_atomic_fetch_or_release arch_atomic_fetch_or
+#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or
+#else /* arch_atomic_fetch_or_relaxed */
+
+#ifndef arch_atomic_fetch_or_acquire
+static __always_inline int
+arch_atomic_fetch_or_acquire(int i, atomic_t *v)
+{
+       int ret = arch_atomic_fetch_or_relaxed(i, v);
+       __atomic_acquire_fence();
+       return ret;
+}
+#define arch_atomic_fetch_or_acquire arch_atomic_fetch_or_acquire
+#endif
+
+#ifndef arch_atomic_fetch_or_release
+static __always_inline int
+arch_atomic_fetch_or_release(int i, atomic_t *v)
+{
+       __atomic_release_fence();
+       return arch_atomic_fetch_or_relaxed(i, v);
+}
+#define arch_atomic_fetch_or_release arch_atomic_fetch_or_release
+#endif
+
+#ifndef arch_atomic_fetch_or
+static __always_inline int
+arch_atomic_fetch_or(int i, atomic_t *v)
+{
+       int ret;
+       __atomic_pre_full_fence();
+       ret = arch_atomic_fetch_or_relaxed(i, v);
+       __atomic_post_full_fence();
+       return ret;
+}
+#define arch_atomic_fetch_or arch_atomic_fetch_or
+#endif
+
+#endif /* arch_atomic_fetch_or_relaxed */
+
+#ifndef arch_atomic_fetch_xor_relaxed
+#define arch_atomic_fetch_xor_acquire arch_atomic_fetch_xor
+#define arch_atomic_fetch_xor_release arch_atomic_fetch_xor
+#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor
+#else /* arch_atomic_fetch_xor_relaxed */
+
+#ifndef arch_atomic_fetch_xor_acquire
+static __always_inline int
+arch_atomic_fetch_xor_acquire(int i, atomic_t *v)
+{
+       int ret = arch_atomic_fetch_xor_relaxed(i, v);
+       __atomic_acquire_fence();
+       return ret;
+}
+#define arch_atomic_fetch_xor_acquire arch_atomic_fetch_xor_acquire
+#endif
+
+#ifndef arch_atomic_fetch_xor_release
+static __always_inline int
+arch_atomic_fetch_xor_release(int i, atomic_t *v)
+{
+       __atomic_release_fence();
+       return arch_atomic_fetch_xor_relaxed(i, v);
+}
+#define arch_atomic_fetch_xor_release arch_atomic_fetch_xor_release
+#endif
+
+#ifndef arch_atomic_fetch_xor
+static __always_inline int
+arch_atomic_fetch_xor(int i, atomic_t *v)
+{
+       int ret;
+       __atomic_pre_full_fence();
+       ret = arch_atomic_fetch_xor_relaxed(i, v);
+       __atomic_post_full_fence();
+       return ret;
+}
+#define arch_atomic_fetch_xor arch_atomic_fetch_xor
+#endif
+
+#endif /* arch_atomic_fetch_xor_relaxed */
+
+#ifndef arch_atomic_xchg_relaxed
+#define arch_atomic_xchg_acquire arch_atomic_xchg
+#define arch_atomic_xchg_release arch_atomic_xchg
+#define arch_atomic_xchg_relaxed arch_atomic_xchg
+#else /* arch_atomic_xchg_relaxed */
+
+#ifndef arch_atomic_xchg_acquire
+static __always_inline int
+arch_atomic_xchg_acquire(atomic_t *v, int i)
+{
+       int ret = arch_atomic_xchg_relaxed(v, i);
+       __atomic_acquire_fence();
+       return ret;
+}
+#define arch_atomic_xchg_acquire arch_atomic_xchg_acquire
+#endif
+
+#ifndef arch_atomic_xchg_release
+static __always_inline int
+arch_atomic_xchg_release(atomic_t *v, int i)
+{
+       __atomic_release_fence();
+       return arch_atomic_xchg_relaxed(v, i);
+}
+#define arch_atomic_xchg_release arch_atomic_xchg_release
+#endif
+
+#ifndef arch_atomic_xchg
+static __always_inline int
+arch_atomic_xchg(atomic_t *v, int i)
+{
+       int ret;
+       __atomic_pre_full_fence();
+       ret = arch_atomic_xchg_relaxed(v, i);
+       __atomic_post_full_fence();
+       return ret;
+}
+#define arch_atomic_xchg arch_atomic_xchg
+#endif
+
+#endif /* arch_atomic_xchg_relaxed */
+
+#ifndef arch_atomic_cmpxchg_relaxed
+#define arch_atomic_cmpxchg_acquire arch_atomic_cmpxchg
+#define arch_atomic_cmpxchg_release arch_atomic_cmpxchg
+#define arch_atomic_cmpxchg_relaxed arch_atomic_cmpxchg
+#else /* arch_atomic_cmpxchg_relaxed */
+
+#ifndef arch_atomic_cmpxchg_acquire
+static __always_inline int
+arch_atomic_cmpxchg_acquire(atomic_t *v, int old, int new)
+{
+       int ret = arch_atomic_cmpxchg_relaxed(v, old, new);
+       __atomic_acquire_fence();
+       return ret;
+}
+#define arch_atomic_cmpxchg_acquire arch_atomic_cmpxchg_acquire
+#endif
+
+#ifndef arch_atomic_cmpxchg_release
+static __always_inline int
+arch_atomic_cmpxchg_release(atomic_t *v, int old, int new)
+{
+       __atomic_release_fence();
+       return arch_atomic_cmpxchg_relaxed(v, old, new);
+}
+#define arch_atomic_cmpxchg_release arch_atomic_cmpxchg_release
+#endif
+
+#ifndef arch_atomic_cmpxchg
+static __always_inline int
+arch_atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+       int ret;
+       __atomic_pre_full_fence();
+       ret = arch_atomic_cmpxchg_relaxed(v, old, new);
+       __atomic_post_full_fence();
+       return ret;
+}
+#define arch_atomic_cmpxchg arch_atomic_cmpxchg
+#endif
+
+#endif /* arch_atomic_cmpxchg_relaxed */
+
+#ifndef arch_atomic_try_cmpxchg_relaxed
+#ifdef arch_atomic_try_cmpxchg
+#define arch_atomic_try_cmpxchg_acquire arch_atomic_try_cmpxchg
+#define arch_atomic_try_cmpxchg_release arch_atomic_try_cmpxchg
+#define arch_atomic_try_cmpxchg_relaxed arch_atomic_try_cmpxchg
+#endif /* arch_atomic_try_cmpxchg */
+
+#ifndef arch_atomic_try_cmpxchg
+static __always_inline bool
+arch_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
+{
+       int r, o = *old;
+       r = arch_atomic_cmpxchg(v, o, new);
+       if (unlikely(r != o))
+               *old = r;
+       return likely(r == o);
+}
+#define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg
+#endif
+
+#ifndef arch_atomic_try_cmpxchg_acquire
+static __always_inline bool
+arch_atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
+{
+       int r, o = *old;
+       r = arch_atomic_cmpxchg_acquire(v, o, new);
+       if (unlikely(r != o))
+               *old = r;
+       return likely(r == o);
+}
+#define arch_atomic_try_cmpxchg_acquire arch_atomic_try_cmpxchg_acquire
+#endif
+
+#ifndef arch_atomic_try_cmpxchg_release
+static __always_inline bool
+arch_atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
+{
+       int r, o = *old;
+       r = arch_atomic_cmpxchg_release(v, o, new);
+       if (unlikely(r != o))
+               *old = r;
+       return likely(r == o);
+}
+#define arch_atomic_try_cmpxchg_release arch_atomic_try_cmpxchg_release
+#endif
+
+#ifndef arch_atomic_try_cmpxchg_relaxed
+static __always_inline bool
+arch_atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
+{
+       int r, o = *old;
+       r = arch_atomic_cmpxchg_relaxed(v, o, new);
+       if (unlikely(r != o))
+               *old = r;
+       return likely(r == o);
+}
+#define arch_atomic_try_cmpxchg_relaxed arch_atomic_try_cmpxchg_relaxed
+#endif
+
+#else /* arch_atomic_try_cmpxchg_relaxed */
+
+#ifndef arch_atomic_try_cmpxchg_acquire
+static __always_inline bool
+arch_atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
+{
+       bool ret = arch_atomic_try_cmpxchg_relaxed(v, old, new);
+       __atomic_acquire_fence();
+       return ret;
+}
+#define arch_atomic_try_cmpxchg_acquire arch_atomic_try_cmpxchg_acquire
+#endif
+
+#ifndef arch_atomic_try_cmpxchg_release
+static __always_inline bool
+arch_atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
+{
+       __atomic_release_fence();
+       return arch_atomic_try_cmpxchg_relaxed(v, old, new);
+}
+#define arch_atomic_try_cmpxchg_release arch_atomic_try_cmpxchg_release
+#endif
+
+#ifndef arch_atomic_try_cmpxchg
+static __always_inline bool
+arch_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
+{
+       bool ret;
+       __atomic_pre_full_fence();
+       ret = arch_atomic_try_cmpxchg_relaxed(v, old, new);
+       __atomic_post_full_fence();
+       return ret;
+}
+#define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg
+#endif
+
+#endif /* arch_atomic_try_cmpxchg_relaxed */
+
+#ifndef arch_atomic_sub_and_test
+/**
+ * arch_atomic_sub_and_test - subtract value from variable and test result
+ * @i: integer value to subtract
+ * @v: pointer of type atomic_t
+ *
+ * Atomically subtracts @i from @v and returns
+ * true if the result is zero, or false for all
+ * other cases.
+ */
+static __always_inline bool
+arch_atomic_sub_and_test(int i, atomic_t *v)
+{
+       return arch_atomic_sub_return(i, v) == 0;
+}
+#define arch_atomic_sub_and_test arch_atomic_sub_and_test
+#endif
+
+#ifndef arch_atomic_dec_and_test
+/**
+ * arch_atomic_dec_and_test - decrement and test
+ * @v: pointer of type atomic_t
+ *
+ * Atomically decrements @v by 1 and
+ * returns true if the result is 0, or false for all other
+ * cases.
+ */
+static __always_inline bool
+arch_atomic_dec_and_test(atomic_t *v)
+{
+       return arch_atomic_dec_return(v) == 0;
+}
+#define arch_atomic_dec_and_test arch_atomic_dec_and_test
+#endif
+
+#ifndef arch_atomic_inc_and_test
+/**
+ * arch_atomic_inc_and_test - increment and test
+ * @v: pointer of type atomic_t
+ *
+ * Atomically increments @v by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+static __always_inline bool
+arch_atomic_inc_and_test(atomic_t *v)
+{
+       return arch_atomic_inc_return(v) == 0;
+}
+#define arch_atomic_inc_and_test arch_atomic_inc_and_test
+#endif
+
+#ifndef arch_atomic_add_negative
+/**
+ * arch_atomic_add_negative - add and test if negative
+ * @i: integer value to add
+ * @v: pointer of type atomic_t
+ *
+ * Atomically adds @i to @v and returns true
+ * if the result is negative, or false when
+ * result is greater than or equal to zero.
+ */
+static __always_inline bool
+arch_atomic_add_negative(int i, atomic_t *v)
+{
+       return arch_atomic_add_return(i, v) < 0;
+}
+#define arch_atomic_add_negative arch_atomic_add_negative
+#endif
+
+#ifndef arch_atomic_fetch_add_unless
+/**
+ * arch_atomic_fetch_add_unless - add unless the number is already a given value
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as @v was not already @u.
+ * Returns original value of @v
+ */
+static __always_inline int
+arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
+{
+       int c = arch_atomic_read(v);
+
+       do {
+               if (unlikely(c == u))
+                       break;
+       } while (!arch_atomic_try_cmpxchg(v, &c, c + a));
+
+       return c;
+}
+#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
+#endif
+
+#ifndef arch_atomic_add_unless
+/**
+ * arch_atomic_add_unless - add unless the number is already a given value
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, if @v was not already @u.
+ * Returns true if the addition was done.
+ */
+static __always_inline bool
+arch_atomic_add_unless(atomic_t *v, int a, int u)
+{
+       return arch_atomic_fetch_add_unless(v, a, u) != u;
+}
+#define arch_atomic_add_unless arch_atomic_add_unless
+#endif
+
+#ifndef arch_atomic_inc_not_zero
+/**
+ * arch_atomic_inc_not_zero - increment unless the number is zero
+ * @v: pointer of type atomic_t
+ *
+ * Atomically increments @v by 1, if @v is non-zero.
+ * Returns true if the increment was done.
+ */
+static __always_inline bool
+arch_atomic_inc_not_zero(atomic_t *v)
+{
+       return arch_atomic_add_unless(v, 1, 0);
+}
+#define arch_atomic_inc_not_zero arch_atomic_inc_not_zero
+#endif
+
+#ifndef arch_atomic_inc_unless_negative
+static __always_inline bool
+arch_atomic_inc_unless_negative(atomic_t *v)
+{
+       int c = arch_atomic_read(v);
+
+       do {
+               if (unlikely(c < 0))
+                       return false;
+       } while (!arch_atomic_try_cmpxchg(v, &c, c + 1));
+
+       return true;
+}
+#define arch_atomic_inc_unless_negative arch_atomic_inc_unless_negative
+#endif
+
+#ifndef arch_atomic_dec_unless_positive
+static __always_inline bool
+arch_atomic_dec_unless_positive(atomic_t *v)
+{
+       int c = arch_atomic_read(v);
+
+       do {
+               if (unlikely(c > 0))
+                       return false;
+       } while (!arch_atomic_try_cmpxchg(v, &c, c - 1));
+
+       return true;
+}
+#define arch_atomic_dec_unless_positive arch_atomic_dec_unless_positive
+#endif
+
+#ifndef arch_atomic_dec_if_positive
+static __always_inline int
+arch_atomic_dec_if_positive(atomic_t *v)
+{
+       int dec, c = arch_atomic_read(v);
+
+       do {
+               dec = c - 1;
+               if (unlikely(dec < 0))
+                       break;
+       } while (!arch_atomic_try_cmpxchg(v, &c, dec));
+
+       return dec;
+}
+#define arch_atomic_dec_if_positive arch_atomic_dec_if_positive
+#endif
+
+#ifdef CONFIG_GENERIC_ATOMIC64
+#include <asm-generic/atomic64.h>
+#endif
+
+#ifndef arch_atomic64_read_acquire
+static __always_inline s64
+arch_atomic64_read_acquire(const atomic64_t *v)
+{
+       return smp_load_acquire(&(v)->counter);
+}
+#define arch_atomic64_read_acquire arch_atomic64_read_acquire
+#endif
+
+#ifndef arch_atomic64_set_release
+static __always_inline void
+arch_atomic64_set_release(atomic64_t *v, s64 i)
+{
+       smp_store_release(&(v)->counter, i);
+}
+#define arch_atomic64_set_release arch_atomic64_set_release
+#endif
+
+#ifndef arch_atomic64_add_return_relaxed
+#define arch_atomic64_add_return_acquire arch_atomic64_add_return
+#define arch_atomic64_add_return_release arch_atomic64_add_return
+#define arch_atomic64_add_return_relaxed arch_atomic64_add_return
+#else /* arch_atomic64_add_return_relaxed */
+
+#ifndef arch_atomic64_add_return_acquire
+static __always_inline s64
+arch_atomic64_add_return_acquire(s64 i, atomic64_t *v)
+{
+       s64 ret = arch_atomic64_add_return_relaxed(i, v);
+       __atomic_acquire_fence();
+       return ret;
+}
+#define arch_atomic64_add_return_acquire arch_atomic64_add_return_acquire
+#endif
+
+#ifndef arch_atomic64_add_return_release
+static __always_inline s64
+arch_atomic64_add_return_release(s64 i, atomic64_t *v)
+{
+       __atomic_release_fence();
+       return arch_atomic64_add_return_relaxed(i, v);
+}
+#define arch_atomic64_add_return_release arch_atomic64_add_return_release
+#endif
+
+#ifndef arch_atomic64_add_return
+static __always_inline s64
+arch_atomic64_add_return(s64 i, atomic64_t *v)
+{
+       s64 ret;
+       __atomic_pre_full_fence();
+       ret = arch_atomic64_add_return_relaxed(i, v);
+       __atomic_post_full_fence();
+       return ret;
+}
+#define arch_atomic64_add_return arch_atomic64_add_return
+#endif
+
+#endif /* arch_atomic64_add_return_relaxed */
+
+#ifndef arch_atomic64_fetch_add_relaxed
+#define arch_atomic64_fetch_add_acquire arch_atomic64_fetch_add
+#define arch_atomic64_fetch_add_release arch_atomic64_fetch_add
+#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add
+#else /* arch_atomic64_fetch_add_relaxed */
+
+#ifndef arch_atomic64_fetch_add_acquire
+static __always_inline s64
+arch_atomic64_fetch_add_acquire(s64 i, atomic64_t *v)
+{
+       s64 ret = arch_atomic64_fetch_add_relaxed(i, v);
+       __atomic_acquire_fence();
+       return ret;
+}
+#define arch_atomic64_fetch_add_acquire arch_atomic64_fetch_add_acquire
+#endif
+
+#ifndef arch_atomic64_fetch_add_release
+static __always_inline s64
+arch_atomic64_fetch_add_release(s64 i, atomic64_t *v)
+{
+       __atomic_release_fence();
+       return arch_atomic64_fetch_add_relaxed(i, v);
+}
+#define arch_atomic64_fetch_add_release arch_atomic64_fetch_add_release
+#endif
+
+#ifndef arch_atomic64_fetch_add
+static __always_inline s64
+arch_atomic64_fetch_add(s64 i, atomic64_t *v)
+{
+       s64 ret;
+       __atomic_pre_full_fence();
+       ret = arch_atomic64_fetch_add_relaxed(i, v);
+       __atomic_post_full_fence();
+       return ret;
+}
+#define arch_atomic64_fetch_add arch_atomic64_fetch_add
+#endif
+
+#endif /* arch_atomic64_fetch_add_relaxed */
+
+#ifndef arch_atomic64_sub_return_relaxed
+#define arch_atomic64_sub_return_acquire arch_atomic64_sub_return
+#define arch_atomic64_sub_return_release arch_atomic64_sub_return
+#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return
+#else /* arch_atomic64_sub_return_relaxed */
+
+#ifndef arch_atomic64_sub_return_acquire
+static __always_inline s64
+arch_atomic64_sub_return_acquire(s64 i, atomic64_t *v)
+{
+       s64 ret = arch_atomic64_sub_return_relaxed(i, v);
+       __atomic_acquire_fence();
+       return ret;
+}
+#define arch_atomic64_sub_return_acquire arch_atomic64_sub_return_acquire
+#endif
+
+#ifndef arch_atomic64_sub_return_release
+static __always_inline s64
+arch_atomic64_sub_return_release(s64 i, atomic64_t *v)
+{
+       __atomic_release_fence();
+       return arch_atomic64_sub_return_relaxed(i, v);
+}
+#define arch_atomic64_sub_return_release arch_atomic64_sub_return_release
+#endif
+
+#ifndef arch_atomic64_sub_return
+static __always_inline s64
+arch_atomic64_sub_return(s64 i, atomic64_t *v)
+{
+       s64 ret;
+       __atomic_pre_full_fence();
+       ret = arch_atomic64_sub_return_relaxed(i, v);
+       __atomic_post_full_fence();
+       return ret;
+}
+#define arch_atomic64_sub_return arch_atomic64_sub_return
+#endif
+
+#endif /* arch_atomic64_sub_return_relaxed */
+
+#ifndef arch_atomic64_fetch_sub_relaxed
+#define arch_atomic64_fetch_sub_acquire arch_atomic64_fetch_sub
+#define arch_atomic64_fetch_sub_release arch_atomic64_fetch_sub
+#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub
+#else /* arch_atomic64_fetch_sub_relaxed */
+
+#ifndef arch_atomic64_fetch_sub_acquire
+static __always_inline s64
+arch_atomic64_fetch_sub_acquire(s64 i, atomic64_t *v)
+{
+       s64 ret = arch_atomic64_fetch_sub_relaxed(i, v);
+       __atomic_acquire_fence();
+       return ret;
+}
+#define arch_atomic64_fetch_sub_acquire arch_atomic64_fetch_sub_acquire
+#endif
+
+#ifndef arch_atomic64_fetch_sub_release
+static __always_inline s64
+arch_atomic64_fetch_sub_release(s64 i, atomic64_t *v)
+{
+       __atomic_release_fence();
+       return arch_atomic64_fetch_sub_relaxed(i, v);
+}
+#define arch_atomic64_fetch_sub_release arch_atomic64_fetch_sub_release
+#endif
+
+#ifndef arch_atomic64_fetch_sub
+static __always_inline s64
+arch_atomic64_fetch_sub(s64 i, atomic64_t *v)
+{
+       s64 ret;
+       __atomic_pre_full_fence();
+       ret = arch_atomic64_fetch_sub_relaxed(i, v);
+       __atomic_post_full_fence();
+       return ret;
+}
+#define arch_atomic64_fetch_sub arch_atomic64_fetch_sub
+#endif
+
+#endif /* arch_atomic64_fetch_sub_relaxed */
+
+#ifndef arch_atomic64_inc
+static __always_inline void
+arch_atomic64_inc(atomic64_t *v)
+{
+       arch_atomic64_add(1, v);
+}
+#define arch_atomic64_inc arch_atomic64_inc
+#endif
+
+#ifndef arch_atomic64_inc_return_relaxed
+#ifdef arch_atomic64_inc_return
+#define arch_atomic64_inc_return_acquire arch_atomic64_inc_return
+#define arch_atomic64_inc_return_release arch_atomic64_inc_return
+#define arch_atomic64_inc_return_relaxed arch_atomic64_inc_return
+#endif /* arch_atomic64_inc_return */
+
+#ifndef arch_atomic64_inc_return
+static __always_inline s64
+arch_atomic64_inc_return(atomic64_t *v)
+{
+       return arch_atomic64_add_return(1, v);
+}
+#define arch_atomic64_inc_return arch_atomic64_inc_return
+#endif
+
+#ifndef arch_atomic64_inc_return_acquire
+static __always_inline s64
+arch_atomic64_inc_return_acquire(atomic64_t *v)
+{
+       return arch_atomic64_add_return_acquire(1, v);
+}
+#define arch_atomic64_inc_return_acquire arch_atomic64_inc_return_acquire
+#endif
+
+#ifndef arch_atomic64_inc_return_release
+static __always_inline s64
+arch_atomic64_inc_return_release(atomic64_t *v)
+{
+       return arch_atomic64_add_return_release(1, v);
+}
+#define arch_atomic64_inc_return_release arch_atomic64_inc_return_release
+#endif
+
+#ifndef arch_atomic64_inc_return_relaxed
+static __always_inline s64
+arch_atomic64_inc_return_relaxed(atomic64_t *v)
+{
+       return arch_atomic64_add_return_relaxed(1, v);
+}
+#define arch_atomic64_inc_return_relaxed arch_atomic64_inc_return_relaxed
+#endif
+
+#else /* arch_atomic64_inc_return_relaxed */
+
+#ifndef arch_atomic64_inc_return_acquire
+static __always_inline s64
+arch_atomic64_inc_return_acquire(atomic64_t *v)
+{
+       s64 ret = arch_atomic64_inc_return_relaxed(v);
+       __atomic_acquire_fence();
+       return ret;
+}
+#define arch_atomic64_inc_return_acquire arch_atomic64_inc_return_acquire
+#endif
+
+#ifndef arch_atomic64_inc_return_release
+static __always_inline s64
+arch_atomic64_inc_return_release(atomic64_t *v)
+{
+       __atomic_release_fence();
+       return arch_atomic64_inc_return_relaxed(v);
+}
+#define arch_atomic64_inc_return_release arch_atomic64_inc_return_release
+#endif
+
+#ifndef arch_atomic64_inc_return
+static __always_inline s64
+arch_atomic64_inc_return(atomic64_t *v)
+{
+       s64 ret;
+       __atomic_pre_full_fence();
+       ret = arch_atomic64_inc_return_relaxed(v);
+       __atomic_post_full_fence();
+       return ret;
+}
+#define arch_atomic64_inc_return arch_atomic64_inc_return
+#endif
+
+#endif /* arch_atomic64_inc_return_relaxed */
+
+#ifndef arch_atomic64_fetch_inc_relaxed
+#ifdef arch_atomic64_fetch_inc
+#define arch_atomic64_fetch_inc_acquire arch_atomic64_fetch_inc
+#define arch_atomic64_fetch_inc_release arch_atomic64_fetch_inc
+#define arch_atomic64_fetch_inc_relaxed arch_atomic64_fetch_inc
+#endif /* arch_atomic64_fetch_inc */
+
+#ifndef arch_atomic64_fetch_inc
+static __always_inline s64
+arch_atomic64_fetch_inc(atomic64_t *v)
+{
+       return arch_atomic64_fetch_add(1, v);
+}
+#define arch_atomic64_fetch_inc arch_atomic64_fetch_inc
+#endif
+
+#ifndef arch_atomic64_fetch_inc_acquire
+static __always_inline s64
+arch_atomic64_fetch_inc_acquire(atomic64_t *v)
+{
+       return arch_atomic64_fetch_add_acquire(1, v);
+}
+#define arch_atomic64_fetch_inc_acquire arch_atomic64_fetch_inc_acquire
+#endif
+
+#ifndef arch_atomic64_fetch_inc_release
+static __always_inline s64
+arch_atomic64_fetch_inc_release(atomic64_t *v)
+{
+       return arch_atomic64_fetch_add_release(1, v);
+}
+#define arch_atomic64_fetch_inc_release arch_atomic64_fetch_inc_release
+#endif
+
+#ifndef arch_atomic64_fetch_inc_relaxed
+static __always_inline s64
+arch_atomic64_fetch_inc_relaxed(atomic64_t *v)
+{
+       return arch_atomic64_fetch_add_relaxed(1, v);
+}
+#define arch_atomic64_fetch_inc_relaxed arch_atomic64_fetch_inc_relaxed
+#endif
+
+#else /* arch_atomic64_fetch_inc_relaxed */
+
+#ifndef arch_atomic64_fetch_inc_acquire
+static __always_inline s64
+arch_atomic64_fetch_inc_acquire(atomic64_t *v)
+{
+       s64 ret = arch_atomic64_fetch_inc_relaxed(v);
+       __atomic_acquire_fence();
+       return ret;
+}
+#define arch_atomic64_fetch_inc_acquire arch_atomic64_fetch_inc_acquire
+#endif
+
+#ifndef arch_atomic64_fetch_inc_release
+static __always_inline s64
+arch_atomic64_fetch_inc_release(atomic64_t *v)
+{
+       __atomic_release_fence();
+       return arch_atomic64_fetch_inc_relaxed(v);
+}
+#define arch_atomic64_fetch_inc_release arch_atomic64_fetch_inc_release
+#endif
+
+#ifndef arch_atomic64_fetch_inc
+static __always_inline s64
+arch_atomic64_fetch_inc(atomic64_t *v)
+{
+       s64 ret;
+       __atomic_pre_full_fence();
+       ret = arch_atomic64_fetch_inc_relaxed(v);
+       __atomic_post_full_fence();
+       return ret;
+}
+#define arch_atomic64_fetch_inc arch_atomic64_fetch_inc
+#endif
+
+#endif /* arch_atomic64_fetch_inc_relaxed */
+
+#ifndef arch_atomic64_dec
+static __always_inline void
+arch_atomic64_dec(atomic64_t *v)
+{
+       arch_atomic64_sub(1, v);
+}
+#define arch_atomic64_dec arch_atomic64_dec
+#endif
+
+#ifndef arch_atomic64_dec_return_relaxed
+#ifdef arch_atomic64_dec_return
+#define arch_atomic64_dec_return_acquire arch_atomic64_dec_return
+#define arch_atomic64_dec_return_release arch_atomic64_dec_return
+#define arch_atomic64_dec_return_relaxed arch_atomic64_dec_return
+#endif /* arch_atomic64_dec_return */
+
+#ifndef arch_atomic64_dec_return
+static __always_inline s64
+arch_atomic64_dec_return(atomic64_t *v)
+{
+       return arch_atomic64_sub_return(1, v);
+}
+#define arch_atomic64_dec_return arch_atomic64_dec_return
+#endif
+
+#ifndef arch_atomic64_dec_return_acquire
+static __always_inline s64
+arch_atomic64_dec_return_acquire(atomic64_t *v)
+{
+       return arch_atomic64_sub_return_acquire(1, v);
+}
+#define arch_atomic64_dec_return_acquire arch_atomic64_dec_return_acquire
+#endif
+
+#ifndef arch_atomic64_dec_return_release
+static __always_inline s64
+arch_atomic64_dec_return_release(atomic64_t *v)
+{
+       return arch_atomic64_sub_return_release(1, v);
+}
+#define arch_atomic64_dec_return_release arch_atomic64_dec_return_release
+#endif
+
+#ifndef arch_atomic64_dec_return_relaxed
+static __always_inline s64
+arch_atomic64_dec_return_relaxed(atomic64_t *v)
+{
+       return arch_atomic64_sub_return_relaxed(1, v);
+}
+#define arch_atomic64_dec_return_relaxed arch_atomic64_dec_return_relaxed
+#endif
+
+#else /* arch_atomic64_dec_return_relaxed */
+
+#ifndef arch_atomic64_dec_return_acquire
+static __always_inline s64
+arch_atomic64_dec_return_acquire(atomic64_t *v)
+{
+       s64 ret = arch_atomic64_dec_return_relaxed(v);
+       __atomic_acquire_fence();
+       return ret;
+}
+#define arch_atomic64_dec_return_acquire arch_atomic64_dec_return_acquire
+#endif
+
+#ifndef arch_atomic64_dec_return_release
+static __always_inline s64
+arch_atomic64_dec_return_release(atomic64_t *v)
+{
+       __atomic_release_fence();
+       return arch_atomic64_dec_return_relaxed(v);
+}
+#define arch_atomic64_dec_return_release arch_atomic64_dec_return_release
+#endif
+
+#ifndef arch_atomic64_dec_return
+static __always_inline s64
+arch_atomic64_dec_return(atomic64_t *v)
+{
+       s64 ret;
+       __atomic_pre_full_fence();
+       ret = arch_atomic64_dec_return_relaxed(v);
+       __atomic_post_full_fence();
+       return ret;
+}
+#define arch_atomic64_dec_return arch_atomic64_dec_return
+#endif
+
+#endif /* arch_atomic64_dec_return_relaxed */
+
+#ifndef arch_atomic64_fetch_dec_relaxed
+#ifdef arch_atomic64_fetch_dec
+#define arch_atomic64_fetch_dec_acquire arch_atomic64_fetch_dec
+#define arch_atomic64_fetch_dec_release arch_atomic64_fetch_dec
+#define arch_atomic64_fetch_dec_relaxed arch_atomic64_fetch_dec
+#endif /* arch_atomic64_fetch_dec */
+
+#ifndef arch_atomic64_fetch_dec
+static __always_inline s64
+arch_atomic64_fetch_dec(atomic64_t *v)
+{
+       return arch_atomic64_fetch_sub(1, v);
+}
+#define arch_atomic64_fetch_dec arch_atomic64_fetch_dec
+#endif
+
+#ifndef arch_atomic64_fetch_dec_acquire
+static __always_inline s64
+arch_atomic64_fetch_dec_acquire(atomic64_t *v)
+{
+       return arch_atomic64_fetch_sub_acquire(1, v);
+}
+#define arch_atomic64_fetch_dec_acquire arch_atomic64_fetch_dec_acquire
+#endif
+
+#ifndef arch_atomic64_fetch_dec_release
+static __always_inline s64
+arch_atomic64_fetch_dec_release(atomic64_t *v)
+{
+       return arch_atomic64_fetch_sub_release(1, v);
+}
+#define arch_atomic64_fetch_dec_release arch_atomic64_fetch_dec_release
+#endif
+
+#ifndef arch_atomic64_fetch_dec_relaxed
+static __always_inline s64
+arch_atomic64_fetch_dec_relaxed(atomic64_t *v)
+{
+       return arch_atomic64_fetch_sub_relaxed(1, v);
+}
+#define arch_atomic64_fetch_dec_relaxed arch_atomic64_fetch_dec_relaxed
+#endif
+
+#else /* arch_atomic64_fetch_dec_relaxed */
+
+#ifndef arch_atomic64_fetch_dec_acquire
+static __always_inline s64
+arch_atomic64_fetch_dec_acquire(atomic64_t *v)
+{
+       s64 ret = arch_atomic64_fetch_dec_relaxed(v);
+       __atomic_acquire_fence();
+       return ret;
+}
+#define arch_atomic64_fetch_dec_acquire arch_atomic64_fetch_dec_acquire
+#endif
+
+#ifndef arch_atomic64_fetch_dec_release
+static __always_inline s64
+arch_atomic64_fetch_dec_release(atomic64_t *v)
+{
+       __atomic_release_fence();
+       return arch_atomic64_fetch_dec_relaxed(v);
+}
+#define arch_atomic64_fetch_dec_release arch_atomic64_fetch_dec_release
+#endif
+
+#ifndef arch_atomic64_fetch_dec
+static __always_inline s64
+arch_atomic64_fetch_dec(atomic64_t *v)
+{
+       s64 ret;
+       __atomic_pre_full_fence();
+       ret = arch_atomic64_fetch_dec_relaxed(v);
+       __atomic_post_full_fence();
+       return ret;
+}
+#define arch_atomic64_fetch_dec arch_atomic64_fetch_dec
+#endif
+
+#endif /* arch_atomic64_fetch_dec_relaxed */
+
+#ifndef arch_atomic64_fetch_and_relaxed
+#define arch_atomic64_fetch_and_acquire arch_atomic64_fetch_and
+#define arch_atomic64_fetch_and_release arch_atomic64_fetch_and
+#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and
+#else /* arch_atomic64_fetch_and_relaxed */
+
+#ifndef arch_atomic64_fetch_and_acquire
+static __always_inline s64
+arch_atomic64_fetch_and_acquire(s64 i, atomic64_t *v)
+{
+       s64 ret = arch_atomic64_fetch_and_relaxed(i, v);
+       __atomic_acquire_fence();
+       return ret;
+}
+#define arch_atomic64_fetch_and_acquire arch_atomic64_fetch_and_acquire
+#endif
+
+#ifndef arch_atomic64_fetch_and_release
+static __always_inline s64
+arch_atomic64_fetch_and_release(s64 i, atomic64_t *v)
+{
+       __atomic_release_fence();
+       return arch_atomic64_fetch_and_relaxed(i, v);
+}
+#define arch_atomic64_fetch_and_release arch_atomic64_fetch_and_release
+#endif
+
+#ifndef arch_atomic64_fetch_and
+static __always_inline s64
+arch_atomic64_fetch_and(s64 i, atomic64_t *v)
+{
+       s64 ret;
+       __atomic_pre_full_fence();
+       ret = arch_atomic64_fetch_and_relaxed(i, v);
+       __atomic_post_full_fence();
+       return ret;
+}
+#define arch_atomic64_fetch_and arch_atomic64_fetch_and
+#endif
+
+#endif /* arch_atomic64_fetch_and_relaxed */
+
+#ifndef arch_atomic64_andnot
+static __always_inline void
+arch_atomic64_andnot(s64 i, atomic64_t *v)
+{
+       arch_atomic64_and(~i, v);
+}
+#define arch_atomic64_andnot arch_atomic64_andnot
+#endif
+
+#ifndef arch_atomic64_fetch_andnot_relaxed
+#ifdef arch_atomic64_fetch_andnot
+#define arch_atomic64_fetch_andnot_acquire arch_atomic64_fetch_andnot
+#define arch_atomic64_fetch_andnot_release arch_atomic64_fetch_andnot
+#define arch_atomic64_fetch_andnot_relaxed arch_atomic64_fetch_andnot
+#endif /* arch_atomic64_fetch_andnot */
+
+#ifndef arch_atomic64_fetch_andnot
+static __always_inline s64
+arch_atomic64_fetch_andnot(s64 i, atomic64_t *v)
+{
+       return arch_atomic64_fetch_and(~i, v);
+}
+#define arch_atomic64_fetch_andnot arch_atomic64_fetch_andnot
+#endif
+
+#ifndef arch_atomic64_fetch_andnot_acquire
+static __always_inline s64
+arch_atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
+{
+       return arch_atomic64_fetch_and_acquire(~i, v);
+}
+#define arch_atomic64_fetch_andnot_acquire arch_atomic64_fetch_andnot_acquire
+#endif
+
+#ifndef arch_atomic64_fetch_andnot_release
+static __always_inline s64
+arch_atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
+{
+       return arch_atomic64_fetch_and_release(~i, v);
+}
+#define arch_atomic64_fetch_andnot_release arch_atomic64_fetch_andnot_release
+#endif
+
+#ifndef arch_atomic64_fetch_andnot_relaxed
+static __always_inline s64
+arch_atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v)
+{
+       return arch_atomic64_fetch_and_relaxed(~i, v);
+}
+#define arch_atomic64_fetch_andnot_relaxed arch_atomic64_fetch_andnot_relaxed
+#endif
+
+#else /* arch_atomic64_fetch_andnot_relaxed */
+
+#ifndef arch_atomic64_fetch_andnot_acquire
+static __always_inline s64
+arch_atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
+{
+       s64 ret = arch_atomic64_fetch_andnot_relaxed(i, v);
+       __atomic_acquire_fence();
+       return ret;
+}
+#define arch_atomic64_fetch_andnot_acquire arch_atomic64_fetch_andnot_acquire
+#endif
+
+#ifndef arch_atomic64_fetch_andnot_release
+static __always_inline s64
+arch_atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
+{
+       __atomic_release_fence();
+       return arch_atomic64_fetch_andnot_relaxed(i, v);
+}
+#define arch_atomic64_fetch_andnot_release arch_atomic64_fetch_andnot_release
+#endif
+
+#ifndef arch_atomic64_fetch_andnot
+static __always_inline s64
+arch_atomic64_fetch_andnot(s64 i, atomic64_t *v)
+{
+       s64 ret;
+       __atomic_pre_full_fence();
+       ret = arch_atomic64_fetch_andnot_relaxed(i, v);
+       __atomic_post_full_fence();
+       return ret;
+}
+#define arch_atomic64_fetch_andnot arch_atomic64_fetch_andnot
+#endif
+
+#endif /* arch_atomic64_fetch_andnot_relaxed */
+
+#ifndef arch_atomic64_fetch_or_relaxed
+#define arch_atomic64_fetch_or_acquire arch_atomic64_fetch_or
+#define arch_atomic64_fetch_or_release arch_atomic64_fetch_or
+#define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or
+#else /* arch_atomic64_fetch_or_relaxed */
+
+#ifndef arch_atomic64_fetch_or_acquire
+static __always_inline s64
+arch_atomic64_fetch_or_acquire(s64 i, atomic64_t *v)
+{
+       s64 ret = arch_atomic64_fetch_or_relaxed(i, v);
+       __atomic_acquire_fence();
+       return ret;
+}
+#define arch_atomic64_fetch_or_acquire arch_atomic64_fetch_or_acquire
+#endif
+
+#ifndef arch_atomic64_fetch_or_release
+static __always_inline s64
+arch_atomic64_fetch_or_release(s64 i, atomic64_t *v)
+{
+       __atomic_release_fence();
+       return arch_atomic64_fetch_or_relaxed(i, v);
+}
+#define arch_atomic64_fetch_or_release arch_atomic64_fetch_or_release
+#endif
+
+#ifndef arch_atomic64_fetch_or
+static __always_inline s64
+arch_atomic64_fetch_or(s64 i, atomic64_t *v)
+{
+       s64 ret;
+       __atomic_pre_full_fence();
+       ret = arch_atomic64_fetch_or_relaxed(i, v);
+       __atomic_post_full_fence();
+       return ret;
+}
+#define arch_atomic64_fetch_or arch_atomic64_fetch_or
+#endif
+
+#endif /* arch_atomic64_fetch_or_relaxed */
+
+#ifndef arch_atomic64_fetch_xor_relaxed
+#define arch_atomic64_fetch_xor_acquire arch_atomic64_fetch_xor
+#define arch_atomic64_fetch_xor_release arch_atomic64_fetch_xor
+#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor
+#else /* arch_atomic64_fetch_xor_relaxed */
+
+#ifndef arch_atomic64_fetch_xor_acquire
+static __always_inline s64
+arch_atomic64_fetch_xor_acquire(s64 i, atomic64_t *v)
+{
+       s64 ret = arch_atomic64_fetch_xor_relaxed(i, v);
+       __atomic_acquire_fence();
+       return ret;
+}
+#define arch_atomic64_fetch_xor_acquire arch_atomic64_fetch_xor_acquire
+#endif
+
+#ifndef arch_atomic64_fetch_xor_release
+static __always_inline s64
+arch_atomic64_fetch_xor_release(s64 i, atomic64_t *v)
+{
+       __atomic_release_fence();
+       return arch_atomic64_fetch_xor_relaxed(i, v);
+}
+#define arch_atomic64_fetch_xor_release arch_atomic64_fetch_xor_release
+#endif
+
+#ifndef arch_atomic64_fetch_xor
+static __always_inline s64
+arch_atomic64_fetch_xor(s64 i, atomic64_t *v)
+{
+       s64 ret;
+       __atomic_pre_full_fence();
+       ret = arch_atomic64_fetch_xor_relaxed(i, v);
+       __atomic_post_full_fence();
+       return ret;
+}
+#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor
+#endif
+
+#endif /* arch_atomic64_fetch_xor_relaxed */
+
+#ifndef arch_atomic64_xchg_relaxed
+#define arch_atomic64_xchg_acquire arch_atomic64_xchg
+#define arch_atomic64_xchg_release arch_atomic64_xchg
+#define arch_atomic64_xchg_relaxed arch_atomic64_xchg
+#else /* arch_atomic64_xchg_relaxed */
+
+#ifndef arch_atomic64_xchg_acquire
+static __always_inline s64
+arch_atomic64_xchg_acquire(atomic64_t *v, s64 i)
+{
+       s64 ret = arch_atomic64_xchg_relaxed(v, i);
+       __atomic_acquire_fence();
+       return ret;
+}
+#define arch_atomic64_xchg_acquire arch_atomic64_xchg_acquire
+#endif
+
+#ifndef arch_atomic64_xchg_release
+static __always_inline s64
+arch_atomic64_xchg_release(atomic64_t *v, s64 i)
+{
+       __atomic_release_fence();
+       return arch_atomic64_xchg_relaxed(v, i);
+}
+#define arch_atomic64_xchg_release arch_atomic64_xchg_release
+#endif
+
+#ifndef arch_atomic64_xchg
+static __always_inline s64
+arch_atomic64_xchg(atomic64_t *v, s64 i)
+{
+       s64 ret;
+       __atomic_pre_full_fence();
+       ret = arch_atomic64_xchg_relaxed(v, i);
+       __atomic_post_full_fence();
+       return ret;
+}
+#define arch_atomic64_xchg arch_atomic64_xchg
+#endif
+
+#endif /* arch_atomic64_xchg_relaxed */
+
+#ifndef arch_atomic64_cmpxchg_relaxed
+#define arch_atomic64_cmpxchg_acquire arch_atomic64_cmpxchg
+#define arch_atomic64_cmpxchg_release arch_atomic64_cmpxchg
+#define arch_atomic64_cmpxchg_relaxed arch_atomic64_cmpxchg
+#else /* arch_atomic64_cmpxchg_relaxed */
+
+#ifndef arch_atomic64_cmpxchg_acquire
+static __always_inline s64
+arch_atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new)
+{
+       s64 ret = arch_atomic64_cmpxchg_relaxed(v, old, new);
+       __atomic_acquire_fence();
+       return ret;
+}
+#define arch_atomic64_cmpxchg_acquire arch_atomic64_cmpxchg_acquire
+#endif
+
+#ifndef arch_atomic64_cmpxchg_release
+static __always_inline s64
+arch_atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new)
+{
+       __atomic_release_fence();
+       return arch_atomic64_cmpxchg_relaxed(v, old, new);
+}
+#define arch_atomic64_cmpxchg_release arch_atomic64_cmpxchg_release
+#endif
+
+#ifndef arch_atomic64_cmpxchg
+static __always_inline s64
+arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
+{
+       s64 ret;
+       __atomic_pre_full_fence();
+       ret = arch_atomic64_cmpxchg_relaxed(v, old, new);
+       __atomic_post_full_fence();
+       return ret;
+}
+#define arch_atomic64_cmpxchg arch_atomic64_cmpxchg
+#endif
+
+#endif /* arch_atomic64_cmpxchg_relaxed */
+
+#ifndef arch_atomic64_try_cmpxchg_relaxed
+#ifdef arch_atomic64_try_cmpxchg
+#define arch_atomic64_try_cmpxchg_acquire arch_atomic64_try_cmpxchg
+#define arch_atomic64_try_cmpxchg_release arch_atomic64_try_cmpxchg
+#define arch_atomic64_try_cmpxchg_relaxed arch_atomic64_try_cmpxchg
+#endif /* arch_atomic64_try_cmpxchg */
+
+#ifndef arch_atomic64_try_cmpxchg
+static __always_inline bool
+arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
+{
+       s64 r, o = *old;
+       r = arch_atomic64_cmpxchg(v, o, new);
+       if (unlikely(r != o))
+               *old = r;
+       return likely(r == o);
+}
+#define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg
+#endif
+
+#ifndef arch_atomic64_try_cmpxchg_acquire
+static __always_inline bool
+arch_atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
+{
+       s64 r, o = *old;
+       r = arch_atomic64_cmpxchg_acquire(v, o, new);
+       if (unlikely(r != o))
+               *old = r;
+       return likely(r == o);
+}
+#define arch_atomic64_try_cmpxchg_acquire arch_atomic64_try_cmpxchg_acquire
+#endif
+
+#ifndef arch_atomic64_try_cmpxchg_release
+static __always_inline bool
+arch_atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
+{
+       s64 r, o = *old;
+       r = arch_atomic64_cmpxchg_release(v, o, new);
+       if (unlikely(r != o))
+               *old = r;
+       return likely(r == o);
+}
+#define arch_atomic64_try_cmpxchg_release arch_atomic64_try_cmpxchg_release
+#endif
+
+#ifndef arch_atomic64_try_cmpxchg_relaxed
+static __always_inline bool
+arch_atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
+{
+       s64 r, o = *old;
+       r = arch_atomic64_cmpxchg_relaxed(v, o, new);
+       if (unlikely(r != o))
+               *old = r;
+       return likely(r == o);
+}
+#define arch_atomic64_try_cmpxchg_relaxed arch_atomic64_try_cmpxchg_relaxed
+#endif
+
+#else /* arch_atomic64_try_cmpxchg_relaxed */
+
+#ifndef arch_atomic64_try_cmpxchg_acquire
+static __always_inline bool
+arch_atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
+{
+       bool ret = arch_atomic64_try_cmpxchg_relaxed(v, old, new);
+       __atomic_acquire_fence();
+       return ret;
+}
+#define arch_atomic64_try_cmpxchg_acquire arch_atomic64_try_cmpxchg_acquire
+#endif
+
+#ifndef arch_atomic64_try_cmpxchg_release
+static __always_inline bool
+arch_atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
+{
+       __atomic_release_fence();
+       return arch_atomic64_try_cmpxchg_relaxed(v, old, new);
+}
+#define arch_atomic64_try_cmpxchg_release arch_atomic64_try_cmpxchg_release
+#endif
+
+#ifndef arch_atomic64_try_cmpxchg
+static __always_inline bool
+arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
+{
+       bool ret;
+       __atomic_pre_full_fence();
+       ret = arch_atomic64_try_cmpxchg_relaxed(v, old, new);
+       __atomic_post_full_fence();
+       return ret;
+}
+#define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg
+#endif
+
+#endif /* arch_atomic64_try_cmpxchg_relaxed */
+
+#ifndef arch_atomic64_sub_and_test
+/**
+ * arch_atomic64_sub_and_test - subtract value from variable and test result
+ * @i: integer value to subtract
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically subtracts @i from @v and returns
+ * true if the result is zero, or false for all
+ * other cases.
+ */
+static __always_inline bool
+arch_atomic64_sub_and_test(s64 i, atomic64_t *v)
+{
+       return arch_atomic64_sub_return(i, v) == 0;
+}
+#define arch_atomic64_sub_and_test arch_atomic64_sub_and_test
+#endif
+
+#ifndef arch_atomic64_dec_and_test
+/**
+ * arch_atomic64_dec_and_test - decrement and test
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically decrements @v by 1 and
+ * returns true if the result is 0, or false for all other
+ * cases.
+ */
+static __always_inline bool
+arch_atomic64_dec_and_test(atomic64_t *v)
+{
+       return arch_atomic64_dec_return(v) == 0;
+}
+#define arch_atomic64_dec_and_test arch_atomic64_dec_and_test
+#endif
+
+#ifndef arch_atomic64_inc_and_test
+/**
+ * arch_atomic64_inc_and_test - increment and test
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically increments @v by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+static __always_inline bool
+arch_atomic64_inc_and_test(atomic64_t *v)
+{
+       return arch_atomic64_inc_return(v) == 0;
+}
+#define arch_atomic64_inc_and_test arch_atomic64_inc_and_test
+#endif
+
+#ifndef arch_atomic64_add_negative
+/**
+ * arch_atomic64_add_negative - add and test if negative
+ * @i: integer value to add
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically adds @i to @v and returns true
+ * if the result is negative, or false when
+ * result is greater than or equal to zero.
+ */
+static __always_inline bool
+arch_atomic64_add_negative(s64 i, atomic64_t *v)
+{
+       return arch_atomic64_add_return(i, v) < 0;
+}
+#define arch_atomic64_add_negative arch_atomic64_add_negative
+#endif
+
+#ifndef arch_atomic64_fetch_add_unless
+/**
+ * arch_atomic64_fetch_add_unless - add unless the number is already a given value
+ * @v: pointer of type atomic64_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as @v was not already @u.
+ * Returns original value of @v
+ */
+static __always_inline s64
+arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
+{
+       s64 c = arch_atomic64_read(v);
+
+       do {
+               if (unlikely(c == u))
+                       break;
+       } while (!arch_atomic64_try_cmpxchg(v, &c, c + a));
+
+       return c;
+}
+#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
+#endif
+
+#ifndef arch_atomic64_add_unless
+/**
+ * arch_atomic64_add_unless - add unless the number is already a given value
+ * @v: pointer of type atomic64_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, if @v was not already @u.
+ * Returns true if the addition was done.
+ */
+static __always_inline bool
+arch_atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
+{
+       return arch_atomic64_fetch_add_unless(v, a, u) != u;
+}
+#define arch_atomic64_add_unless arch_atomic64_add_unless
+#endif
+
+#ifndef arch_atomic64_inc_not_zero
+/**
+ * arch_atomic64_inc_not_zero - increment unless the number is zero
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically increments @v by 1, if @v is non-zero.
+ * Returns true if the increment was done.
+ */
+static __always_inline bool
+arch_atomic64_inc_not_zero(atomic64_t *v)
+{
+       return arch_atomic64_add_unless(v, 1, 0);
+}
+#define arch_atomic64_inc_not_zero arch_atomic64_inc_not_zero
+#endif
+
+#ifndef arch_atomic64_inc_unless_negative
+static __always_inline bool
+arch_atomic64_inc_unless_negative(atomic64_t *v)
+{
+       s64 c = arch_atomic64_read(v);
+
+       do {
+               if (unlikely(c < 0))
+                       return false;
+       } while (!arch_atomic64_try_cmpxchg(v, &c, c + 1));
+
+       return true;
+}
+#define arch_atomic64_inc_unless_negative arch_atomic64_inc_unless_negative
+#endif
+
+#ifndef arch_atomic64_dec_unless_positive
+static __always_inline bool
+arch_atomic64_dec_unless_positive(atomic64_t *v)
+{
+       s64 c = arch_atomic64_read(v);
+
+       do {
+               if (unlikely(c > 0))
+                       return false;
+       } while (!arch_atomic64_try_cmpxchg(v, &c, c - 1));
+
+       return true;
+}
+#define arch_atomic64_dec_unless_positive arch_atomic64_dec_unless_positive
+#endif
+
+#ifndef arch_atomic64_dec_if_positive
+static __always_inline s64
+arch_atomic64_dec_if_positive(atomic64_t *v)
+{
+       s64 dec, c = arch_atomic64_read(v);
+
+       do {
+               dec = c - 1;
+               if (unlikely(dec < 0))
+                       break;
+       } while (!arch_atomic64_try_cmpxchg(v, &c, dec));
+
+       return dec;
+}
+#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
+#endif
+
+#endif /* _LINUX_ATOMIC_FALLBACK_H */
+// cca554917d7ea73d5e3e7397dd70c484cad9b2c4
diff --git a/include/linux/atomic/atomic-instrumented.h b/include/linux/atomic/atomic-instrumented.h
new file mode 100644 (file)
index 0000000..a0f6543
--- /dev/null
@@ -0,0 +1,1915 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Generated by scripts/atomic/gen-atomic-instrumented.sh
+// DO NOT MODIFY THIS FILE DIRECTLY
+
+/*
+ * This file provides wrappers with KASAN instrumentation for atomic operations.
+ * To use this functionality an arch's atomic.h file needs to define all
+ * atomic operations with arch_ prefix (e.g. arch_atomic_read()) and include
+ * this file at the end. This file provides atomic_read() that forwards to
+ * arch_atomic_read() for actual atomic operation.
+ * Note: if an arch atomic operation is implemented by means of other atomic
+ * operations (e.g. atomic_read()/atomic_cmpxchg() loop), then it needs to use
+ * arch_ variants (i.e. arch_atomic_read()/arch_atomic_cmpxchg()) to avoid
+ * double instrumentation.
+ */
+#ifndef _LINUX_ATOMIC_INSTRUMENTED_H
+#define _LINUX_ATOMIC_INSTRUMENTED_H
+
+#include <linux/build_bug.h>
+#include <linux/compiler.h>
+#include <linux/instrumented.h>
+
+static __always_inline int
+atomic_read(const atomic_t *v)
+{
+       instrument_atomic_read(v, sizeof(*v));
+       return arch_atomic_read(v);
+}
+
+static __always_inline int
+atomic_read_acquire(const atomic_t *v)
+{
+       instrument_atomic_read(v, sizeof(*v));
+       return arch_atomic_read_acquire(v);
+}
+
+static __always_inline void
+atomic_set(atomic_t *v, int i)
+{
+       instrument_atomic_write(v, sizeof(*v));
+       arch_atomic_set(v, i);
+}
+
+static __always_inline void
+atomic_set_release(atomic_t *v, int i)
+{
+       instrument_atomic_write(v, sizeof(*v));
+       arch_atomic_set_release(v, i);
+}
+
+static __always_inline void
+atomic_add(int i, atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       arch_atomic_add(i, v);
+}
+
+static __always_inline int
+atomic_add_return(int i, atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_add_return(i, v);
+}
+
+static __always_inline int
+atomic_add_return_acquire(int i, atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_add_return_acquire(i, v);
+}
+
+static __always_inline int
+atomic_add_return_release(int i, atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_add_return_release(i, v);
+}
+
+static __always_inline int
+atomic_add_return_relaxed(int i, atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_add_return_relaxed(i, v);
+}
+
+static __always_inline int
+atomic_fetch_add(int i, atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_fetch_add(i, v);
+}
+
+static __always_inline int
+atomic_fetch_add_acquire(int i, atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_fetch_add_acquire(i, v);
+}
+
+static __always_inline int
+atomic_fetch_add_release(int i, atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_fetch_add_release(i, v);
+}
+
+static __always_inline int
+atomic_fetch_add_relaxed(int i, atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_fetch_add_relaxed(i, v);
+}
+
+static __always_inline void
+atomic_sub(int i, atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       arch_atomic_sub(i, v);
+}
+
+static __always_inline int
+atomic_sub_return(int i, atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_sub_return(i, v);
+}
+
+static __always_inline int
+atomic_sub_return_acquire(int i, atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_sub_return_acquire(i, v);
+}
+
+static __always_inline int
+atomic_sub_return_release(int i, atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_sub_return_release(i, v);
+}
+
+static __always_inline int
+atomic_sub_return_relaxed(int i, atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_sub_return_relaxed(i, v);
+}
+
+static __always_inline int
+atomic_fetch_sub(int i, atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_fetch_sub(i, v);
+}
+
+static __always_inline int
+atomic_fetch_sub_acquire(int i, atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_fetch_sub_acquire(i, v);
+}
+
+static __always_inline int
+atomic_fetch_sub_release(int i, atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_fetch_sub_release(i, v);
+}
+
+static __always_inline int
+atomic_fetch_sub_relaxed(int i, atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_fetch_sub_relaxed(i, v);
+}
+
+static __always_inline void
+atomic_inc(atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       arch_atomic_inc(v);
+}
+
+static __always_inline int
+atomic_inc_return(atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_inc_return(v);
+}
+
+static __always_inline int
+atomic_inc_return_acquire(atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_inc_return_acquire(v);
+}
+
+static __always_inline int
+atomic_inc_return_release(atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_inc_return_release(v);
+}
+
+static __always_inline int
+atomic_inc_return_relaxed(atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_inc_return_relaxed(v);
+}
+
+static __always_inline int
+atomic_fetch_inc(atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_fetch_inc(v);
+}
+
+static __always_inline int
+atomic_fetch_inc_acquire(atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_fetch_inc_acquire(v);
+}
+
+static __always_inline int
+atomic_fetch_inc_release(atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_fetch_inc_release(v);
+}
+
+static __always_inline int
+atomic_fetch_inc_relaxed(atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_fetch_inc_relaxed(v);
+}
+
+static __always_inline void
+atomic_dec(atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       arch_atomic_dec(v);
+}
+
+static __always_inline int
+atomic_dec_return(atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_dec_return(v);
+}
+
+static __always_inline int
+atomic_dec_return_acquire(atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_dec_return_acquire(v);
+}
+
+static __always_inline int
+atomic_dec_return_release(atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_dec_return_release(v);
+}
+
+static __always_inline int
+atomic_dec_return_relaxed(atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_dec_return_relaxed(v);
+}
+
+static __always_inline int
+atomic_fetch_dec(atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_fetch_dec(v);
+}
+
+static __always_inline int
+atomic_fetch_dec_acquire(atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_fetch_dec_acquire(v);
+}
+
+static __always_inline int
+atomic_fetch_dec_release(atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_fetch_dec_release(v);
+}
+
+static __always_inline int
+atomic_fetch_dec_relaxed(atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_fetch_dec_relaxed(v);
+}
+
+static __always_inline void
+atomic_and(int i, atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       arch_atomic_and(i, v);
+}
+
+static __always_inline int
+atomic_fetch_and(int i, atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_fetch_and(i, v);
+}
+
+static __always_inline int
+atomic_fetch_and_acquire(int i, atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_fetch_and_acquire(i, v);
+}
+
+static __always_inline int
+atomic_fetch_and_release(int i, atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_fetch_and_release(i, v);
+}
+
+static __always_inline int
+atomic_fetch_and_relaxed(int i, atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_fetch_and_relaxed(i, v);
+}
+
+static __always_inline void
+atomic_andnot(int i, atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       arch_atomic_andnot(i, v);
+}
+
+static __always_inline int
+atomic_fetch_andnot(int i, atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_fetch_andnot(i, v);
+}
+
+static __always_inline int
+atomic_fetch_andnot_acquire(int i, atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_fetch_andnot_acquire(i, v);
+}
+
+static __always_inline int
+atomic_fetch_andnot_release(int i, atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_fetch_andnot_release(i, v);
+}
+
+static __always_inline int
+atomic_fetch_andnot_relaxed(int i, atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_fetch_andnot_relaxed(i, v);
+}
+
+static __always_inline void
+atomic_or(int i, atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       arch_atomic_or(i, v);
+}
+
+static __always_inline int
+atomic_fetch_or(int i, atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_fetch_or(i, v);
+}
+
+static __always_inline int
+atomic_fetch_or_acquire(int i, atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_fetch_or_acquire(i, v);
+}
+
+static __always_inline int
+atomic_fetch_or_release(int i, atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_fetch_or_release(i, v);
+}
+
+static __always_inline int
+atomic_fetch_or_relaxed(int i, atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_fetch_or_relaxed(i, v);
+}
+
+static __always_inline void
+atomic_xor(int i, atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       arch_atomic_xor(i, v);
+}
+
+static __always_inline int
+atomic_fetch_xor(int i, atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_fetch_xor(i, v);
+}
+
+static __always_inline int
+atomic_fetch_xor_acquire(int i, atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_fetch_xor_acquire(i, v);
+}
+
+static __always_inline int
+atomic_fetch_xor_release(int i, atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_fetch_xor_release(i, v);
+}
+
+static __always_inline int
+atomic_fetch_xor_relaxed(int i, atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_fetch_xor_relaxed(i, v);
+}
+
+static __always_inline int
+atomic_xchg(atomic_t *v, int i)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_xchg(v, i);
+}
+
+static __always_inline int
+atomic_xchg_acquire(atomic_t *v, int i)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_xchg_acquire(v, i);
+}
+
+static __always_inline int
+atomic_xchg_release(atomic_t *v, int i)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_xchg_release(v, i);
+}
+
+static __always_inline int
+atomic_xchg_relaxed(atomic_t *v, int i)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_xchg_relaxed(v, i);
+}
+
+static __always_inline int
+atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_cmpxchg(v, old, new);
+}
+
+static __always_inline int
+atomic_cmpxchg_acquire(atomic_t *v, int old, int new)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_cmpxchg_acquire(v, old, new);
+}
+
+static __always_inline int
+atomic_cmpxchg_release(atomic_t *v, int old, int new)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_cmpxchg_release(v, old, new);
+}
+
+static __always_inline int
+atomic_cmpxchg_relaxed(atomic_t *v, int old, int new)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_cmpxchg_relaxed(v, old, new);
+}
+
+static __always_inline bool
+atomic_try_cmpxchg(atomic_t *v, int *old, int new)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       instrument_atomic_read_write(old, sizeof(*old));
+       return arch_atomic_try_cmpxchg(v, old, new);
+}
+
+static __always_inline bool
+atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       instrument_atomic_read_write(old, sizeof(*old));
+       return arch_atomic_try_cmpxchg_acquire(v, old, new);
+}
+
+static __always_inline bool
+atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       instrument_atomic_read_write(old, sizeof(*old));
+       return arch_atomic_try_cmpxchg_release(v, old, new);
+}
+
+static __always_inline bool
+atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       instrument_atomic_read_write(old, sizeof(*old));
+       return arch_atomic_try_cmpxchg_relaxed(v, old, new);
+}
+
+static __always_inline bool
+atomic_sub_and_test(int i, atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_sub_and_test(i, v);
+}
+
+static __always_inline bool
+atomic_dec_and_test(atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_dec_and_test(v);
+}
+
+static __always_inline bool
+atomic_inc_and_test(atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_inc_and_test(v);
+}
+
+static __always_inline bool
+atomic_add_negative(int i, atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_add_negative(i, v);
+}
+
+static __always_inline int
+atomic_fetch_add_unless(atomic_t *v, int a, int u)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_fetch_add_unless(v, a, u);
+}
+
+static __always_inline bool
+atomic_add_unless(atomic_t *v, int a, int u)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_add_unless(v, a, u);
+}
+
+static __always_inline bool
+atomic_inc_not_zero(atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_inc_not_zero(v);
+}
+
+static __always_inline bool
+atomic_inc_unless_negative(atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_inc_unless_negative(v);
+}
+
+static __always_inline bool
+atomic_dec_unless_positive(atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_dec_unless_positive(v);
+}
+
+static __always_inline int
+atomic_dec_if_positive(atomic_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_dec_if_positive(v);
+}
+
+static __always_inline s64
+atomic64_read(const atomic64_t *v)
+{
+       instrument_atomic_read(v, sizeof(*v));
+       return arch_atomic64_read(v);
+}
+
+static __always_inline s64
+atomic64_read_acquire(const atomic64_t *v)
+{
+       instrument_atomic_read(v, sizeof(*v));
+       return arch_atomic64_read_acquire(v);
+}
+
+static __always_inline void
+atomic64_set(atomic64_t *v, s64 i)
+{
+       instrument_atomic_write(v, sizeof(*v));
+       arch_atomic64_set(v, i);
+}
+
+static __always_inline void
+atomic64_set_release(atomic64_t *v, s64 i)
+{
+       instrument_atomic_write(v, sizeof(*v));
+       arch_atomic64_set_release(v, i);
+}
+
+static __always_inline void
+atomic64_add(s64 i, atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       arch_atomic64_add(i, v);
+}
+
+static __always_inline s64
+atomic64_add_return(s64 i, atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_add_return(i, v);
+}
+
+static __always_inline s64
+atomic64_add_return_acquire(s64 i, atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_add_return_acquire(i, v);
+}
+
+static __always_inline s64
+atomic64_add_return_release(s64 i, atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_add_return_release(i, v);
+}
+
+static __always_inline s64
+atomic64_add_return_relaxed(s64 i, atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_add_return_relaxed(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_add(s64 i, atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_fetch_add(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_add_acquire(s64 i, atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_fetch_add_acquire(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_add_release(s64 i, atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_fetch_add_release(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_add_relaxed(s64 i, atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_fetch_add_relaxed(i, v);
+}
+
+static __always_inline void
+atomic64_sub(s64 i, atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       arch_atomic64_sub(i, v);
+}
+
+static __always_inline s64
+atomic64_sub_return(s64 i, atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_sub_return(i, v);
+}
+
+static __always_inline s64
+atomic64_sub_return_acquire(s64 i, atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_sub_return_acquire(i, v);
+}
+
+static __always_inline s64
+atomic64_sub_return_release(s64 i, atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_sub_return_release(i, v);
+}
+
+static __always_inline s64
+atomic64_sub_return_relaxed(s64 i, atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_sub_return_relaxed(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_sub(s64 i, atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_fetch_sub(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_sub_acquire(s64 i, atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_fetch_sub_acquire(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_sub_release(s64 i, atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_fetch_sub_release(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_sub_relaxed(s64 i, atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_fetch_sub_relaxed(i, v);
+}
+
+static __always_inline void
+atomic64_inc(atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       arch_atomic64_inc(v);
+}
+
+static __always_inline s64
+atomic64_inc_return(atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_inc_return(v);
+}
+
+static __always_inline s64
+atomic64_inc_return_acquire(atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_inc_return_acquire(v);
+}
+
+static __always_inline s64
+atomic64_inc_return_release(atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_inc_return_release(v);
+}
+
+static __always_inline s64
+atomic64_inc_return_relaxed(atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_inc_return_relaxed(v);
+}
+
+static __always_inline s64
+atomic64_fetch_inc(atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_fetch_inc(v);
+}
+
+static __always_inline s64
+atomic64_fetch_inc_acquire(atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_fetch_inc_acquire(v);
+}
+
+static __always_inline s64
+atomic64_fetch_inc_release(atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_fetch_inc_release(v);
+}
+
+static __always_inline s64
+atomic64_fetch_inc_relaxed(atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_fetch_inc_relaxed(v);
+}
+
+static __always_inline void
+atomic64_dec(atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       arch_atomic64_dec(v);
+}
+
+static __always_inline s64
+atomic64_dec_return(atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_dec_return(v);
+}
+
+static __always_inline s64
+atomic64_dec_return_acquire(atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_dec_return_acquire(v);
+}
+
+static __always_inline s64
+atomic64_dec_return_release(atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_dec_return_release(v);
+}
+
+static __always_inline s64
+atomic64_dec_return_relaxed(atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_dec_return_relaxed(v);
+}
+
+static __always_inline s64
+atomic64_fetch_dec(atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_fetch_dec(v);
+}
+
+static __always_inline s64
+atomic64_fetch_dec_acquire(atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_fetch_dec_acquire(v);
+}
+
+static __always_inline s64
+atomic64_fetch_dec_release(atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_fetch_dec_release(v);
+}
+
+static __always_inline s64
+atomic64_fetch_dec_relaxed(atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_fetch_dec_relaxed(v);
+}
+
+static __always_inline void
+atomic64_and(s64 i, atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       arch_atomic64_and(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_and(s64 i, atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_fetch_and(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_and_acquire(s64 i, atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_fetch_and_acquire(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_and_release(s64 i, atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_fetch_and_release(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_and_relaxed(s64 i, atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_fetch_and_relaxed(i, v);
+}
+
+static __always_inline void
+atomic64_andnot(s64 i, atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       arch_atomic64_andnot(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_andnot(s64 i, atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_fetch_andnot(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_fetch_andnot_acquire(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_fetch_andnot_release(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_fetch_andnot_relaxed(i, v);
+}
+
+static __always_inline void
+atomic64_or(s64 i, atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       arch_atomic64_or(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_or(s64 i, atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_fetch_or(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_or_acquire(s64 i, atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_fetch_or_acquire(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_or_release(s64 i, atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_fetch_or_release(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_or_relaxed(s64 i, atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_fetch_or_relaxed(i, v);
+}
+
+static __always_inline void
+atomic64_xor(s64 i, atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       arch_atomic64_xor(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_xor(s64 i, atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_fetch_xor(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_xor_acquire(s64 i, atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_fetch_xor_acquire(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_xor_release(s64 i, atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_fetch_xor_release(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_xor_relaxed(s64 i, atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_fetch_xor_relaxed(i, v);
+}
+
+static __always_inline s64
+atomic64_xchg(atomic64_t *v, s64 i)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_xchg(v, i);
+}
+
+static __always_inline s64
+atomic64_xchg_acquire(atomic64_t *v, s64 i)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_xchg_acquire(v, i);
+}
+
+static __always_inline s64
+atomic64_xchg_release(atomic64_t *v, s64 i)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_xchg_release(v, i);
+}
+
+static __always_inline s64
+atomic64_xchg_relaxed(atomic64_t *v, s64 i)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_xchg_relaxed(v, i);
+}
+
+static __always_inline s64
+atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_cmpxchg(v, old, new);
+}
+
+static __always_inline s64
+atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_cmpxchg_acquire(v, old, new);
+}
+
+static __always_inline s64
+atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_cmpxchg_release(v, old, new);
+}
+
+static __always_inline s64
+atomic64_cmpxchg_relaxed(atomic64_t *v, s64 old, s64 new)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_cmpxchg_relaxed(v, old, new);
+}
+
+static __always_inline bool
+atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       instrument_atomic_read_write(old, sizeof(*old));
+       return arch_atomic64_try_cmpxchg(v, old, new);
+}
+
+static __always_inline bool
+atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       instrument_atomic_read_write(old, sizeof(*old));
+       return arch_atomic64_try_cmpxchg_acquire(v, old, new);
+}
+
+static __always_inline bool
+atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       instrument_atomic_read_write(old, sizeof(*old));
+       return arch_atomic64_try_cmpxchg_release(v, old, new);
+}
+
+static __always_inline bool
+atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       instrument_atomic_read_write(old, sizeof(*old));
+       return arch_atomic64_try_cmpxchg_relaxed(v, old, new);
+}
+
+static __always_inline bool
+atomic64_sub_and_test(s64 i, atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_sub_and_test(i, v);
+}
+
+static __always_inline bool
+atomic64_dec_and_test(atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_dec_and_test(v);
+}
+
+static __always_inline bool
+atomic64_inc_and_test(atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_inc_and_test(v);
+}
+
+static __always_inline bool
+atomic64_add_negative(s64 i, atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_add_negative(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_fetch_add_unless(v, a, u);
+}
+
+static __always_inline bool
+atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_add_unless(v, a, u);
+}
+
+static __always_inline bool
+atomic64_inc_not_zero(atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_inc_not_zero(v);
+}
+
+static __always_inline bool
+atomic64_inc_unless_negative(atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_inc_unless_negative(v);
+}
+
+static __always_inline bool
+atomic64_dec_unless_positive(atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_dec_unless_positive(v);
+}
+
+static __always_inline s64
+atomic64_dec_if_positive(atomic64_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic64_dec_if_positive(v);
+}
+
+static __always_inline long
+atomic_long_read(const atomic_long_t *v)
+{
+       instrument_atomic_read(v, sizeof(*v));
+       return arch_atomic_long_read(v);
+}
+
+static __always_inline long
+atomic_long_read_acquire(const atomic_long_t *v)
+{
+       instrument_atomic_read(v, sizeof(*v));
+       return arch_atomic_long_read_acquire(v);
+}
+
+static __always_inline void
+atomic_long_set(atomic_long_t *v, long i)
+{
+       instrument_atomic_write(v, sizeof(*v));
+       arch_atomic_long_set(v, i);
+}
+
+static __always_inline void
+atomic_long_set_release(atomic_long_t *v, long i)
+{
+       instrument_atomic_write(v, sizeof(*v));
+       arch_atomic_long_set_release(v, i);
+}
+
+static __always_inline void
+atomic_long_add(long i, atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       arch_atomic_long_add(i, v);
+}
+
+static __always_inline long
+atomic_long_add_return(long i, atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_add_return(i, v);
+}
+
+static __always_inline long
+atomic_long_add_return_acquire(long i, atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_add_return_acquire(i, v);
+}
+
+static __always_inline long
+atomic_long_add_return_release(long i, atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_add_return_release(i, v);
+}
+
+static __always_inline long
+atomic_long_add_return_relaxed(long i, atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_add_return_relaxed(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_add(long i, atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_fetch_add(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_add_acquire(long i, atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_fetch_add_acquire(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_add_release(long i, atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_fetch_add_release(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_add_relaxed(long i, atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_fetch_add_relaxed(i, v);
+}
+
+static __always_inline void
+atomic_long_sub(long i, atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       arch_atomic_long_sub(i, v);
+}
+
+static __always_inline long
+atomic_long_sub_return(long i, atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_sub_return(i, v);
+}
+
+static __always_inline long
+atomic_long_sub_return_acquire(long i, atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_sub_return_acquire(i, v);
+}
+
+static __always_inline long
+atomic_long_sub_return_release(long i, atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_sub_return_release(i, v);
+}
+
+static __always_inline long
+atomic_long_sub_return_relaxed(long i, atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_sub_return_relaxed(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_sub(long i, atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_fetch_sub(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_sub_acquire(long i, atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_fetch_sub_acquire(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_sub_release(long i, atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_fetch_sub_release(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_fetch_sub_relaxed(i, v);
+}
+
+static __always_inline void
+atomic_long_inc(atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       arch_atomic_long_inc(v);
+}
+
+static __always_inline long
+atomic_long_inc_return(atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_inc_return(v);
+}
+
+static __always_inline long
+atomic_long_inc_return_acquire(atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_inc_return_acquire(v);
+}
+
+static __always_inline long
+atomic_long_inc_return_release(atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_inc_return_release(v);
+}
+
+static __always_inline long
+atomic_long_inc_return_relaxed(atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_inc_return_relaxed(v);
+}
+
+static __always_inline long
+atomic_long_fetch_inc(atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_fetch_inc(v);
+}
+
+static __always_inline long
+atomic_long_fetch_inc_acquire(atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_fetch_inc_acquire(v);
+}
+
+static __always_inline long
+atomic_long_fetch_inc_release(atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_fetch_inc_release(v);
+}
+
+static __always_inline long
+atomic_long_fetch_inc_relaxed(atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_fetch_inc_relaxed(v);
+}
+
+static __always_inline void
+atomic_long_dec(atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       arch_atomic_long_dec(v);
+}
+
+static __always_inline long
+atomic_long_dec_return(atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_dec_return(v);
+}
+
+static __always_inline long
+atomic_long_dec_return_acquire(atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_dec_return_acquire(v);
+}
+
+static __always_inline long
+atomic_long_dec_return_release(atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_dec_return_release(v);
+}
+
+static __always_inline long
+atomic_long_dec_return_relaxed(atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_dec_return_relaxed(v);
+}
+
+static __always_inline long
+atomic_long_fetch_dec(atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_fetch_dec(v);
+}
+
+static __always_inline long
+atomic_long_fetch_dec_acquire(atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_fetch_dec_acquire(v);
+}
+
+static __always_inline long
+atomic_long_fetch_dec_release(atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_fetch_dec_release(v);
+}
+
+static __always_inline long
+atomic_long_fetch_dec_relaxed(atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_fetch_dec_relaxed(v);
+}
+
+static __always_inline void
+atomic_long_and(long i, atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       arch_atomic_long_and(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_and(long i, atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_fetch_and(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_and_acquire(long i, atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_fetch_and_acquire(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_and_release(long i, atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_fetch_and_release(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_and_relaxed(long i, atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_fetch_and_relaxed(i, v);
+}
+
+static __always_inline void
+atomic_long_andnot(long i, atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       arch_atomic_long_andnot(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_andnot(long i, atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_fetch_andnot(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_fetch_andnot_acquire(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_andnot_release(long i, atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_fetch_andnot_release(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_fetch_andnot_relaxed(i, v);
+}
+
+static __always_inline void
+atomic_long_or(long i, atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       arch_atomic_long_or(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_or(long i, atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_fetch_or(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_or_acquire(long i, atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_fetch_or_acquire(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_or_release(long i, atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_fetch_or_release(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_or_relaxed(long i, atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_fetch_or_relaxed(i, v);
+}
+
+static __always_inline void
+atomic_long_xor(long i, atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       arch_atomic_long_xor(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_xor(long i, atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_fetch_xor(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_xor_acquire(long i, atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_fetch_xor_acquire(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_xor_release(long i, atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_fetch_xor_release(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_fetch_xor_relaxed(i, v);
+}
+
+static __always_inline long
+atomic_long_xchg(atomic_long_t *v, long i)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_xchg(v, i);
+}
+
+static __always_inline long
+atomic_long_xchg_acquire(atomic_long_t *v, long i)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_xchg_acquire(v, i);
+}
+
+static __always_inline long
+atomic_long_xchg_release(atomic_long_t *v, long i)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_xchg_release(v, i);
+}
+
+static __always_inline long
+atomic_long_xchg_relaxed(atomic_long_t *v, long i)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_xchg_relaxed(v, i);
+}
+
+static __always_inline long
+atomic_long_cmpxchg(atomic_long_t *v, long old, long new)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_cmpxchg(v, old, new);
+}
+
+static __always_inline long
+atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_cmpxchg_acquire(v, old, new);
+}
+
+static __always_inline long
+atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_cmpxchg_release(v, old, new);
+}
+
+static __always_inline long
+atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_cmpxchg_relaxed(v, old, new);
+}
+
+static __always_inline bool
+atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       instrument_atomic_read_write(old, sizeof(*old));
+       return arch_atomic_long_try_cmpxchg(v, old, new);
+}
+
+static __always_inline bool
+atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       instrument_atomic_read_write(old, sizeof(*old));
+       return arch_atomic_long_try_cmpxchg_acquire(v, old, new);
+}
+
+static __always_inline bool
+atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       instrument_atomic_read_write(old, sizeof(*old));
+       return arch_atomic_long_try_cmpxchg_release(v, old, new);
+}
+
+static __always_inline bool
+atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       instrument_atomic_read_write(old, sizeof(*old));
+       return arch_atomic_long_try_cmpxchg_relaxed(v, old, new);
+}
+
+static __always_inline bool
+atomic_long_sub_and_test(long i, atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_sub_and_test(i, v);
+}
+
+static __always_inline bool
+atomic_long_dec_and_test(atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_dec_and_test(v);
+}
+
+static __always_inline bool
+atomic_long_inc_and_test(atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_inc_and_test(v);
+}
+
+static __always_inline bool
+atomic_long_add_negative(long i, atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_add_negative(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_fetch_add_unless(v, a, u);
+}
+
+static __always_inline bool
+atomic_long_add_unless(atomic_long_t *v, long a, long u)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_add_unless(v, a, u);
+}
+
+static __always_inline bool
+atomic_long_inc_not_zero(atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_inc_not_zero(v);
+}
+
+static __always_inline bool
+atomic_long_inc_unless_negative(atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_inc_unless_negative(v);
+}
+
+static __always_inline bool
+atomic_long_dec_unless_positive(atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_dec_unless_positive(v);
+}
+
+static __always_inline long
+atomic_long_dec_if_positive(atomic_long_t *v)
+{
+       instrument_atomic_read_write(v, sizeof(*v));
+       return arch_atomic_long_dec_if_positive(v);
+}
+
+#define xchg(ptr, ...) \
+({ \
+       typeof(ptr) __ai_ptr = (ptr); \
+       instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
+       arch_xchg(__ai_ptr, __VA_ARGS__); \
+})
+
+#define xchg_acquire(ptr, ...) \
+({ \
+       typeof(ptr) __ai_ptr = (ptr); \
+       instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
+       arch_xchg_acquire(__ai_ptr, __VA_ARGS__); \
+})
+
+#define xchg_release(ptr, ...) \
+({ \
+       typeof(ptr) __ai_ptr = (ptr); \
+       instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
+       arch_xchg_release(__ai_ptr, __VA_ARGS__); \
+})
+
+#define xchg_relaxed(ptr, ...) \
+({ \
+       typeof(ptr) __ai_ptr = (ptr); \
+       instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
+       arch_xchg_relaxed(__ai_ptr, __VA_ARGS__); \
+})
+
+#define cmpxchg(ptr, ...) \
+({ \
+       typeof(ptr) __ai_ptr = (ptr); \
+       instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
+       arch_cmpxchg(__ai_ptr, __VA_ARGS__); \
+})
+
+#define cmpxchg_acquire(ptr, ...) \
+({ \
+       typeof(ptr) __ai_ptr = (ptr); \
+       instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
+       arch_cmpxchg_acquire(__ai_ptr, __VA_ARGS__); \
+})
+
+#define cmpxchg_release(ptr, ...) \
+({ \
+       typeof(ptr) __ai_ptr = (ptr); \
+       instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
+       arch_cmpxchg_release(__ai_ptr, __VA_ARGS__); \
+})
+
+#define cmpxchg_relaxed(ptr, ...) \
+({ \
+       typeof(ptr) __ai_ptr = (ptr); \
+       instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
+       arch_cmpxchg_relaxed(__ai_ptr, __VA_ARGS__); \
+})
+
+#define cmpxchg64(ptr, ...) \
+({ \
+       typeof(ptr) __ai_ptr = (ptr); \
+       instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
+       arch_cmpxchg64(__ai_ptr, __VA_ARGS__); \
+})
+
+#define cmpxchg64_acquire(ptr, ...) \
+({ \
+       typeof(ptr) __ai_ptr = (ptr); \
+       instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
+       arch_cmpxchg64_acquire(__ai_ptr, __VA_ARGS__); \
+})
+
+#define cmpxchg64_release(ptr, ...) \
+({ \
+       typeof(ptr) __ai_ptr = (ptr); \
+       instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
+       arch_cmpxchg64_release(__ai_ptr, __VA_ARGS__); \
+})
+
+#define cmpxchg64_relaxed(ptr, ...) \
+({ \
+       typeof(ptr) __ai_ptr = (ptr); \
+       instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
+       arch_cmpxchg64_relaxed(__ai_ptr, __VA_ARGS__); \
+})
+
+#define try_cmpxchg(ptr, oldp, ...) \
+({ \
+       typeof(ptr) __ai_ptr = (ptr); \
+       typeof(oldp) __ai_oldp = (oldp); \
+       instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
+       instrument_atomic_write(__ai_oldp, sizeof(*__ai_oldp)); \
+       arch_try_cmpxchg(__ai_ptr, __ai_oldp, __VA_ARGS__); \
+})
+
+#define try_cmpxchg_acquire(ptr, oldp, ...) \
+({ \
+       typeof(ptr) __ai_ptr = (ptr); \
+       typeof(oldp) __ai_oldp = (oldp); \
+       instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
+       instrument_atomic_write(__ai_oldp, sizeof(*__ai_oldp)); \
+       arch_try_cmpxchg_acquire(__ai_ptr, __ai_oldp, __VA_ARGS__); \
+})
+
+#define try_cmpxchg_release(ptr, oldp, ...) \
+({ \
+       typeof(ptr) __ai_ptr = (ptr); \
+       typeof(oldp) __ai_oldp = (oldp); \
+       instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
+       instrument_atomic_write(__ai_oldp, sizeof(*__ai_oldp)); \
+       arch_try_cmpxchg_release(__ai_ptr, __ai_oldp, __VA_ARGS__); \
+})
+
+#define try_cmpxchg_relaxed(ptr, oldp, ...) \
+({ \
+       typeof(ptr) __ai_ptr = (ptr); \
+       typeof(oldp) __ai_oldp = (oldp); \
+       instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
+       instrument_atomic_write(__ai_oldp, sizeof(*__ai_oldp)); \
+       arch_try_cmpxchg_relaxed(__ai_ptr, __ai_oldp, __VA_ARGS__); \
+})
+
+#define cmpxchg_local(ptr, ...) \
+({ \
+       typeof(ptr) __ai_ptr = (ptr); \
+       instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
+       arch_cmpxchg_local(__ai_ptr, __VA_ARGS__); \
+})
+
+#define cmpxchg64_local(ptr, ...) \
+({ \
+       typeof(ptr) __ai_ptr = (ptr); \
+       instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
+       arch_cmpxchg64_local(__ai_ptr, __VA_ARGS__); \
+})
+
+#define sync_cmpxchg(ptr, ...) \
+({ \
+       typeof(ptr) __ai_ptr = (ptr); \
+       instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
+       arch_sync_cmpxchg(__ai_ptr, __VA_ARGS__); \
+})
+
+#define cmpxchg_double(ptr, ...) \
+({ \
+       typeof(ptr) __ai_ptr = (ptr); \
+       instrument_atomic_write(__ai_ptr, 2 * sizeof(*__ai_ptr)); \
+       arch_cmpxchg_double(__ai_ptr, __VA_ARGS__); \
+})
+
+
+#define cmpxchg_double_local(ptr, ...) \
+({ \
+       typeof(ptr) __ai_ptr = (ptr); \
+       instrument_atomic_write(__ai_ptr, 2 * sizeof(*__ai_ptr)); \
+       arch_cmpxchg_double_local(__ai_ptr, __VA_ARGS__); \
+})
+
+#endif /* _LINUX_ATOMIC_INSTRUMENTED_H */
+// 2a9553f0a9d5619f19151092df5cabbbf16ce835
diff --git a/include/linux/atomic/atomic-long.h b/include/linux/atomic/atomic-long.h
new file mode 100644 (file)
index 0000000..800b8c3
--- /dev/null
@@ -0,0 +1,1014 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Generated by scripts/atomic/gen-atomic-long.sh
+// DO NOT MODIFY THIS FILE DIRECTLY
+
+#ifndef _LINUX_ATOMIC_LONG_H
+#define _LINUX_ATOMIC_LONG_H
+
+#include <linux/compiler.h>
+#include <asm/types.h>
+
+#ifdef CONFIG_64BIT
+typedef atomic64_t atomic_long_t;
+#define ATOMIC_LONG_INIT(i)            ATOMIC64_INIT(i)
+#define atomic_long_cond_read_acquire  atomic64_cond_read_acquire
+#define atomic_long_cond_read_relaxed  atomic64_cond_read_relaxed
+#else
+typedef atomic_t atomic_long_t;
+#define ATOMIC_LONG_INIT(i)            ATOMIC_INIT(i)
+#define atomic_long_cond_read_acquire  atomic_cond_read_acquire
+#define atomic_long_cond_read_relaxed  atomic_cond_read_relaxed
+#endif
+
+#ifdef CONFIG_64BIT
+
+static __always_inline long
+arch_atomic_long_read(const atomic_long_t *v)
+{
+       return arch_atomic64_read(v);
+}
+
+static __always_inline long
+arch_atomic_long_read_acquire(const atomic_long_t *v)
+{
+       return arch_atomic64_read_acquire(v);
+}
+
+static __always_inline void
+arch_atomic_long_set(atomic_long_t *v, long i)
+{
+       arch_atomic64_set(v, i);
+}
+
+static __always_inline void
+arch_atomic_long_set_release(atomic_long_t *v, long i)
+{
+       arch_atomic64_set_release(v, i);
+}
+
+static __always_inline void
+arch_atomic_long_add(long i, atomic_long_t *v)
+{
+       arch_atomic64_add(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_add_return(long i, atomic_long_t *v)
+{
+       return arch_atomic64_add_return(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_add_return_acquire(long i, atomic_long_t *v)
+{
+       return arch_atomic64_add_return_acquire(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_add_return_release(long i, atomic_long_t *v)
+{
+       return arch_atomic64_add_return_release(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_add_return_relaxed(long i, atomic_long_t *v)
+{
+       return arch_atomic64_add_return_relaxed(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_add(long i, atomic_long_t *v)
+{
+       return arch_atomic64_fetch_add(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_add_acquire(long i, atomic_long_t *v)
+{
+       return arch_atomic64_fetch_add_acquire(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_add_release(long i, atomic_long_t *v)
+{
+       return arch_atomic64_fetch_add_release(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_add_relaxed(long i, atomic_long_t *v)
+{
+       return arch_atomic64_fetch_add_relaxed(i, v);
+}
+
+static __always_inline void
+arch_atomic_long_sub(long i, atomic_long_t *v)
+{
+       arch_atomic64_sub(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_sub_return(long i, atomic_long_t *v)
+{
+       return arch_atomic64_sub_return(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_sub_return_acquire(long i, atomic_long_t *v)
+{
+       return arch_atomic64_sub_return_acquire(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_sub_return_release(long i, atomic_long_t *v)
+{
+       return arch_atomic64_sub_return_release(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_sub_return_relaxed(long i, atomic_long_t *v)
+{
+       return arch_atomic64_sub_return_relaxed(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_sub(long i, atomic_long_t *v)
+{
+       return arch_atomic64_fetch_sub(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_sub_acquire(long i, atomic_long_t *v)
+{
+       return arch_atomic64_fetch_sub_acquire(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_sub_release(long i, atomic_long_t *v)
+{
+       return arch_atomic64_fetch_sub_release(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v)
+{
+       return arch_atomic64_fetch_sub_relaxed(i, v);
+}
+
+static __always_inline void
+arch_atomic_long_inc(atomic_long_t *v)
+{
+       arch_atomic64_inc(v);
+}
+
+static __always_inline long
+arch_atomic_long_inc_return(atomic_long_t *v)
+{
+       return arch_atomic64_inc_return(v);
+}
+
+static __always_inline long
+arch_atomic_long_inc_return_acquire(atomic_long_t *v)
+{
+       return arch_atomic64_inc_return_acquire(v);
+}
+
+static __always_inline long
+arch_atomic_long_inc_return_release(atomic_long_t *v)
+{
+       return arch_atomic64_inc_return_release(v);
+}
+
+static __always_inline long
+arch_atomic_long_inc_return_relaxed(atomic_long_t *v)
+{
+       return arch_atomic64_inc_return_relaxed(v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_inc(atomic_long_t *v)
+{
+       return arch_atomic64_fetch_inc(v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_inc_acquire(atomic_long_t *v)
+{
+       return arch_atomic64_fetch_inc_acquire(v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_inc_release(atomic_long_t *v)
+{
+       return arch_atomic64_fetch_inc_release(v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_inc_relaxed(atomic_long_t *v)
+{
+       return arch_atomic64_fetch_inc_relaxed(v);
+}
+
+static __always_inline void
+arch_atomic_long_dec(atomic_long_t *v)
+{
+       arch_atomic64_dec(v);
+}
+
+static __always_inline long
+arch_atomic_long_dec_return(atomic_long_t *v)
+{
+       return arch_atomic64_dec_return(v);
+}
+
+static __always_inline long
+arch_atomic_long_dec_return_acquire(atomic_long_t *v)
+{
+       return arch_atomic64_dec_return_acquire(v);
+}
+
+static __always_inline long
+arch_atomic_long_dec_return_release(atomic_long_t *v)
+{
+       return arch_atomic64_dec_return_release(v);
+}
+
+static __always_inline long
+arch_atomic_long_dec_return_relaxed(atomic_long_t *v)
+{
+       return arch_atomic64_dec_return_relaxed(v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_dec(atomic_long_t *v)
+{
+       return arch_atomic64_fetch_dec(v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_dec_acquire(atomic_long_t *v)
+{
+       return arch_atomic64_fetch_dec_acquire(v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_dec_release(atomic_long_t *v)
+{
+       return arch_atomic64_fetch_dec_release(v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_dec_relaxed(atomic_long_t *v)
+{
+       return arch_atomic64_fetch_dec_relaxed(v);
+}
+
+static __always_inline void
+arch_atomic_long_and(long i, atomic_long_t *v)
+{
+       arch_atomic64_and(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_and(long i, atomic_long_t *v)
+{
+       return arch_atomic64_fetch_and(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_and_acquire(long i, atomic_long_t *v)
+{
+       return arch_atomic64_fetch_and_acquire(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_and_release(long i, atomic_long_t *v)
+{
+       return arch_atomic64_fetch_and_release(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_and_relaxed(long i, atomic_long_t *v)
+{
+       return arch_atomic64_fetch_and_relaxed(i, v);
+}
+
+static __always_inline void
+arch_atomic_long_andnot(long i, atomic_long_t *v)
+{
+       arch_atomic64_andnot(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_andnot(long i, atomic_long_t *v)
+{
+       return arch_atomic64_fetch_andnot(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v)
+{
+       return arch_atomic64_fetch_andnot_acquire(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_andnot_release(long i, atomic_long_t *v)
+{
+       return arch_atomic64_fetch_andnot_release(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v)
+{
+       return arch_atomic64_fetch_andnot_relaxed(i, v);
+}
+
+static __always_inline void
+arch_atomic_long_or(long i, atomic_long_t *v)
+{
+       arch_atomic64_or(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_or(long i, atomic_long_t *v)
+{
+       return arch_atomic64_fetch_or(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_or_acquire(long i, atomic_long_t *v)
+{
+       return arch_atomic64_fetch_or_acquire(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_or_release(long i, atomic_long_t *v)
+{
+       return arch_atomic64_fetch_or_release(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_or_relaxed(long i, atomic_long_t *v)
+{
+       return arch_atomic64_fetch_or_relaxed(i, v);
+}
+
+static __always_inline void
+arch_atomic_long_xor(long i, atomic_long_t *v)
+{
+       arch_atomic64_xor(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_xor(long i, atomic_long_t *v)
+{
+       return arch_atomic64_fetch_xor(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_xor_acquire(long i, atomic_long_t *v)
+{
+       return arch_atomic64_fetch_xor_acquire(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_xor_release(long i, atomic_long_t *v)
+{
+       return arch_atomic64_fetch_xor_release(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v)
+{
+       return arch_atomic64_fetch_xor_relaxed(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_xchg(atomic_long_t *v, long i)
+{
+       return arch_atomic64_xchg(v, i);
+}
+
+static __always_inline long
+arch_atomic_long_xchg_acquire(atomic_long_t *v, long i)
+{
+       return arch_atomic64_xchg_acquire(v, i);
+}
+
+static __always_inline long
+arch_atomic_long_xchg_release(atomic_long_t *v, long i)
+{
+       return arch_atomic64_xchg_release(v, i);
+}
+
+static __always_inline long
+arch_atomic_long_xchg_relaxed(atomic_long_t *v, long i)
+{
+       return arch_atomic64_xchg_relaxed(v, i);
+}
+
+static __always_inline long
+arch_atomic_long_cmpxchg(atomic_long_t *v, long old, long new)
+{
+       return arch_atomic64_cmpxchg(v, old, new);
+}
+
+static __always_inline long
+arch_atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new)
+{
+       return arch_atomic64_cmpxchg_acquire(v, old, new);
+}
+
+static __always_inline long
+arch_atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new)
+{
+       return arch_atomic64_cmpxchg_release(v, old, new);
+}
+
+static __always_inline long
+arch_atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new)
+{
+       return arch_atomic64_cmpxchg_relaxed(v, old, new);
+}
+
+static __always_inline bool
+arch_atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new)
+{
+       return arch_atomic64_try_cmpxchg(v, (s64 *)old, new);
+}
+
+static __always_inline bool
+arch_atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new)
+{
+       return arch_atomic64_try_cmpxchg_acquire(v, (s64 *)old, new);
+}
+
+static __always_inline bool
+arch_atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new)
+{
+       return arch_atomic64_try_cmpxchg_release(v, (s64 *)old, new);
+}
+
+static __always_inline bool
+arch_atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new)
+{
+       return arch_atomic64_try_cmpxchg_relaxed(v, (s64 *)old, new);
+}
+
+static __always_inline bool
+arch_atomic_long_sub_and_test(long i, atomic_long_t *v)
+{
+       return arch_atomic64_sub_and_test(i, v);
+}
+
+static __always_inline bool
+arch_atomic_long_dec_and_test(atomic_long_t *v)
+{
+       return arch_atomic64_dec_and_test(v);
+}
+
+static __always_inline bool
+arch_atomic_long_inc_and_test(atomic_long_t *v)
+{
+       return arch_atomic64_inc_and_test(v);
+}
+
+static __always_inline bool
+arch_atomic_long_add_negative(long i, atomic_long_t *v)
+{
+       return arch_atomic64_add_negative(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u)
+{
+       return arch_atomic64_fetch_add_unless(v, a, u);
+}
+
+static __always_inline bool
+arch_atomic_long_add_unless(atomic_long_t *v, long a, long u)
+{
+       return arch_atomic64_add_unless(v, a, u);
+}
+
+static __always_inline bool
+arch_atomic_long_inc_not_zero(atomic_long_t *v)
+{
+       return arch_atomic64_inc_not_zero(v);
+}
+
+static __always_inline bool
+arch_atomic_long_inc_unless_negative(atomic_long_t *v)
+{
+       return arch_atomic64_inc_unless_negative(v);
+}
+
+static __always_inline bool
+arch_atomic_long_dec_unless_positive(atomic_long_t *v)
+{
+       return arch_atomic64_dec_unless_positive(v);
+}
+
+static __always_inline long
+arch_atomic_long_dec_if_positive(atomic_long_t *v)
+{
+       return arch_atomic64_dec_if_positive(v);
+}
+
+#else /* CONFIG_64BIT */
+
+static __always_inline long
+arch_atomic_long_read(const atomic_long_t *v)
+{
+       return arch_atomic_read(v);
+}
+
+static __always_inline long
+arch_atomic_long_read_acquire(const atomic_long_t *v)
+{
+       return arch_atomic_read_acquire(v);
+}
+
+static __always_inline void
+arch_atomic_long_set(atomic_long_t *v, long i)
+{
+       arch_atomic_set(v, i);
+}
+
+static __always_inline void
+arch_atomic_long_set_release(atomic_long_t *v, long i)
+{
+       arch_atomic_set_release(v, i);
+}
+
+static __always_inline void
+arch_atomic_long_add(long i, atomic_long_t *v)
+{
+       arch_atomic_add(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_add_return(long i, atomic_long_t *v)
+{
+       return arch_atomic_add_return(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_add_return_acquire(long i, atomic_long_t *v)
+{
+       return arch_atomic_add_return_acquire(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_add_return_release(long i, atomic_long_t *v)
+{
+       return arch_atomic_add_return_release(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_add_return_relaxed(long i, atomic_long_t *v)
+{
+       return arch_atomic_add_return_relaxed(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_add(long i, atomic_long_t *v)
+{
+       return arch_atomic_fetch_add(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_add_acquire(long i, atomic_long_t *v)
+{
+       return arch_atomic_fetch_add_acquire(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_add_release(long i, atomic_long_t *v)
+{
+       return arch_atomic_fetch_add_release(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_add_relaxed(long i, atomic_long_t *v)
+{
+       return arch_atomic_fetch_add_relaxed(i, v);
+}
+
+static __always_inline void
+arch_atomic_long_sub(long i, atomic_long_t *v)
+{
+       arch_atomic_sub(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_sub_return(long i, atomic_long_t *v)
+{
+       return arch_atomic_sub_return(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_sub_return_acquire(long i, atomic_long_t *v)
+{
+       return arch_atomic_sub_return_acquire(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_sub_return_release(long i, atomic_long_t *v)
+{
+       return arch_atomic_sub_return_release(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_sub_return_relaxed(long i, atomic_long_t *v)
+{
+       return arch_atomic_sub_return_relaxed(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_sub(long i, atomic_long_t *v)
+{
+       return arch_atomic_fetch_sub(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_sub_acquire(long i, atomic_long_t *v)
+{
+       return arch_atomic_fetch_sub_acquire(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_sub_release(long i, atomic_long_t *v)
+{
+       return arch_atomic_fetch_sub_release(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v)
+{
+       return arch_atomic_fetch_sub_relaxed(i, v);
+}
+
+static __always_inline void
+arch_atomic_long_inc(atomic_long_t *v)
+{
+       arch_atomic_inc(v);
+}
+
+static __always_inline long
+arch_atomic_long_inc_return(atomic_long_t *v)
+{
+       return arch_atomic_inc_return(v);
+}
+
+static __always_inline long
+arch_atomic_long_inc_return_acquire(atomic_long_t *v)
+{
+       return arch_atomic_inc_return_acquire(v);
+}
+
+static __always_inline long
+arch_atomic_long_inc_return_release(atomic_long_t *v)
+{
+       return arch_atomic_inc_return_release(v);
+}
+
+static __always_inline long
+arch_atomic_long_inc_return_relaxed(atomic_long_t *v)
+{
+       return arch_atomic_inc_return_relaxed(v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_inc(atomic_long_t *v)
+{
+       return arch_atomic_fetch_inc(v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_inc_acquire(atomic_long_t *v)
+{
+       return arch_atomic_fetch_inc_acquire(v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_inc_release(atomic_long_t *v)
+{
+       return arch_atomic_fetch_inc_release(v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_inc_relaxed(atomic_long_t *v)
+{
+       return arch_atomic_fetch_inc_relaxed(v);
+}
+
+static __always_inline void
+arch_atomic_long_dec(atomic_long_t *v)
+{
+       arch_atomic_dec(v);
+}
+
+static __always_inline long
+arch_atomic_long_dec_return(atomic_long_t *v)
+{
+       return arch_atomic_dec_return(v);
+}
+
+static __always_inline long
+arch_atomic_long_dec_return_acquire(atomic_long_t *v)
+{
+       return arch_atomic_dec_return_acquire(v);
+}
+
+static __always_inline long
+arch_atomic_long_dec_return_release(atomic_long_t *v)
+{
+       return arch_atomic_dec_return_release(v);
+}
+
+static __always_inline long
+arch_atomic_long_dec_return_relaxed(atomic_long_t *v)
+{
+       return arch_atomic_dec_return_relaxed(v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_dec(atomic_long_t *v)
+{
+       return arch_atomic_fetch_dec(v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_dec_acquire(atomic_long_t *v)
+{
+       return arch_atomic_fetch_dec_acquire(v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_dec_release(atomic_long_t *v)
+{
+       return arch_atomic_fetch_dec_release(v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_dec_relaxed(atomic_long_t *v)
+{
+       return arch_atomic_fetch_dec_relaxed(v);
+}
+
+static __always_inline void
+arch_atomic_long_and(long i, atomic_long_t *v)
+{
+       arch_atomic_and(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_and(long i, atomic_long_t *v)
+{
+       return arch_atomic_fetch_and(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_and_acquire(long i, atomic_long_t *v)
+{
+       return arch_atomic_fetch_and_acquire(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_and_release(long i, atomic_long_t *v)
+{
+       return arch_atomic_fetch_and_release(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_and_relaxed(long i, atomic_long_t *v)
+{
+       return arch_atomic_fetch_and_relaxed(i, v);
+}
+
+static __always_inline void
+arch_atomic_long_andnot(long i, atomic_long_t *v)
+{
+       arch_atomic_andnot(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_andnot(long i, atomic_long_t *v)
+{
+       return arch_atomic_fetch_andnot(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v)
+{
+       return arch_atomic_fetch_andnot_acquire(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_andnot_release(long i, atomic_long_t *v)
+{
+       return arch_atomic_fetch_andnot_release(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v)
+{
+       return arch_atomic_fetch_andnot_relaxed(i, v);
+}
+
+static __always_inline void
+arch_atomic_long_or(long i, atomic_long_t *v)
+{
+       arch_atomic_or(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_or(long i, atomic_long_t *v)
+{
+       return arch_atomic_fetch_or(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_or_acquire(long i, atomic_long_t *v)
+{
+       return arch_atomic_fetch_or_acquire(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_or_release(long i, atomic_long_t *v)
+{
+       return arch_atomic_fetch_or_release(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_or_relaxed(long i, atomic_long_t *v)
+{
+       return arch_atomic_fetch_or_relaxed(i, v);
+}
+
+static __always_inline void
+arch_atomic_long_xor(long i, atomic_long_t *v)
+{
+       arch_atomic_xor(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_xor(long i, atomic_long_t *v)
+{
+       return arch_atomic_fetch_xor(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_xor_acquire(long i, atomic_long_t *v)
+{
+       return arch_atomic_fetch_xor_acquire(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_xor_release(long i, atomic_long_t *v)
+{
+       return arch_atomic_fetch_xor_release(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v)
+{
+       return arch_atomic_fetch_xor_relaxed(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_xchg(atomic_long_t *v, long i)
+{
+       return arch_atomic_xchg(v, i);
+}
+
+static __always_inline long
+arch_atomic_long_xchg_acquire(atomic_long_t *v, long i)
+{
+       return arch_atomic_xchg_acquire(v, i);
+}
+
+static __always_inline long
+arch_atomic_long_xchg_release(atomic_long_t *v, long i)
+{
+       return arch_atomic_xchg_release(v, i);
+}
+
+static __always_inline long
+arch_atomic_long_xchg_relaxed(atomic_long_t *v, long i)
+{
+       return arch_atomic_xchg_relaxed(v, i);
+}
+
+static __always_inline long
+arch_atomic_long_cmpxchg(atomic_long_t *v, long old, long new)
+{
+       return arch_atomic_cmpxchg(v, old, new);
+}
+
+static __always_inline long
+arch_atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new)
+{
+       return arch_atomic_cmpxchg_acquire(v, old, new);
+}
+
+static __always_inline long
+arch_atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new)
+{
+       return arch_atomic_cmpxchg_release(v, old, new);
+}
+
+static __always_inline long
+arch_atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new)
+{
+       return arch_atomic_cmpxchg_relaxed(v, old, new);
+}
+
+static __always_inline bool
+arch_atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new)
+{
+       return arch_atomic_try_cmpxchg(v, (int *)old, new);
+}
+
+static __always_inline bool
+arch_atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new)
+{
+       return arch_atomic_try_cmpxchg_acquire(v, (int *)old, new);
+}
+
+static __always_inline bool
+arch_atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new)
+{
+       return arch_atomic_try_cmpxchg_release(v, (int *)old, new);
+}
+
+static __always_inline bool
+arch_atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new)
+{
+       return arch_atomic_try_cmpxchg_relaxed(v, (int *)old, new);
+}
+
+static __always_inline bool
+arch_atomic_long_sub_and_test(long i, atomic_long_t *v)
+{
+       return arch_atomic_sub_and_test(i, v);
+}
+
+static __always_inline bool
+arch_atomic_long_dec_and_test(atomic_long_t *v)
+{
+       return arch_atomic_dec_and_test(v);
+}
+
+static __always_inline bool
+arch_atomic_long_inc_and_test(atomic_long_t *v)
+{
+       return arch_atomic_inc_and_test(v);
+}
+
+static __always_inline bool
+arch_atomic_long_add_negative(long i, atomic_long_t *v)
+{
+       return arch_atomic_add_negative(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u)
+{
+       return arch_atomic_fetch_add_unless(v, a, u);
+}
+
+static __always_inline bool
+arch_atomic_long_add_unless(atomic_long_t *v, long a, long u)
+{
+       return arch_atomic_add_unless(v, a, u);
+}
+
+static __always_inline bool
+arch_atomic_long_inc_not_zero(atomic_long_t *v)
+{
+       return arch_atomic_inc_not_zero(v);
+}
+
+static __always_inline bool
+arch_atomic_long_inc_unless_negative(atomic_long_t *v)
+{
+       return arch_atomic_inc_unless_negative(v);
+}
+
+static __always_inline bool
+arch_atomic_long_dec_unless_positive(atomic_long_t *v)
+{
+       return arch_atomic_dec_unless_positive(v);
+}
+
+static __always_inline long
+arch_atomic_long_dec_if_positive(atomic_long_t *v)
+{
+       return arch_atomic_dec_if_positive(v);
+}
+
+#endif /* CONFIG_64BIT */
+#endif /* _LINUX_ATOMIC_LONG_H */
+// e8f0e08ff072b74d180eabe2ad001282b38c2c88
index 44df4fc..2953085 100644 (file)
@@ -143,7 +143,7 @@ static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
        sb = inode->i_sb;
 #ifdef CONFIG_BLOCK
        if (sb_is_blkdev_sb(sb))
-               return I_BDEV(inode)->bd_bdi;
+               return I_BDEV(inode)->bd_disk->bdi;
 #endif
        return sb->s_bdi;
 }
index 2203b68..7b5f65a 100644 (file)
@@ -5,7 +5,6 @@
 #ifndef __LINUX_BIO_H
 #define __LINUX_BIO_H
 
-#include <linux/highmem.h>
 #include <linux/mempool.h>
 #include <linux/ioprio.h>
 /* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */
@@ -519,47 +518,6 @@ static inline void bio_clone_blkg_association(struct bio *dst,
                                              struct bio *src) { }
 #endif /* CONFIG_BLK_CGROUP */
 
-#ifdef CONFIG_HIGHMEM
-/*
- * remember never ever reenable interrupts between a bvec_kmap_irq and
- * bvec_kunmap_irq!
- */
-static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
-{
-       unsigned long addr;
-
-       /*
-        * might not be a highmem page, but the preempt/irq count
-        * balancing is a lot nicer this way
-        */
-       local_irq_save(*flags);
-       addr = (unsigned long) kmap_atomic(bvec->bv_page);
-
-       BUG_ON(addr & ~PAGE_MASK);
-
-       return (char *) addr + bvec->bv_offset;
-}
-
-static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
-{
-       unsigned long ptr = (unsigned long) buffer & PAGE_MASK;
-
-       kunmap_atomic((void *) ptr);
-       local_irq_restore(*flags);
-}
-
-#else
-static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
-{
-       return page_address(bvec->bv_page) + bvec->bv_offset;
-}
-
-static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
-{
-       *flags = 0;
-}
-#endif
-
 /*
  * BIO list management for use by remapping drivers (e.g. DM or MD) and loop.
  *
index 3704843..b4de201 100644 (file)
@@ -152,8 +152,8 @@ typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd);
 typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd);
 typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd);
 typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd);
-typedef size_t (blkcg_pol_stat_pd_fn)(struct blkg_policy_data *pd, char *buf,
-                                     size_t size);
+typedef bool (blkcg_pol_stat_pd_fn)(struct blkg_policy_data *pd,
+                               struct seq_file *s);
 
 struct blkcg_policy {
        int                             plid;
index 1d18447..13ba186 100644 (file)
@@ -404,7 +404,13 @@ enum {
        BLK_MQ_F_STACKING       = 1 << 2,
        BLK_MQ_F_TAG_HCTX_SHARED = 1 << 3,
        BLK_MQ_F_BLOCKING       = 1 << 5,
+       /* Do not allow an I/O scheduler to be configured. */
        BLK_MQ_F_NO_SCHED       = 1 << 6,
+       /*
+        * Select 'none' during queue registration in case of a single hwq
+        * or shared hwqs instead of 'mq-deadline'.
+        */
+       BLK_MQ_F_NO_SCHED_BY_DEFAULT    = 1 << 7,
        BLK_MQ_F_ALLOC_POLICY_START_BIT = 8,
        BLK_MQ_F_ALLOC_POLICY_BITS = 1,
 
@@ -426,18 +432,14 @@ enum {
        ((policy & ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1)) \
                << BLK_MQ_F_ALLOC_POLICY_START_BIT)
 
+struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set, void *queuedata,
+               struct lock_class_key *lkclass);
 #define blk_mq_alloc_disk(set, queuedata)                              \
 ({                                                                     \
        static struct lock_class_key __key;                             \
-       struct gendisk *__disk = __blk_mq_alloc_disk(set, queuedata);   \
                                                                        \
-       if (!IS_ERR(__disk))                                            \
-               lockdep_init_map(&__disk->lockdep_map,                  \
-                       "(bio completion)", &__key, 0);                 \
-       __disk;                                                         \
+       __blk_mq_alloc_disk(set, queuedata, &__key);                    \
 })
-struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set,
-               void *queuedata);
 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
 int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
                struct request_queue *q);
index 290f906..1335efa 100644 (file)
@@ -34,14 +34,10 @@ struct block_device {
        void *                  bd_holder;
        int                     bd_holders;
        bool                    bd_write_holder;
-#ifdef CONFIG_SYSFS
-       struct list_head        bd_holder_disks;
-#endif
        struct kobject          *bd_holder_dir;
        u8                      bd_partno;
        spinlock_t              bd_size_lock; /* for bd_inode->i_size updates */
        struct gendisk *        bd_disk;
-       struct backing_dev_info *bd_bdi;
 
        /* The counter of freeze processes */
        int                     bd_fsfreeze_count;
index d3afea4..c9cb124 100644 (file)
@@ -11,7 +11,6 @@
 #include <linux/minmax.h>
 #include <linux/timer.h>
 #include <linux/workqueue.h>
-#include <linux/backing-dev-defs.h>
 #include <linux/wait.h>
 #include <linux/mempool.h>
 #include <linux/pfn.h>
@@ -398,8 +397,6 @@ struct request_queue {
        struct blk_mq_hw_ctx    **queue_hw_ctx;
        unsigned int            nr_hw_queues;
 
-       struct backing_dev_info *backing_dev_info;
-
        /*
         * The queue owner gets to use this for whatever they like.
         * ll_rw_blk doesn't touch it.
@@ -424,6 +421,8 @@ struct request_queue {
 
        spinlock_t              queue_lock;
 
+       struct gendisk          *disk;
+
        /*
         * queue kobject
         */
@@ -664,8 +663,6 @@ extern void blk_clear_pm_only(struct request_queue *q);
        dma_map_page_attrs(dev, (bv)->bv_page, (bv)->bv_offset, (bv)->bv_len, \
        (dir), (attrs))
 
-#define queue_to_disk(q)       (dev_to_disk(kobj_to_dev((q)->kobj.parent)))
-
 static inline bool queue_is_mq(struct request_queue *q)
 {
        return q->mq_ops;
@@ -941,6 +938,10 @@ static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
 #define SECTOR_SIZE (1 << SECTOR_SHIFT)
 #endif
 
+#define PAGE_SECTORS_SHIFT     (PAGE_SHIFT - SECTOR_SHIFT)
+#define PAGE_SECTORS           (1 << PAGE_SECTORS_SHIFT)
+#define SECTOR_MASK            (PAGE_SECTORS - 1)
+
 /*
  * blk_rq_pos()                        : the current sector
  * blk_rq_bytes()              : bytes left in the entire request
@@ -1139,7 +1140,7 @@ void blk_queue_zone_write_granularity(struct request_queue *q,
                                      unsigned int size);
 extern void blk_queue_alignment_offset(struct request_queue *q,
                                       unsigned int alignment);
-void blk_queue_update_readahead(struct request_queue *q);
+void disk_update_readahead(struct gendisk *disk);
 extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
 extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
 extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
@@ -1521,6 +1522,22 @@ static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector
        return offset << SECTOR_SHIFT;
 }
 
+/*
+ * Two cases of handling DISCARD merge:
+ * If max_discard_segments > 1, the driver takes every bio
+ * as a range and send them to controller together. The ranges
+ * needn't to be contiguous.
+ * Otherwise, the bios/requests will be handled as same as
+ * others which should be contiguous.
+ */
+static inline bool blk_discard_mergable(struct request *req)
+{
+       if (req_op(req) == REQ_OP_DISCARD &&
+           queue_max_discard_segments(req->q) > 1)
+               return true;
+       return false;
+}
+
 static inline int bdev_discard_alignment(struct block_device *bdev)
 {
        struct request_queue *q = bdev_get_queue(bdev);
@@ -1855,6 +1872,13 @@ struct block_device_operations {
        char *(*devnode)(struct gendisk *disk, umode_t *mode);
        struct module *owner;
        const struct pr_ops *pr_ops;
+
+       /*
+        * Special callback for probing GPT entry at a given sector.
+        * Needed by Android devices, used by GPT scanner and MMC blk
+        * driver.
+        */
+       int (*alternative_gpt_sector)(struct gendisk *disk, sector_t *sector);
 };
 
 #ifdef CONFIG_COMPAT
@@ -1984,8 +2008,6 @@ void blkdev_put_no_open(struct block_device *bdev);
 struct block_device *bdev_alloc(struct gendisk *disk, u8 partno);
 void bdev_add(struct block_device *bdev, dev_t dev);
 struct block_device *I_BDEV(struct inode *inode);
-struct block_device *bdgrab(struct block_device *bdev);
-void bdput(struct block_device *);
 int truncate_bdev_range(struct block_device *bdev, fmode_t mode, loff_t lstart,
                loff_t lend);
 
index 8b77d08..6c9b10d 100644 (file)
@@ -201,8 +201,8 @@ static inline void bpf_cgroup_storage_unset(void)
 {
        int i;
 
-       for (i = 0; i < BPF_CGROUP_STORAGE_NEST_MAX; i++) {
-               if (unlikely(this_cpu_read(bpf_cgroup_storage_info[i].task) != current))
+       for (i = BPF_CGROUP_STORAGE_NEST_MAX - 1; i >= 0; i--) {
+               if (likely(this_cpu_read(bpf_cgroup_storage_info[i].task) != current))
                        continue;
 
                this_cpu_write(bpf_cgroup_storage_info[i].task, NULL);
index a9db1ea..ae3ac3a 100644 (file)
@@ -134,4 +134,5 @@ BPF_LINK_TYPE(BPF_LINK_TYPE_CGROUP, cgroup)
 BPF_LINK_TYPE(BPF_LINK_TYPE_ITER, iter)
 #ifdef CONFIG_NET
 BPF_LINK_TYPE(BPF_LINK_TYPE_NETNS, netns)
+BPF_LINK_TYPE(BPF_LINK_TYPE_XDP, xdp)
 #endif
index e774ecc..828d08a 100644 (file)
@@ -340,8 +340,8 @@ struct bpf_insn_aux_data {
        };
        u64 map_key_state; /* constant (32 bit) key tracking for maps */
        int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
-       int sanitize_stack_off; /* stack slot to be cleared */
        u32 seen; /* this insn was processed by the verifier at env->pass_cnt */
+       bool sanitize_stack_spill; /* subject to Spectre v4 sanitation */
        bool zext_dst; /* this insn zero extends dst reg */
        u8 alu_state; /* used in combination with alu_limit */
 
@@ -414,6 +414,7 @@ struct bpf_verifier_env {
        u32 used_map_cnt;               /* number of used maps */
        u32 used_btf_cnt;               /* number of used BTF objects */
        u32 id_gen;                     /* used to generate unique reg IDs */
+       bool explore_alu_limits;
        bool allow_ptr_leaks;
        bool allow_uninit_stack;
        bool allow_ptr_to_map_access;
index ff832e6..0e9bdd4 100644 (file)
@@ -4,9 +4,10 @@
  *
  * Copyright (C) 2001 Ming Lei <ming.lei@canonical.com>
  */
-#ifndef __LINUX_BVEC_ITER_H
-#define __LINUX_BVEC_ITER_H
+#ifndef __LINUX_BVEC_H
+#define __LINUX_BVEC_H
 
+#include <linux/highmem.h>
 #include <linux/bug.h>
 #include <linux/errno.h>
 #include <linux/limits.h>
@@ -183,4 +184,61 @@ static inline void bvec_advance(const struct bio_vec *bvec,
        }
 }
 
-#endif /* __LINUX_BVEC_ITER_H */
+/**
+ * bvec_kmap_local - map a bvec into the kernel virtual address space
+ * @bvec: bvec to map
+ *
+ * Must be called on single-page bvecs only.  Call kunmap_local on the returned
+ * address to unmap.
+ */
+static inline void *bvec_kmap_local(struct bio_vec *bvec)
+{
+       return kmap_local_page(bvec->bv_page) + bvec->bv_offset;
+}
+
+/**
+ * memcpy_from_bvec - copy data from a bvec
+ * @bvec: bvec to copy from
+ *
+ * Must be called on single-page bvecs only.
+ */
+static inline void memcpy_from_bvec(char *to, struct bio_vec *bvec)
+{
+       memcpy_from_page(to, bvec->bv_page, bvec->bv_offset, bvec->bv_len);
+}
+
+/**
+ * memcpy_to_bvec - copy data to a bvec
+ * @bvec: bvec to copy to
+ *
+ * Must be called on single-page bvecs only.
+ */
+static inline void memcpy_to_bvec(struct bio_vec *bvec, const char *from)
+{
+       memcpy_to_page(bvec->bv_page, bvec->bv_offset, from, bvec->bv_len);
+}
+
+/**
+ * memzero_bvec - zero all data in a bvec
+ * @bvec: bvec to zero
+ *
+ * Must be called on single-page bvecs only.
+ */
+static inline void memzero_bvec(struct bio_vec *bvec)
+{
+       memzero_page(bvec->bv_page, bvec->bv_offset, bvec->bv_len);
+}
+
+/**
+ * bvec_virt - return the virtual address for a bvec
+ * @bvec: bvec to return the virtual address for
+ *
+ * Note: the caller must ensure that @bvec->bv_page is not a highmem page.
+ */
+static inline void *bvec_virt(struct bio_vec *bvec)
+{
+       WARN_ON_ONCE(PageHighMem(bvec->bv_page));
+       return page_address(bvec->bv_page) + bvec->bv_offset;
+}
+
+#endif /* __LINUX_BVEC_H */
diff --git a/include/linux/cmdline-parser.h b/include/linux/cmdline-parser.h
deleted file mode 100644 (file)
index 68a5418..0000000
+++ /dev/null
@@ -1,46 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Parsing command line, get the partitions information.
- *
- * Written by Cai Zhiyong <caizhiyong@huawei.com>
- *
- */
-#ifndef CMDLINEPARSEH
-#define CMDLINEPARSEH
-
-#include <linux/blkdev.h>
-#include <linux/fs.h>
-#include <linux/slab.h>
-
-/* partition flags */
-#define PF_RDONLY                   0x01 /* Device is read only */
-#define PF_POWERUP_LOCK             0x02 /* Always locked after reset */
-
-struct cmdline_subpart {
-       char name[BDEVNAME_SIZE]; /* partition name, such as 'rootfs' */
-       sector_t from;
-       sector_t size;
-       int flags;
-       struct cmdline_subpart *next_subpart;
-};
-
-struct cmdline_parts {
-       char name[BDEVNAME_SIZE]; /* block device, such as 'mmcblk0' */
-       unsigned int nr_subparts;
-       struct cmdline_subpart *subpart;
-       struct cmdline_parts *next_parts;
-};
-
-void cmdline_parts_free(struct cmdline_parts **parts);
-
-int cmdline_parts_parse(struct cmdline_parts **parts, const char *cmdline);
-
-struct cmdline_parts *cmdline_parts_find(struct cmdline_parts *parts,
-                                        const char *bdev);
-
-int cmdline_parts_set(struct cmdline_parts *parts, sector_t disk_size,
-                     int slot,
-                     int (*add_part)(int, struct cmdline_subpart *, void *),
-                     void *param);
-
-#endif /* CMDLINEPARSEH */
index f39b34b..6ac543d 100644 (file)
@@ -399,7 +399,7 @@ static inline int cpuhp_state_remove_instance(enum cpuhp_state state,
 
 /**
  * cpuhp_state_remove_instance_nocalls - Remove hotplug instance from state
- *                                      without invoking the reatdown callback
+ *                                      without invoking the teardown callback
  * @state:     The state from which the instance is removed
  * @node:      The node for this individual state.
  *
index 04c20de..d2b9c41 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/cpumask.h>
 #include <linux/nodemask.h>
 #include <linux/mm.h>
+#include <linux/mmu_context.h>
 #include <linux/jump_label.h>
 
 #ifdef CONFIG_CPUSETS
@@ -58,7 +59,7 @@ extern void cpuset_wait_for_hotplug(void);
 extern void cpuset_read_lock(void);
 extern void cpuset_read_unlock(void);
 extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
-extern void cpuset_cpus_allowed_fallback(struct task_struct *p);
+extern bool cpuset_cpus_allowed_fallback(struct task_struct *p);
 extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
 #define cpuset_current_mems_allowed (current->mems_allowed)
 void cpuset_init_current_mems_allowed(void);
@@ -184,11 +185,12 @@ static inline void cpuset_read_unlock(void) { }
 static inline void cpuset_cpus_allowed(struct task_struct *p,
                                       struct cpumask *mask)
 {
-       cpumask_copy(mask, cpu_possible_mask);
+       cpumask_copy(mask, task_cpu_possible_mask(p));
 }
 
-static inline void cpuset_cpus_allowed_fallback(struct task_struct *p)
+static inline bool cpuset_cpus_allowed_fallback(struct task_struct *p)
 {
+       return false;
 }
 
 static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
index edb5c18..3f49e65 100644 (file)
@@ -3,8 +3,7 @@
 #define __LINUX_DEBUG_LOCKING_H
 
 #include <linux/atomic.h>
-#include <linux/bug.h>
-#include <linux/printk.h>
+#include <linux/cache.h>
 
 struct task_struct;
 
index 7457d49..94f2cd6 100644 (file)
@@ -151,7 +151,6 @@ typedef size_t (*dm_dax_copy_iter_fn)(struct dm_target *ti, pgoff_t pgoff,
                void *addr, size_t bytes, struct iov_iter *i);
 typedef int (*dm_dax_zero_page_range_fn)(struct dm_target *ti, pgoff_t pgoff,
                size_t nr_pages);
-#define PAGE_SECTORS (PAGE_SIZE / 512)
 
 void dm_error(const char *message);
 
index 59940f1..65d84b6 100644 (file)
@@ -407,6 +407,7 @@ struct dev_links_info {
  * @em_pd:     device's energy model performance domain
  * @pins:      For device pin management.
  *             See Documentation/driver-api/pin-control.rst for details.
+ * @msi_lock:  Lock to protect MSI mask cache and mask register
  * @msi_list:  Hosts MSI descriptors
  * @msi_domain: The generic MSI domain this device is using.
  * @numa_node: NUMA node this device is close to.
@@ -506,6 +507,7 @@ struct device {
        struct dev_pin_info     *pins;
 #endif
 #ifdef CONFIG_GENERIC_MSI_IRQ
+       raw_spinlock_t          msi_lock;
        struct list_head        msi_list;
 #endif
 #ifdef CONFIG_DMA_OPS
index 76d3562..4207d06 100644 (file)
@@ -184,6 +184,7 @@ static inline char *mc_event_error_type(const unsigned int err_type)
  * @MEM_DDR5:          Unbuffered DDR5 RAM
  * @MEM_NVDIMM:                Non-volatile RAM
  * @MEM_WIO2:          Wide I/O 2.
+ * @MEM_HBM2:          High bandwidth Memory Gen 2.
  */
 enum mem_type {
        MEM_EMPTY = 0,
@@ -212,6 +213,7 @@ enum mem_type {
        MEM_DDR5,
        MEM_NVDIMM,
        MEM_WIO2,
+       MEM_HBM2,
 };
 
 #define MEM_FLAG_EMPTY         BIT(MEM_EMPTY)
@@ -239,6 +241,7 @@ enum mem_type {
 #define MEM_FLAG_DDR5           BIT(MEM_DDR5)
 #define MEM_FLAG_NVDIMM         BIT(MEM_NVDIMM)
 #define MEM_FLAG_WIO2          BIT(MEM_WIO2)
+#define MEM_FLAG_HBM2          BIT(MEM_HBM2)
 
 /**
  * enum edac_type - Error Detection and Correction capabilities and mode
index fa0a524..305d5f1 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/err.h>
 #include <linux/percpu-defs.h>
 #include <linux/percpu.h>
+#include <linux/sched.h>
 
 /*
  * CAREFUL: Check include/uapi/asm-generic/fcntl.h when defining
@@ -43,11 +44,9 @@ int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *w
                                  __u64 *cnt);
 void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt);
 
-DECLARE_PER_CPU(int, eventfd_wake_count);
-
-static inline bool eventfd_signal_count(void)
+static inline bool eventfd_signal_allowed(void)
 {
-       return this_cpu_read(eventfd_wake_count);
+       return !current->in_eventfd_signal;
 }
 
 #else /* CONFIG_EVENTFD */
@@ -78,9 +77,9 @@ static inline int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx,
        return -ENOSYS;
 }
 
-static inline bool eventfd_signal_count(void)
+static inline bool eventfd_signal_allowed(void)
 {
-       return false;
+       return true;
 }
 
 static inline void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt)
index a16dbec..eec3b7c 100644 (file)
@@ -27,6 +27,8 @@ extern struct ctl_table fanotify_table[]; /* for sysctl */
 
 #define FANOTIFY_FID_BITS      (FAN_REPORT_FID | FAN_REPORT_DFID_NAME)
 
+#define FANOTIFY_INFO_MODES    (FANOTIFY_FID_BITS | FAN_REPORT_PIDFD)
+
 /*
  * fanotify_init() flags that require CAP_SYS_ADMIN.
  * We do not allow unprivileged groups to request permission events.
@@ -35,6 +37,7 @@ extern struct ctl_table fanotify_table[]; /* for sysctl */
  */
 #define FANOTIFY_ADMIN_INIT_FLAGS      (FANOTIFY_PERM_CLASSES | \
                                         FAN_REPORT_TID | \
+                                        FAN_REPORT_PIDFD | \
                                         FAN_UNLIMITED_QUEUE | \
                                         FAN_UNLIMITED_MARKS)
 
index 4e624c4..c50882f 100644 (file)
@@ -18,8 +18,4 @@ int fiemap_prep(struct inode *inode, struct fiemap_extent_info *fieinfo,
 int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical,
                            u64 phys, u64 len, u32 flags);
 
-int generic_block_fiemap(struct inode *inode,
-               struct fiemap_extent_info *fieinfo, u64 start, u64 len,
-               get_block_t *get_block);
-
 #endif /* _LINUX_FIEMAP_H 1 */
index 472f970..83b8960 100644 (file)
@@ -73,6 +73,11 @@ struct ctl_table_header;
 /* unused opcode to mark call to interpreter with arguments */
 #define BPF_CALL_ARGS  0xe0
 
+/* unused opcode to mark speculation barrier for mitigating
+ * Speculative Store Bypass
+ */
+#define BPF_NOSPEC     0xc0
+
 /* As per nm, we expose JITed images as text (code) section for
  * kallsyms. That way, tools like perf can find it to match
  * addresses.
@@ -390,6 +395,16 @@ static inline bool insn_is_zext(const struct bpf_insn *insn)
                .off   = 0,                                     \
                .imm   = 0 })
 
+/* Speculation barrier */
+
+#define BPF_ST_NOSPEC()                                                \
+       ((struct bpf_insn) {                                    \
+               .code  = BPF_ST | BPF_NOSPEC,                   \
+               .dst_reg = 0,                                   \
+               .src_reg = 0,                                   \
+               .off   = 0,                                     \
+               .imm   = 0 })
+
 /* Internal classic blocks for direct assignment */
 
 #define __BPF_STMT(CODE, K)                                    \
index 6405742..7eae53f 100644 (file)
@@ -436,6 +436,10 @@ int pagecache_write_end(struct file *, struct address_space *mapping,
  * struct address_space - Contents of a cacheable, mappable object.
  * @host: Owner, either the inode or the block_device.
  * @i_pages: Cached pages.
+ * @invalidate_lock: Guards coherency between page cache contents and
+ *   file offset->disk block mappings in the filesystem during invalidates.
+ *   It is also used to block modification of page cache contents through
+ *   memory mappings.
  * @gfp_mask: Memory allocation flags to use for allocating pages.
  * @i_mmap_writable: Number of VM_SHARED mappings.
  * @nr_thps: Number of THPs in the pagecache (non-shmem only).
@@ -453,6 +457,7 @@ int pagecache_write_end(struct file *, struct address_space *mapping,
 struct address_space {
        struct inode            *host;
        struct xarray           i_pages;
+       struct rw_semaphore     invalidate_lock;
        gfp_t                   gfp_mask;
        atomic_t                i_mmap_writable;
 #ifdef CONFIG_READ_ONLY_THP_FOR_FS
@@ -814,9 +819,42 @@ static inline void inode_lock_shared_nested(struct inode *inode, unsigned subcla
        down_read_nested(&inode->i_rwsem, subclass);
 }
 
+static inline void filemap_invalidate_lock(struct address_space *mapping)
+{
+       down_write(&mapping->invalidate_lock);
+}
+
+static inline void filemap_invalidate_unlock(struct address_space *mapping)
+{
+       up_write(&mapping->invalidate_lock);
+}
+
+static inline void filemap_invalidate_lock_shared(struct address_space *mapping)
+{
+       down_read(&mapping->invalidate_lock);
+}
+
+static inline int filemap_invalidate_trylock_shared(
+                                       struct address_space *mapping)
+{
+       return down_read_trylock(&mapping->invalidate_lock);
+}
+
+static inline void filemap_invalidate_unlock_shared(
+                                       struct address_space *mapping)
+{
+       up_read(&mapping->invalidate_lock);
+}
+
 void lock_two_nondirectories(struct inode *, struct inode*);
 void unlock_two_nondirectories(struct inode *, struct inode*);
 
+void filemap_invalidate_lock_two(struct address_space *mapping1,
+                                struct address_space *mapping2);
+void filemap_invalidate_unlock_two(struct address_space *mapping1,
+                                  struct address_space *mapping2);
+
+
 /*
  * NOTE: in a 32bit arch with a preemptable kernel and
  * an UP compile the i_size_read/write must be atomic
@@ -1507,8 +1545,11 @@ struct super_block {
        /* Number of inodes with nlink == 0 but still referenced */
        atomic_long_t s_remove_count;
 
-       /* Pending fsnotify inode refs */
-       atomic_long_t s_fsnotify_inode_refs;
+       /*
+        * Number of inode/mount/sb objects that are being watched, note that
+        * inodes objects are currently double-accounted.
+        */
+       atomic_long_t s_fsnotify_connectors;
 
        /* Being remounted read-only */
        int s_readonly_remount;
@@ -2487,6 +2528,7 @@ struct file_system_type {
 
        struct lock_class_key i_lock_key;
        struct lock_class_key i_mutex_key;
+       struct lock_class_key invalidate_lock_key;
        struct lock_class_key i_mutex_dir_key;
 };
 
@@ -2570,90 +2612,6 @@ extern struct kobject *fs_kobj;
 
 #define MAX_RW_COUNT (INT_MAX & PAGE_MASK)
 
-#ifdef CONFIG_MANDATORY_FILE_LOCKING
-extern int locks_mandatory_locked(struct file *);
-extern int locks_mandatory_area(struct inode *, struct file *, loff_t, loff_t, unsigned char);
-
-/*
- * Candidates for mandatory locking have the setgid bit set
- * but no group execute bit -  an otherwise meaningless combination.
- */
-
-static inline int __mandatory_lock(struct inode *ino)
-{
-       return (ino->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID;
-}
-
-/*
- * ... and these candidates should be on SB_MANDLOCK mounted fs,
- * otherwise these will be advisory locks
- */
-
-static inline int mandatory_lock(struct inode *ino)
-{
-       return IS_MANDLOCK(ino) && __mandatory_lock(ino);
-}
-
-static inline int locks_verify_locked(struct file *file)
-{
-       if (mandatory_lock(locks_inode(file)))
-               return locks_mandatory_locked(file);
-       return 0;
-}
-
-static inline int locks_verify_truncate(struct inode *inode,
-                                   struct file *f,
-                                   loff_t size)
-{
-       if (!inode->i_flctx || !mandatory_lock(inode))
-               return 0;
-
-       if (size < inode->i_size) {
-               return locks_mandatory_area(inode, f, size, inode->i_size - 1,
-                               F_WRLCK);
-       } else {
-               return locks_mandatory_area(inode, f, inode->i_size, size - 1,
-                               F_WRLCK);
-       }
-}
-
-#else /* !CONFIG_MANDATORY_FILE_LOCKING */
-
-static inline int locks_mandatory_locked(struct file *file)
-{
-       return 0;
-}
-
-static inline int locks_mandatory_area(struct inode *inode, struct file *filp,
-                                       loff_t start, loff_t end, unsigned char type)
-{
-       return 0;
-}
-
-static inline int __mandatory_lock(struct inode *inode)
-{
-       return 0;
-}
-
-static inline int mandatory_lock(struct inode *inode)
-{
-       return 0;
-}
-
-static inline int locks_verify_locked(struct file *file)
-{
-       return 0;
-}
-
-static inline int locks_verify_truncate(struct inode *inode, struct file *filp,
-                                       size_t size)
-{
-       return 0;
-}
-
-#endif /* CONFIG_MANDATORY_FILE_LOCKING */
-
-
 #ifdef CONFIG_FILE_LOCKING
 static inline int break_lease(struct inode *inode, unsigned int mode)
 {
@@ -3246,10 +3204,6 @@ ssize_t vfs_iocb_iter_read(struct file *file, struct kiocb *iocb,
 ssize_t vfs_iocb_iter_write(struct file *file, struct kiocb *iocb,
                            struct iov_iter *iter);
 
-/* fs/block_dev.c */
-extern int blkdev_fsync(struct file *filp, loff_t start, loff_t end,
-                       int datasync);
-
 /* fs/splice.c */
 extern ssize_t generic_file_splice_read(struct file *, loff_t *,
                struct pipe_inode_info *, size_t, unsigned int);
index f8acddc..12d3a7d 100644 (file)
@@ -30,6 +30,9 @@ static inline void fsnotify_name(struct inode *dir, __u32 mask,
                                 struct inode *child,
                                 const struct qstr *name, u32 cookie)
 {
+       if (atomic_long_read(&dir->i_sb->s_fsnotify_connectors) == 0)
+               return;
+
        fsnotify(mask, child, FSNOTIFY_EVENT_INODE, dir, name, NULL, cookie);
 }
 
@@ -41,6 +44,9 @@ static inline void fsnotify_dirent(struct inode *dir, struct dentry *dentry,
 
 static inline void fsnotify_inode(struct inode *inode, __u32 mask)
 {
+       if (atomic_long_read(&inode->i_sb->s_fsnotify_connectors) == 0)
+               return;
+
        if (S_ISDIR(inode->i_mode))
                mask |= FS_ISDIR;
 
@@ -53,6 +59,9 @@ static inline int fsnotify_parent(struct dentry *dentry, __u32 mask,
 {
        struct inode *inode = d_inode(dentry);
 
+       if (atomic_long_read(&inode->i_sb->s_fsnotify_connectors) == 0)
+               return 0;
+
        if (S_ISDIR(inode->i_mode)) {
                mask |= FS_ISDIR;
 
index a69f363..832e65f 100644 (file)
@@ -643,6 +643,22 @@ static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; }
 extern int ftrace_make_nop(struct module *mod,
                           struct dyn_ftrace *rec, unsigned long addr);
 
+/**
+ * ftrace_need_init_nop - return whether nop call sites should be initialized
+ *
+ * Normally the compiler's -mnop-mcount generates suitable nops, so we don't
+ * need to call ftrace_init_nop() if the code is built with that flag.
+ * Architectures where this is not always the case may define their own
+ * condition.
+ *
+ * Return must be:
+ *  0      if ftrace_init_nop() should be called
+ *  Nonzero if ftrace_init_nop() should not be called
+ */
+
+#ifndef ftrace_need_init_nop
+#define ftrace_need_init_nop() (!__is_defined(CC_USING_NOP_MCOUNT))
+#endif
 
 /**
  * ftrace_init_nop - initialize a nop call site
index 13b3417..c68d83c 100644 (file)
@@ -60,9 +60,6 @@ struct partition_meta_info {
  * device.
  * Affects responses to the ``CDROM_GET_CAPABILITY`` ioctl.
  *
- * ``GENHD_FL_UP`` (0x0010): indicates that the block device is "up",
- * with a similar meaning to network interfaces.
- *
  * ``GENHD_FL_SUPPRESS_PARTITION_INFO`` (0x0020): don't include
  * partition information in ``/proc/partitions`` or in the output of
  * printk_all_partitions().
@@ -97,7 +94,6 @@ struct partition_meta_info {
 /* 2 is unused (used to be GENHD_FL_DRIVERFS) */
 /* 4 is unused (used to be GENHD_FL_MEDIA_CHANGE_NOTIFY) */
 #define GENHD_FL_CD                            0x0008
-#define GENHD_FL_UP                            0x0010
 #define GENHD_FL_SUPPRESS_PARTITION_INFO       0x0020
 #define GENHD_FL_EXT_DEVT                      0x0040
 #define GENHD_FL_NATIVE_CAPACITY               0x0080
@@ -153,13 +149,15 @@ struct gendisk {
        unsigned long state;
 #define GD_NEED_PART_SCAN              0
 #define GD_READ_ONLY                   1
-#define GD_QUEUE_REF                   2
 
        struct mutex open_mutex;        /* open/close mutex */
        unsigned open_partitions;       /* number of open partitions */
 
+       struct backing_dev_info *bdi;
        struct kobject *slave_dir;
-
+#ifdef CONFIG_BLOCK_HOLDER_DEPRECATED
+       struct list_head slave_bdevs;
+#endif
        struct timer_rand_state *random;
        atomic_t sync_io;               /* RAID */
        struct disk_events *ev;
@@ -172,8 +170,14 @@ struct gendisk {
        int node_id;
        struct badblocks *bb;
        struct lockdep_map lockdep_map;
+       u64 diskseq;
 };
 
+static inline bool disk_live(struct gendisk *disk)
+{
+       return !inode_unhashed(disk->part0->bd_inode);
+}
+
 /*
  * The gendisk is refcounted by the part0 block_device, and the bd_device
  * therein is also used for device model presentation in sysfs.
@@ -210,18 +214,12 @@ static inline dev_t disk_devt(struct gendisk *disk)
 void disk_uevent(struct gendisk *disk, enum kobject_action action);
 
 /* block/genhd.c */
-extern void device_add_disk(struct device *parent, struct gendisk *disk,
-                           const struct attribute_group **groups);
-static inline void add_disk(struct gendisk *disk)
+int device_add_disk(struct device *parent, struct gendisk *disk,
+               const struct attribute_group **groups);
+static inline int add_disk(struct gendisk *disk)
 {
-       device_add_disk(NULL, disk, NULL);
+       return device_add_disk(NULL, disk, NULL);
 }
-extern void device_add_disk_no_queue_reg(struct device *parent, struct gendisk *disk);
-static inline void add_disk_no_queue_reg(struct gendisk *disk)
-{
-       device_add_disk_no_queue_reg(NULL, disk);
-}
-
 extern void del_gendisk(struct gendisk *gp);
 
 void set_disk_ro(struct gendisk *disk, bool read_only);
@@ -236,6 +234,7 @@ extern void disk_block_events(struct gendisk *disk);
 extern void disk_unblock_events(struct gendisk *disk);
 extern void disk_flush_events(struct gendisk *disk, unsigned int mask);
 bool set_capacity_and_notify(struct gendisk *disk, sector_t size);
+bool disk_force_media_change(struct gendisk *disk, unsigned int events);
 
 /* drivers/char/random.c */
 extern void add_disk_randomness(struct gendisk *disk) __latent_entropy;
@@ -259,26 +258,10 @@ static inline sector_t get_capacity(struct gendisk *disk)
 int bdev_disk_changed(struct gendisk *disk, bool invalidate);
 void blk_drop_partitions(struct gendisk *disk);
 
-extern struct gendisk *__alloc_disk_node(int minors, int node_id);
+struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id,
+               struct lock_class_key *lkclass);
 extern void put_disk(struct gendisk *disk);
-
-#define alloc_disk_node(minors, node_id)                               \
-({                                                                     \
-       static struct lock_class_key __key;                             \
-       const char *__name;                                             \
-       struct gendisk *__disk;                                         \
-                                                                       \
-       __name = "(gendisk_completion)"#minors"("#node_id")";           \
-                                                                       \
-       __disk = __alloc_disk_node(minors, node_id);                    \
-                                                                       \
-       if (__disk)                                                     \
-               lockdep_init_map(&__disk->lockdep_map, __name, &__key, 0); \
-                                                                       \
-       __disk;                                                         \
-})
-
-#define alloc_disk(minors) alloc_disk_node(minors, NUMA_NO_NODE)
+struct gendisk *__blk_alloc_disk(int node, struct lock_class_key *lkclass);
 
 /**
  * blk_alloc_disk - allocate a gendisk structure
@@ -291,15 +274,10 @@ extern void put_disk(struct gendisk *disk);
  */
 #define blk_alloc_disk(node_id)                                                \
 ({                                                                     \
-       struct gendisk *__disk = __blk_alloc_disk(node_id);             \
        static struct lock_class_key __key;                             \
                                                                        \
-       if (__disk)                                                     \
-               lockdep_init_map(&__disk->lockdep_map,                  \
-                       "(bio completion)", &__key, 0);                 \
-       __disk;                                                         \
+       __blk_alloc_disk(node_id, &__key);                              \
 })
-struct gendisk *__blk_alloc_disk(int node);
 void blk_cleanup_disk(struct gendisk *disk);
 
 int __register_blkdev(unsigned int major, const char *name,
@@ -316,9 +294,10 @@ void set_capacity(struct gendisk *disk, sector_t size);
 int blkdev_ioctl(struct block_device *, fmode_t, unsigned, unsigned long);
 long compat_blkdev_ioctl(struct file *, unsigned, unsigned long);
 
-#ifdef CONFIG_SYSFS
+#ifdef CONFIG_BLOCK_HOLDER_DEPRECATED
 int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk);
 void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk);
+int bd_register_pending_holders(struct gendisk *disk);
 #else
 static inline int bd_link_disk_holder(struct block_device *bdev,
                                      struct gendisk *disk)
@@ -329,9 +308,14 @@ static inline void bd_unlink_disk_holder(struct block_device *bdev,
                                         struct gendisk *disk)
 {
 }
-#endif /* CONFIG_SYSFS */
+static inline int bd_register_pending_holders(struct gendisk *disk)
+{
+       return 0;
+}
+#endif /* CONFIG_BLOCK_HOLDER_DEPRECATED */
 
 dev_t part_devt(struct gendisk *disk, u8 partno);
+void inc_diskseq(struct gendisk *disk);
 dev_t blk_lookup_devt(const char *name, int partno);
 void blk_request_module(dev_t devt);
 #ifdef CONFIG_BLOCK
index bb5e7b0..0ee1401 100644 (file)
@@ -318,16 +318,12 @@ struct clock_event_device;
 
 extern void hrtimer_interrupt(struct clock_event_device *dev);
 
-extern void clock_was_set_delayed(void);
-
 extern unsigned int hrtimer_resolution;
 
 #else
 
 #define hrtimer_resolution     (unsigned int)LOW_RES_NSEC
 
-static inline void clock_was_set_delayed(void) { }
-
 #endif
 
 static inline ktime_t
@@ -351,13 +347,13 @@ hrtimer_expires_remaining_adjusted(const struct hrtimer *timer)
                                                    timer->base->get_time());
 }
 
-extern void clock_was_set(void);
 #ifdef CONFIG_TIMERFD
 extern void timerfd_clock_was_set(void);
+extern void timerfd_resume(void);
 #else
 static inline void timerfd_clock_was_set(void) { }
+static inline void timerfd_resume(void) { }
 #endif
-extern void hrtimers_resume(void);
 
 DECLARE_PER_CPU(struct tick_device, tick_cpu_device);
 
index 53aa034..aaf4f1b 100644 (file)
@@ -41,7 +41,7 @@ struct in_device {
        unsigned long           mr_qri;         /* Query Response Interval */
        unsigned char           mr_qrv;         /* Query Robustness Variable */
        unsigned char           mr_gq_running;
-       unsigned char           mr_ifc_count;
+       u32                     mr_ifc_count;
        struct timer_list       mr_gq_timer;    /* general query timer */
        struct timer_list       mr_ifc_timer;   /* interface change timer */
 
index 25e2b4e..aee8ff4 100644 (file)
@@ -81,6 +81,8 @@ int ishtp_register_event_cb(struct ishtp_cl_device *device,
 
 /* Get the device * from ishtp device instance */
 struct device *ishtp_device(struct ishtp_cl_device *cl_device);
+/* wait for IPC resume */
+bool ishtp_wait_resume(struct ishtp_device *dev);
 /* Trace interface for clients */
 ishtp_print_log ishtp_trace_callback(struct ishtp_cl_device *cl_device);
 /* Get device pointer of PCI device for DMA acces */
index 2ed65b0..1f22a30 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/hrtimer.h>
 #include <linux/kref.h>
 #include <linux/workqueue.h>
+#include <linux/jump_label.h>
 
 #include <linux/atomic.h>
 #include <asm/ptrace.h>
@@ -474,12 +475,13 @@ extern int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
 
 #ifdef CONFIG_IRQ_FORCED_THREADING
 # ifdef CONFIG_PREEMPT_RT
-#  define force_irqthreads     (true)
+#  define force_irqthreads()   (true)
 # else
-extern bool force_irqthreads;
+DECLARE_STATIC_KEY_FALSE(force_irqthreads_key);
+#  define force_irqthreads()   (static_branch_unlikely(&force_irqthreads_key))
 # endif
 #else
-#define force_irqthreads       (0)
+#define force_irqthreads()     (false)
 #endif
 
 #ifndef local_softirq_pending
index e9bfe69..3f53bc2 100644 (file)
@@ -6,46 +6,22 @@
 #include <linux/sched/rt.h>
 #include <linux/iocontext.h>
 
-/*
- * Gives us 8 prio classes with 13-bits of data for each class
- */
-#define IOPRIO_CLASS_SHIFT     (13)
-#define IOPRIO_PRIO_MASK       ((1UL << IOPRIO_CLASS_SHIFT) - 1)
-
-#define IOPRIO_PRIO_CLASS(mask)        ((mask) >> IOPRIO_CLASS_SHIFT)
-#define IOPRIO_PRIO_DATA(mask) ((mask) & IOPRIO_PRIO_MASK)
-#define IOPRIO_PRIO_VALUE(class, data) (((class) << IOPRIO_CLASS_SHIFT) | data)
-
-#define ioprio_valid(mask)     (IOPRIO_PRIO_CLASS((mask)) != IOPRIO_CLASS_NONE)
+#include <uapi/linux/ioprio.h>
 
 /*
- * These are the io priority groups as implemented by CFQ. RT is the realtime
- * class, it always gets premium service. BE is the best-effort scheduling
- * class, the default for any process. IDLE is the idle scheduling class, it
- * is only served when no one else is using the disk.
+ * Default IO priority.
  */
-enum {
-       IOPRIO_CLASS_NONE,
-       IOPRIO_CLASS_RT,
-       IOPRIO_CLASS_BE,
-       IOPRIO_CLASS_IDLE,
-};
+#define IOPRIO_DEFAULT IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_BE_NORM)
 
 /*
- * 8 best effort priority levels are supported
+ * Check that a priority value has a valid class.
  */
-#define IOPRIO_BE_NR   (8)
-
-enum {
-       IOPRIO_WHO_PROCESS = 1,
-       IOPRIO_WHO_PGRP,
-       IOPRIO_WHO_USER,
-};
+static inline bool ioprio_valid(unsigned short ioprio)
+{
+       unsigned short class = IOPRIO_PRIO_CLASS(ioprio);
 
-/*
- * Fallback BE priority
- */
-#define IOPRIO_NORM    (4)
+       return class > IOPRIO_CLASS_NONE && class <= IOPRIO_CLASS_IDLE;
+}
 
 /*
  * if process has set io priority explicitly, use that. if not, convert
@@ -80,7 +56,7 @@ static inline int get_current_ioprio(void)
 
        if (ioc)
                return ioc->ioprio;
-       return IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0);
+       return IOPRIO_DEFAULT;
 }
 
 /*
index 8e9a9ae..c8293c8 100644 (file)
@@ -569,6 +569,7 @@ struct irq_chip {
  * IRQCHIP_SUPPORTS_NMI:              Chip can deliver NMIs, only for root irqchips
  * IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND:  Invokes __enable_irq()/__disable_irq() for wake irqs
  *                                    in the suspend path if they are in disabled state
+ * IRQCHIP_AFFINITY_PRE_STARTUP:      Default affinity update before startup
  */
 enum {
        IRQCHIP_SET_TYPE_MASKED                 = (1 <<  0),
@@ -581,6 +582,7 @@ enum {
        IRQCHIP_SUPPORTS_LEVEL_MSI              = (1 <<  7),
        IRQCHIP_SUPPORTS_NMI                    = (1 <<  8),
        IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND        = (1 <<  9),
+       IRQCHIP_AFFINITY_PRE_STARTUP            = (1 << 10),
 };
 
 #include <linux/irqdesc.h>
index a70d1ea..3fe6dd8 100644 (file)
@@ -51,10 +51,11 @@ extern atomic_t kfence_allocation_gate;
 static __always_inline bool is_kfence_address(const void *addr)
 {
        /*
-        * The non-NULL check is required in case the __kfence_pool pointer was
-        * never initialized; keep it in the slow-path after the range-check.
+        * The __kfence_pool != NULL check is required to deal with the case
+        * where __kfence_pool == NULL && addr < KFENCE_POOL_SIZE. Keep it in
+        * the slow-path after the range-check!
         */
-       return unlikely((unsigned long)((char *)addr - __kfence_pool) < KFENCE_POOL_SIZE && addr);
+       return unlikely((unsigned long)((char *)addr - __kfence_pool) < KFENCE_POOL_SIZE && __kfence_pool);
 }
 
 /**
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
deleted file mode 100644 (file)
index 0908abd..0000000
+++ /dev/null
@@ -1,697 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef NVM_H
-#define NVM_H
-
-#include <linux/blkdev.h>
-#include <linux/types.h>
-#include <uapi/linux/lightnvm.h>
-
-enum {
-       NVM_IO_OK = 0,
-       NVM_IO_REQUEUE = 1,
-       NVM_IO_DONE = 2,
-       NVM_IO_ERR = 3,
-
-       NVM_IOTYPE_NONE = 0,
-       NVM_IOTYPE_GC = 1,
-};
-
-/* common format */
-#define NVM_GEN_CH_BITS  (8)
-#define NVM_GEN_LUN_BITS (8)
-#define NVM_GEN_BLK_BITS (16)
-#define NVM_GEN_RESERVED (32)
-
-/* 1.2 format */
-#define NVM_12_PG_BITS  (16)
-#define NVM_12_PL_BITS  (4)
-#define NVM_12_SEC_BITS (4)
-#define NVM_12_RESERVED (8)
-
-/* 2.0 format */
-#define NVM_20_SEC_BITS (24)
-#define NVM_20_RESERVED (8)
-
-enum {
-       NVM_OCSSD_SPEC_12 = 12,
-       NVM_OCSSD_SPEC_20 = 20,
-};
-
-struct ppa_addr {
-       /* Generic structure for all addresses */
-       union {
-               /* generic device format */
-               struct {
-                       u64 ch          : NVM_GEN_CH_BITS;
-                       u64 lun         : NVM_GEN_LUN_BITS;
-                       u64 blk         : NVM_GEN_BLK_BITS;
-                       u64 reserved    : NVM_GEN_RESERVED;
-               } a;
-
-               /* 1.2 device format */
-               struct {
-                       u64 ch          : NVM_GEN_CH_BITS;
-                       u64 lun         : NVM_GEN_LUN_BITS;
-                       u64 blk         : NVM_GEN_BLK_BITS;
-                       u64 pg          : NVM_12_PG_BITS;
-                       u64 pl          : NVM_12_PL_BITS;
-                       u64 sec         : NVM_12_SEC_BITS;
-                       u64 reserved    : NVM_12_RESERVED;
-               } g;
-
-               /* 2.0 device format */
-               struct {
-                       u64 grp         : NVM_GEN_CH_BITS;
-                       u64 pu          : NVM_GEN_LUN_BITS;
-                       u64 chk         : NVM_GEN_BLK_BITS;
-                       u64 sec         : NVM_20_SEC_BITS;
-                       u64 reserved    : NVM_20_RESERVED;
-               } m;
-
-               struct {
-                       u64 line        : 63;
-                       u64 is_cached   : 1;
-               } c;
-
-               u64 ppa;
-       };
-};
-
-struct nvm_rq;
-struct nvm_id;
-struct nvm_dev;
-struct nvm_tgt_dev;
-struct nvm_chk_meta;
-
-typedef int (nvm_id_fn)(struct nvm_dev *);
-typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, u8 *);
-typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct ppa_addr *, int, int);
-typedef int (nvm_get_chk_meta_fn)(struct nvm_dev *, sector_t, int,
-                                                       struct nvm_chk_meta *);
-typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *, void *);
-typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *, int);
-typedef void (nvm_destroy_dma_pool_fn)(void *);
-typedef void *(nvm_dev_dma_alloc_fn)(struct nvm_dev *, void *, gfp_t,
-                                                               dma_addr_t *);
-typedef void (nvm_dev_dma_free_fn)(void *, void*, dma_addr_t);
-
-struct nvm_dev_ops {
-       nvm_id_fn               *identity;
-       nvm_op_bb_tbl_fn        *get_bb_tbl;
-       nvm_op_set_bb_fn        *set_bb_tbl;
-
-       nvm_get_chk_meta_fn     *get_chk_meta;
-
-       nvm_submit_io_fn        *submit_io;
-
-       nvm_create_dma_pool_fn  *create_dma_pool;
-       nvm_destroy_dma_pool_fn *destroy_dma_pool;
-       nvm_dev_dma_alloc_fn    *dev_dma_alloc;
-       nvm_dev_dma_free_fn     *dev_dma_free;
-};
-
-#ifdef CONFIG_NVM
-
-#include <linux/file.h>
-#include <linux/dmapool.h>
-
-enum {
-       /* HW Responsibilities */
-       NVM_RSP_L2P     = 1 << 0,
-       NVM_RSP_ECC     = 1 << 1,
-
-       /* Physical Adressing Mode */
-       NVM_ADDRMODE_LINEAR     = 0,
-       NVM_ADDRMODE_CHANNEL    = 1,
-
-       /* Plane programming mode for LUN */
-       NVM_PLANE_SINGLE        = 1,
-       NVM_PLANE_DOUBLE        = 2,
-       NVM_PLANE_QUAD          = 4,
-
-       /* Status codes */
-       NVM_RSP_SUCCESS         = 0x0,
-       NVM_RSP_NOT_CHANGEABLE  = 0x1,
-       NVM_RSP_ERR_FAILWRITE   = 0x40ff,
-       NVM_RSP_ERR_EMPTYPAGE   = 0x42ff,
-       NVM_RSP_ERR_FAILECC     = 0x4281,
-       NVM_RSP_ERR_FAILCRC     = 0x4004,
-       NVM_RSP_WARN_HIGHECC    = 0x4700,
-
-       /* Device opcodes */
-       NVM_OP_PWRITE           = 0x91,
-       NVM_OP_PREAD            = 0x92,
-       NVM_OP_ERASE            = 0x90,
-
-       /* PPA Command Flags */
-       NVM_IO_SNGL_ACCESS      = 0x0,
-       NVM_IO_DUAL_ACCESS      = 0x1,
-       NVM_IO_QUAD_ACCESS      = 0x2,
-
-       /* NAND Access Modes */
-       NVM_IO_SUSPEND          = 0x80,
-       NVM_IO_SLC_MODE         = 0x100,
-       NVM_IO_SCRAMBLE_ENABLE  = 0x200,
-
-       /* Block Types */
-       NVM_BLK_T_FREE          = 0x0,
-       NVM_BLK_T_BAD           = 0x1,
-       NVM_BLK_T_GRWN_BAD      = 0x2,
-       NVM_BLK_T_DEV           = 0x4,
-       NVM_BLK_T_HOST          = 0x8,
-
-       /* Memory capabilities */
-       NVM_ID_CAP_SLC          = 0x1,
-       NVM_ID_CAP_CMD_SUSPEND  = 0x2,
-       NVM_ID_CAP_SCRAMBLE     = 0x4,
-       NVM_ID_CAP_ENCRYPT      = 0x8,
-
-       /* Memory types */
-       NVM_ID_FMTYPE_SLC       = 0,
-       NVM_ID_FMTYPE_MLC       = 1,
-
-       /* Device capabilities */
-       NVM_ID_DCAP_BBLKMGMT    = 0x1,
-       NVM_UD_DCAP_ECC         = 0x2,
-};
-
-struct nvm_id_lp_mlc {
-       u16     num_pairs;
-       u8      pairs[886];
-};
-
-struct nvm_id_lp_tbl {
-       __u8    id[8];
-       struct nvm_id_lp_mlc mlc;
-};
-
-struct nvm_addrf_12 {
-       u8      ch_len;
-       u8      lun_len;
-       u8      blk_len;
-       u8      pg_len;
-       u8      pln_len;
-       u8      sec_len;
-
-       u8      ch_offset;
-       u8      lun_offset;
-       u8      blk_offset;
-       u8      pg_offset;
-       u8      pln_offset;
-       u8      sec_offset;
-
-       u64     ch_mask;
-       u64     lun_mask;
-       u64     blk_mask;
-       u64     pg_mask;
-       u64     pln_mask;
-       u64     sec_mask;
-};
-
-struct nvm_addrf {
-       u8      ch_len;
-       u8      lun_len;
-       u8      chk_len;
-       u8      sec_len;
-       u8      rsv_len[2];
-
-       u8      ch_offset;
-       u8      lun_offset;
-       u8      chk_offset;
-       u8      sec_offset;
-       u8      rsv_off[2];
-
-       u64     ch_mask;
-       u64     lun_mask;
-       u64     chk_mask;
-       u64     sec_mask;
-       u64     rsv_mask[2];
-};
-
-enum {
-       /* Chunk states */
-       NVM_CHK_ST_FREE =       1 << 0,
-       NVM_CHK_ST_CLOSED =     1 << 1,
-       NVM_CHK_ST_OPEN =       1 << 2,
-       NVM_CHK_ST_OFFLINE =    1 << 3,
-
-       /* Chunk types */
-       NVM_CHK_TP_W_SEQ =      1 << 0,
-       NVM_CHK_TP_W_RAN =      1 << 1,
-       NVM_CHK_TP_SZ_SPEC =    1 << 4,
-};
-
-/*
- * Note: The structure size is linked to nvme_nvm_chk_meta such that the same
- * buffer can be used when converting from little endian to cpu addressing.
- */
-struct nvm_chk_meta {
-       u8      state;
-       u8      type;
-       u8      wi;
-       u8      rsvd[5];
-       u64     slba;
-       u64     cnlb;
-       u64     wp;
-};
-
-struct nvm_target {
-       struct list_head list;
-       struct nvm_tgt_dev *dev;
-       struct nvm_tgt_type *type;
-       struct gendisk *disk;
-};
-
-#define ADDR_EMPTY (~0ULL)
-
-#define NVM_TARGET_DEFAULT_OP (101)
-#define NVM_TARGET_MIN_OP (3)
-#define NVM_TARGET_MAX_OP (80)
-
-#define NVM_VERSION_MAJOR 1
-#define NVM_VERSION_MINOR 0
-#define NVM_VERSION_PATCH 0
-
-#define NVM_MAX_VLBA (64) /* max logical blocks in a vector command */
-
-struct nvm_rq;
-typedef void (nvm_end_io_fn)(struct nvm_rq *);
-
-struct nvm_rq {
-       struct nvm_tgt_dev *dev;
-
-       struct bio *bio;
-
-       union {
-               struct ppa_addr ppa_addr;
-               dma_addr_t dma_ppa_list;
-       };
-
-       struct ppa_addr *ppa_list;
-
-       void *meta_list;
-       dma_addr_t dma_meta_list;
-
-       nvm_end_io_fn *end_io;
-
-       uint8_t opcode;
-       uint16_t nr_ppas;
-       uint16_t flags;
-
-       u64 ppa_status; /* ppa media status */
-       int error;
-
-       int is_seq; /* Sequential hint flag. 1.2 only */
-
-       void *private;
-};
-
-static inline struct nvm_rq *nvm_rq_from_pdu(void *pdu)
-{
-       return pdu - sizeof(struct nvm_rq);
-}
-
-static inline void *nvm_rq_to_pdu(struct nvm_rq *rqdata)
-{
-       return rqdata + 1;
-}
-
-static inline struct ppa_addr *nvm_rq_to_ppa_list(struct nvm_rq *rqd)
-{
-       return (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;
-}
-
-enum {
-       NVM_BLK_ST_FREE =       0x1,    /* Free block */
-       NVM_BLK_ST_TGT =        0x2,    /* Block in use by target */
-       NVM_BLK_ST_BAD =        0x8,    /* Bad block */
-};
-
-/* Instance geometry */
-struct nvm_geo {
-       /* device reported version */
-       u8      major_ver_id;
-       u8      minor_ver_id;
-
-       /* kernel short version */
-       u8      version;
-
-       /* instance specific geometry */
-       int num_ch;
-       int num_lun;            /* per channel */
-
-       /* calculated values */
-       int all_luns;           /* across channels */
-       int all_chunks;         /* across channels */
-
-       int op;                 /* over-provision in instance */
-
-       sector_t total_secs;    /* across channels */
-
-       /* chunk geometry */
-       u32     num_chk;        /* chunks per lun */
-       u32     clba;           /* sectors per chunk */
-       u16     csecs;          /* sector size */
-       u16     sos;            /* out-of-band area size */
-       bool    ext;            /* metadata in extended data buffer */
-       u32     mdts;           /* Max data transfer size*/
-
-       /* device write constrains */
-       u32     ws_min;         /* minimum write size */
-       u32     ws_opt;         /* optimal write size */
-       u32     mw_cunits;      /* distance required for successful read */
-       u32     maxoc;          /* maximum open chunks */
-       u32     maxocpu;        /* maximum open chunks per parallel unit */
-
-       /* device capabilities */
-       u32     mccap;
-
-       /* device timings */
-       u32     trdt;           /* Avg. Tread (ns) */
-       u32     trdm;           /* Max Tread (ns) */
-       u32     tprt;           /* Avg. Tprog (ns) */
-       u32     tprm;           /* Max Tprog (ns) */
-       u32     tbet;           /* Avg. Terase (ns) */
-       u32     tbem;           /* Max Terase (ns) */
-
-       /* generic address format */
-       struct nvm_addrf addrf;
-
-       /* 1.2 compatibility */
-       u8      vmnt;
-       u32     cap;
-       u32     dom;
-
-       u8      mtype;
-       u8      fmtype;
-
-       u16     cpar;
-       u32     mpos;
-
-       u8      num_pln;
-       u8      pln_mode;
-       u16     num_pg;
-       u16     fpg_sz;
-};
-
-/* sub-device structure */
-struct nvm_tgt_dev {
-       /* Device information */
-       struct nvm_geo geo;
-
-       /* Base ppas for target LUNs */
-       struct ppa_addr *luns;
-
-       struct request_queue *q;
-
-       struct nvm_dev *parent;
-       void *map;
-};
-
-struct nvm_dev {
-       struct nvm_dev_ops *ops;
-
-       struct list_head devices;
-
-       /* Device information */
-       struct nvm_geo geo;
-
-       unsigned long *lun_map;
-       void *dma_pool;
-
-       /* Backend device */
-       struct request_queue *q;
-       char name[DISK_NAME_LEN];
-       void *private_data;
-
-       struct kref ref;
-       void *rmap;
-
-       struct mutex mlock;
-       spinlock_t lock;
-
-       /* target management */
-       struct list_head area_list;
-       struct list_head targets;
-};
-
-static inline struct ppa_addr generic_to_dev_addr(struct nvm_dev *dev,
-                                                 struct ppa_addr r)
-{
-       struct nvm_geo *geo = &dev->geo;
-       struct ppa_addr l;
-
-       if (geo->version == NVM_OCSSD_SPEC_12) {
-               struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&geo->addrf;
-
-               l.ppa = ((u64)r.g.ch) << ppaf->ch_offset;
-               l.ppa |= ((u64)r.g.lun) << ppaf->lun_offset;
-               l.ppa |= ((u64)r.g.blk) << ppaf->blk_offset;
-               l.ppa |= ((u64)r.g.pg) << ppaf->pg_offset;
-               l.ppa |= ((u64)r.g.pl) << ppaf->pln_offset;
-               l.ppa |= ((u64)r.g.sec) << ppaf->sec_offset;
-       } else {
-               struct nvm_addrf *lbaf = &geo->addrf;
-
-               l.ppa = ((u64)r.m.grp) << lbaf->ch_offset;
-               l.ppa |= ((u64)r.m.pu) << lbaf->lun_offset;
-               l.ppa |= ((u64)r.m.chk) << lbaf->chk_offset;
-               l.ppa |= ((u64)r.m.sec) << lbaf->sec_offset;
-       }
-
-       return l;
-}
-
-static inline struct ppa_addr dev_to_generic_addr(struct nvm_dev *dev,
-                                                 struct ppa_addr r)
-{
-       struct nvm_geo *geo = &dev->geo;
-       struct ppa_addr l;
-
-       l.ppa = 0;
-
-       if (geo->version == NVM_OCSSD_SPEC_12) {
-               struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&geo->addrf;
-
-               l.g.ch = (r.ppa & ppaf->ch_mask) >> ppaf->ch_offset;
-               l.g.lun = (r.ppa & ppaf->lun_mask) >> ppaf->lun_offset;
-               l.g.blk = (r.ppa & ppaf->blk_mask) >> ppaf->blk_offset;
-               l.g.pg = (r.ppa & ppaf->pg_mask) >> ppaf->pg_offset;
-               l.g.pl = (r.ppa & ppaf->pln_mask) >> ppaf->pln_offset;
-               l.g.sec = (r.ppa & ppaf->sec_mask) >> ppaf->sec_offset;
-       } else {
-               struct nvm_addrf *lbaf = &geo->addrf;
-
-               l.m.grp = (r.ppa & lbaf->ch_mask) >> lbaf->ch_offset;
-               l.m.pu = (r.ppa & lbaf->lun_mask) >> lbaf->lun_offset;
-               l.m.chk = (r.ppa & lbaf->chk_mask) >> lbaf->chk_offset;
-               l.m.sec = (r.ppa & lbaf->sec_mask) >> lbaf->sec_offset;
-       }
-
-       return l;
-}
-
-static inline u64 dev_to_chunk_addr(struct nvm_dev *dev, void *addrf,
-                                   struct ppa_addr p)
-{
-       struct nvm_geo *geo = &dev->geo;
-       u64 caddr;
-
-       if (geo->version == NVM_OCSSD_SPEC_12) {
-               struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)addrf;
-
-               caddr = (u64)p.g.pg << ppaf->pg_offset;
-               caddr |= (u64)p.g.pl << ppaf->pln_offset;
-               caddr |= (u64)p.g.sec << ppaf->sec_offset;
-       } else {
-               caddr = p.m.sec;
-       }
-
-       return caddr;
-}
-
-static inline struct ppa_addr nvm_ppa32_to_ppa64(struct nvm_dev *dev,
-                                                void *addrf, u32 ppa32)
-{
-       struct ppa_addr ppa64;
-
-       ppa64.ppa = 0;
-
-       if (ppa32 == -1) {
-               ppa64.ppa = ADDR_EMPTY;
-       } else if (ppa32 & (1U << 31)) {
-               ppa64.c.line = ppa32 & ((~0U) >> 1);
-               ppa64.c.is_cached = 1;
-       } else {
-               struct nvm_geo *geo = &dev->geo;
-
-               if (geo->version == NVM_OCSSD_SPEC_12) {
-                       struct nvm_addrf_12 *ppaf = addrf;
-
-                       ppa64.g.ch = (ppa32 & ppaf->ch_mask) >>
-                                                       ppaf->ch_offset;
-                       ppa64.g.lun = (ppa32 & ppaf->lun_mask) >>
-                                                       ppaf->lun_offset;
-                       ppa64.g.blk = (ppa32 & ppaf->blk_mask) >>
-                                                       ppaf->blk_offset;
-                       ppa64.g.pg = (ppa32 & ppaf->pg_mask) >>
-                                                       ppaf->pg_offset;
-                       ppa64.g.pl = (ppa32 & ppaf->pln_mask) >>
-                                                       ppaf->pln_offset;
-                       ppa64.g.sec = (ppa32 & ppaf->sec_mask) >>
-                                                       ppaf->sec_offset;
-               } else {
-                       struct nvm_addrf *lbaf = addrf;
-
-                       ppa64.m.grp = (ppa32 & lbaf->ch_mask) >>
-                                                       lbaf->ch_offset;
-                       ppa64.m.pu = (ppa32 & lbaf->lun_mask) >>
-                                                       lbaf->lun_offset;
-                       ppa64.m.chk = (ppa32 & lbaf->chk_mask) >>
-                                                       lbaf->chk_offset;
-                       ppa64.m.sec = (ppa32 & lbaf->sec_mask) >>
-                                                       lbaf->sec_offset;
-               }
-       }
-
-       return ppa64;
-}
-
-static inline u32 nvm_ppa64_to_ppa32(struct nvm_dev *dev,
-                                    void *addrf, struct ppa_addr ppa64)
-{
-       u32 ppa32 = 0;
-
-       if (ppa64.ppa == ADDR_EMPTY) {
-               ppa32 = ~0U;
-       } else if (ppa64.c.is_cached) {
-               ppa32 |= ppa64.c.line;
-               ppa32 |= 1U << 31;
-       } else {
-               struct nvm_geo *geo = &dev->geo;
-
-               if (geo->version == NVM_OCSSD_SPEC_12) {
-                       struct nvm_addrf_12 *ppaf = addrf;
-
-                       ppa32 |= ppa64.g.ch << ppaf->ch_offset;
-                       ppa32 |= ppa64.g.lun << ppaf->lun_offset;
-                       ppa32 |= ppa64.g.blk << ppaf->blk_offset;
-                       ppa32 |= ppa64.g.pg << ppaf->pg_offset;
-                       ppa32 |= ppa64.g.pl << ppaf->pln_offset;
-                       ppa32 |= ppa64.g.sec << ppaf->sec_offset;
-               } else {
-                       struct nvm_addrf *lbaf = addrf;
-
-                       ppa32 |= ppa64.m.grp << lbaf->ch_offset;
-                       ppa32 |= ppa64.m.pu << lbaf->lun_offset;
-                       ppa32 |= ppa64.m.chk << lbaf->chk_offset;
-                       ppa32 |= ppa64.m.sec << lbaf->sec_offset;
-               }
-       }
-
-       return ppa32;
-}
-
-static inline int nvm_next_ppa_in_chk(struct nvm_tgt_dev *dev,
-                                     struct ppa_addr *ppa)
-{
-       struct nvm_geo *geo = &dev->geo;
-       int last = 0;
-
-       if (geo->version == NVM_OCSSD_SPEC_12) {
-               int sec = ppa->g.sec;
-
-               sec++;
-               if (sec == geo->ws_min) {
-                       int pg = ppa->g.pg;
-
-                       sec = 0;
-                       pg++;
-                       if (pg == geo->num_pg) {
-                               int pl = ppa->g.pl;
-
-                               pg = 0;
-                               pl++;
-                               if (pl == geo->num_pln)
-                                       last = 1;
-
-                               ppa->g.pl = pl;
-                       }
-                       ppa->g.pg = pg;
-               }
-               ppa->g.sec = sec;
-       } else {
-               ppa->m.sec++;
-               if (ppa->m.sec == geo->clba)
-                       last = 1;
-       }
-
-       return last;
-}
-
-typedef sector_t (nvm_tgt_capacity_fn)(void *);
-typedef void *(nvm_tgt_init_fn)(struct nvm_tgt_dev *, struct gendisk *,
-                               int flags);
-typedef void (nvm_tgt_exit_fn)(void *, bool);
-typedef int (nvm_tgt_sysfs_init_fn)(struct gendisk *);
-typedef void (nvm_tgt_sysfs_exit_fn)(struct gendisk *);
-
-enum {
-       NVM_TGT_F_DEV_L2P = 0,
-       NVM_TGT_F_HOST_L2P = 1 << 0,
-};
-
-struct nvm_tgt_type {
-       const char *name;
-       unsigned int version[3];
-       int flags;
-
-       /* target entry points */
-       const struct block_device_operations *bops;
-       nvm_tgt_capacity_fn *capacity;
-
-       /* module-specific init/teardown */
-       nvm_tgt_init_fn *init;
-       nvm_tgt_exit_fn *exit;
-
-       /* sysfs */
-       nvm_tgt_sysfs_init_fn *sysfs_init;
-       nvm_tgt_sysfs_exit_fn *sysfs_exit;
-
-       /* For internal use */
-       struct list_head list;
-       struct module *owner;
-};
-
-extern int nvm_register_tgt_type(struct nvm_tgt_type *);
-extern void nvm_unregister_tgt_type(struct nvm_tgt_type *);
-
-extern void *nvm_dev_dma_alloc(struct nvm_dev *, gfp_t, dma_addr_t *);
-extern void nvm_dev_dma_free(struct nvm_dev *, void *, dma_addr_t);
-
-extern struct nvm_dev *nvm_alloc_dev(int);
-extern int nvm_register(struct nvm_dev *);
-extern void nvm_unregister(struct nvm_dev *);
-
-extern int nvm_get_chunk_meta(struct nvm_tgt_dev *, struct ppa_addr,
-                             int, struct nvm_chk_meta *);
-extern int nvm_set_chunk_meta(struct nvm_tgt_dev *, struct ppa_addr *,
-                             int, int);
-extern int nvm_submit_io(struct nvm_tgt_dev *, struct nvm_rq *, void *);
-extern int nvm_submit_io_sync(struct nvm_tgt_dev *, struct nvm_rq *, void *);
-extern void nvm_end_io(struct nvm_rq *);
-
-#else /* CONFIG_NVM */
-struct nvm_dev_ops;
-
-static inline struct nvm_dev *nvm_alloc_dev(int node)
-{
-       return ERR_PTR(-EINVAL);
-}
-static inline int nvm_register(struct nvm_dev *dev)
-{
-       return -EINVAL;
-}
-static inline void nvm_unregister(struct nvm_dev *dev) {}
-#endif /* CONFIG_NVM */
-#endif /* LIGHTNVM.H */
index 17b5943..fd3d0b3 100644 (file)
@@ -41,6 +41,8 @@ int linear_range_get_selector_low(const struct linear_range *r,
 int linear_range_get_selector_high(const struct linear_range *r,
                                   unsigned int val, unsigned int *selector,
                                   bool *found);
+void linear_range_get_selector_within(const struct linear_range *r,
+                                     unsigned int val, unsigned int *selector);
 int linear_range_get_selector_low_array(const struct linear_range *r,
                                        int ranges, unsigned int val,
                                        unsigned int *selector, bool *found);
index ded90b0..975e33b 100644 (file)
@@ -6,6 +6,8 @@
 #include <linux/percpu-defs.h>
 #include <linux/lockdep.h>
 
+#ifndef CONFIG_PREEMPT_RT
+
 typedef struct {
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
        struct lockdep_map      dep_map;
@@ -14,29 +16,14 @@ typedef struct {
 } local_lock_t;
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define LL_DEP_MAP_INIT(lockname)                     \
+# define LOCAL_LOCK_DEBUG_INIT(lockname)               \
        .dep_map = {                                    \
                .name = #lockname,                      \
                .wait_type_inner = LD_WAIT_CONFIG,      \
-               .lock_type = LD_LOCK_PERCPU,                    \
-       }
-#else
-# define LL_DEP_MAP_INIT(lockname)
-#endif
-
-#define INIT_LOCAL_LOCK(lockname)      { LL_DEP_MAP_INIT(lockname) }
-
-#define __local_lock_init(lock)                                        \
-do {                                                           \
-       static struct lock_class_key __key;                     \
-                                                               \
-       debug_check_no_locks_freed((void *)lock, sizeof(*lock));\
-       lockdep_init_map_type(&(lock)->dep_map, #lock, &__key, 0, \
-                             LD_WAIT_CONFIG, LD_WAIT_INV,      \
-                             LD_LOCK_PERCPU);                  \
-} while (0)
+               .lock_type = LD_LOCK_PERCPU,            \
+       },                                              \
+       .owner = NULL,
 
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
 static inline void local_lock_acquire(local_lock_t *l)
 {
        lock_map_acquire(&l->dep_map);
@@ -51,11 +38,30 @@ static inline void local_lock_release(local_lock_t *l)
        lock_map_release(&l->dep_map);
 }
 
+static inline void local_lock_debug_init(local_lock_t *l)
+{
+       l->owner = NULL;
+}
 #else /* CONFIG_DEBUG_LOCK_ALLOC */
+# define LOCAL_LOCK_DEBUG_INIT(lockname)
 static inline void local_lock_acquire(local_lock_t *l) { }
 static inline void local_lock_release(local_lock_t *l) { }
+static inline void local_lock_debug_init(local_lock_t *l) { }
 #endif /* !CONFIG_DEBUG_LOCK_ALLOC */
 
+#define INIT_LOCAL_LOCK(lockname)      { LOCAL_LOCK_DEBUG_INIT(lockname) }
+
+#define __local_lock_init(lock)                                        \
+do {                                                           \
+       static struct lock_class_key __key;                     \
+                                                               \
+       debug_check_no_locks_freed((void *)lock, sizeof(*lock));\
+       lockdep_init_map_type(&(lock)->dep_map, #lock, &__key,  \
+                             0, LD_WAIT_CONFIG, LD_WAIT_INV,   \
+                             LD_LOCK_PERCPU);                  \
+       local_lock_debug_init(lock);                            \
+} while (0)
+
 #define __local_lock(lock)                                     \
        do {                                                    \
                preempt_disable();                              \
@@ -91,3 +97,45 @@ static inline void local_lock_release(local_lock_t *l) { }
                local_lock_release(this_cpu_ptr(lock));         \
                local_irq_restore(flags);                       \
        } while (0)
+
+#else /* !CONFIG_PREEMPT_RT */
+
+/*
+ * On PREEMPT_RT local_lock maps to a per CPU spinlock, which protects the
+ * critical section while staying preemptible.
+ */
+typedef spinlock_t local_lock_t;
+
+#define INIT_LOCAL_LOCK(lockname) __LOCAL_SPIN_LOCK_UNLOCKED((lockname))
+
+#define __local_lock_init(l)                                   \
+       do {                                                    \
+               local_spin_lock_init((l));                      \
+       } while (0)
+
+#define __local_lock(__lock)                                   \
+       do {                                                    \
+               migrate_disable();                              \
+               spin_lock(this_cpu_ptr((__lock)));              \
+       } while (0)
+
+#define __local_lock_irq(lock)                 __local_lock(lock)
+
+#define __local_lock_irqsave(lock, flags)                      \
+       do {                                                    \
+               typecheck(unsigned long, flags);                \
+               flags = 0;                                      \
+               __local_lock(lock);                             \
+       } while (0)
+
+#define __local_unlock(__lock)                                 \
+       do {                                                    \
+               spin_unlock(this_cpu_ptr((__lock)));            \
+               migrate_enable();                               \
+       } while (0)
+
+#define __local_unlock_irq(lock)               __local_unlock(lock)
+
+#define __local_unlock_irqrestore(lock, flags) __local_unlock(lock)
+
+#endif /* CONFIG_PREEMPT_RT */
index bfe5c48..2479792 100644 (file)
@@ -612,12 +612,15 @@ static inline bool mem_cgroup_disabled(void)
        return !cgroup_subsys_enabled(memory_cgrp_subsys);
 }
 
-static inline unsigned long mem_cgroup_protection(struct mem_cgroup *root,
-                                                 struct mem_cgroup *memcg,
-                                                 bool in_low_reclaim)
+static inline void mem_cgroup_protection(struct mem_cgroup *root,
+                                        struct mem_cgroup *memcg,
+                                        unsigned long *min,
+                                        unsigned long *low)
 {
+       *min = *low = 0;
+
        if (mem_cgroup_disabled())
-               return 0;
+               return;
 
        /*
         * There is no reclaim protection applied to a targeted reclaim.
@@ -653,13 +656,10 @@ static inline unsigned long mem_cgroup_protection(struct mem_cgroup *root,
         *
         */
        if (root == memcg)
-               return 0;
-
-       if (in_low_reclaim)
-               return READ_ONCE(memcg->memory.emin);
+               return;
 
-       return max(READ_ONCE(memcg->memory.emin),
-                  READ_ONCE(memcg->memory.elow));
+       *min = READ_ONCE(memcg->memory.emin);
+       *low = READ_ONCE(memcg->memory.elow);
 }
 
 void mem_cgroup_calculate_protection(struct mem_cgroup *root,
@@ -1147,11 +1147,12 @@ static inline void memcg_memory_event_mm(struct mm_struct *mm,
 {
 }
 
-static inline unsigned long mem_cgroup_protection(struct mem_cgroup *root,
-                                                 struct mem_cgroup *memcg,
-                                                 bool in_low_reclaim)
+static inline void mem_cgroup_protection(struct mem_cgroup *root,
+                                        struct mem_cgroup *memcg,
+                                        unsigned long *min,
+                                        unsigned long *low)
 {
-       return 0;
+       *min = *low = 0;
 }
 
 static inline void mem_cgroup_calculate_protection(struct mem_cgroup *root,
index 40a0c2d..2d1895c 100644 (file)
@@ -200,13 +200,13 @@ enum rt5033_reg {
 #define RT5033_REGULATOR_BUCK_VOLTAGE_MIN              1000000U
 #define RT5033_REGULATOR_BUCK_VOLTAGE_MAX              3000000U
 #define RT5033_REGULATOR_BUCK_VOLTAGE_STEP             100000U
-#define RT5033_REGULATOR_BUCK_VOLTAGE_STEP_NUM         21
+#define RT5033_REGULATOR_BUCK_VOLTAGE_STEP_NUM         32
 
 /* RT5033 regulator LDO output voltage uV */
 #define RT5033_REGULATOR_LDO_VOLTAGE_MIN               1200000U
 #define RT5033_REGULATOR_LDO_VOLTAGE_MAX               3000000U
 #define RT5033_REGULATOR_LDO_VOLTAGE_STEP              100000U
-#define RT5033_REGULATOR_LDO_VOLTAGE_STEP_NUM          19
+#define RT5033_REGULATOR_LDO_VOLTAGE_STEP_NUM          32
 
 /* RT5033 regulator SAFE LDO output voltage uV */
 #define RT5033_REGULATOR_SAFE_LDO_VOLTAGE              4900000U
index 1efe374..25a8be5 100644 (file)
@@ -1044,8 +1044,7 @@ void mlx5_unregister_debugfs(void);
 void mlx5_fill_page_array(struct mlx5_frag_buf *buf, __be64 *pas);
 void mlx5_fill_page_frag_array_perm(struct mlx5_frag_buf *buf, __be64 *pas, u8 perm);
 void mlx5_fill_page_frag_array(struct mlx5_frag_buf *frag_buf, __be64 *pas);
-int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
-                   unsigned int *irqn);
+int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn);
 int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
 int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
 
index 98b56b7..1a9c9d9 100644 (file)
@@ -11,13 +11,15 @@ enum {
 };
 
 enum {
-       MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_SPLIT   = 0x1, // do I check this caps?
-       MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_PACKED  = 0x2,
+       MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_SPLIT   = 0,
+       MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_PACKED  = 1,
 };
 
 enum {
-       MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_SPLIT   = 0,
-       MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_PACKED  = 1,
+       MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_SPLIT =
+               BIT(MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_SPLIT),
+       MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_PACKED =
+               BIT(MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_PACKED),
 };
 
 struct mlx5_ifc_virtio_q_bits {
index 74e6c06..37f9758 100644 (file)
@@ -109,6 +109,7 @@ struct mmc_ext_csd {
        u8                      raw_hc_erase_gap_size;  /* 221 */
        u8                      raw_erase_timeout_mult; /* 223 */
        u8                      raw_hc_erase_grp_size;  /* 224 */
+       u8                      raw_boot_mult;          /* 226 */
        u8                      raw_sec_trim_mult;      /* 229 */
        u8                      raw_sec_erase_mult;     /* 230 */
        u8                      raw_sec_feature_support;/* 231 */
index 0abd47e..78dadf8 100644 (file)
@@ -398,6 +398,7 @@ struct mmc_host {
 #else
 #define MMC_CAP2_CRYPTO                0
 #endif
+#define MMC_CAP2_ALT_GPT_TEGRA (1 << 28)       /* Host with eMMC that has GPT entry at a non-standard location */
 
        int                     fixed_drv_type; /* fixed driver type for non-removable media */
 
index 03dee12..b9b970f 100644 (file)
 static inline void leave_mm(int cpu) { }
 #endif
 
+/*
+ * CPUs that are capable of running user task @p. Must contain at least one
+ * active CPU. It is assumed that the kernel can run on all CPUs, so calling
+ * this for a kernel thread is pointless.
+ *
+ * By default, we assume a sane, homogeneous system.
+ */
+#ifndef task_cpu_possible_mask
+# define task_cpu_possible_mask(p)     cpu_possible_mask
+# define task_cpu_possible(cpu, p)     true
+#else
+# define task_cpu_possible(cpu, p)     cpumask_test_cpu((cpu), task_cpu_possible_mask(p))
+#endif
+
 #endif
index eed280f..962cd41 100644 (file)
@@ -431,6 +431,8 @@ extern int param_get_int(char *buffer, const struct kernel_param *kp);
 extern const struct kernel_param_ops param_ops_uint;
 extern int param_set_uint(const char *val, const struct kernel_param *kp);
 extern int param_get_uint(char *buffer, const struct kernel_param *kp);
+int param_set_uint_minmax(const char *val, const struct kernel_param *kp,
+               unsigned int min, unsigned int max);
 #define param_check_uint(name, p) __param_check(name, p, unsigned int)
 
 extern const struct kernel_param_ops param_ops_long;
index 6aff469..49cf6eb 100644 (file)
@@ -107,7 +107,8 @@ struct ti_sci_inta_msi_desc {
  *                     address or data changes
  * @write_msi_msg_data:        Data parameter for the callback.
  *
- * @masked:    [PCI MSI/X] Mask bits
+ * @msi_mask:  [PCI MSI]   MSI cached mask bits
+ * @msix_ctrl: [PCI MSI-X] MSI-X cached per vector control bits
  * @is_msix:   [PCI MSI/X] True if MSI-X
  * @multiple:  [PCI MSI/X] log2 num of messages allocated
  * @multi_cap: [PCI MSI/X] log2 num of messages supported
@@ -139,7 +140,10 @@ struct msi_desc {
        union {
                /* PCI MSI/X specific data */
                struct {
-                       u32 masked;
+                       union {
+                               u32 msi_mask;
+                               u32 msix_ctrl;
+                       };
                        struct {
                                u8      is_msix         : 1;
                                u8      multiple        : 3;
@@ -232,11 +236,13 @@ void free_msi_entry(struct msi_desc *entry);
 void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
 void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
 
-u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag);
-u32 __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag);
 void pci_msi_mask_irq(struct irq_data *data);
 void pci_msi_unmask_irq(struct irq_data *data);
 
+const struct attribute_group **msi_populate_sysfs(struct device *dev);
+void msi_destroy_sysfs(struct device *dev,
+                      const struct attribute_group **msi_irq_groups);
+
 /*
  * The arch hooks to setup up msi irqs. Default functions are implemented
  * as weak symbols so that they /can/ be overriden by architecture specific
index e193235..8f226d4 100644 (file)
 #include <linux/osq_lock.h>
 #include <linux/debug_locks.h>
 
-struct ww_class;
-struct ww_acquire_ctx;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define __DEP_MAP_MUTEX_INITIALIZER(lockname)                 \
+               , .dep_map = {                                  \
+                       .name = #lockname,                      \
+                       .wait_type_inner = LD_WAIT_SLEEP,       \
+               }
+#else
+# define __DEP_MAP_MUTEX_INITIALIZER(lockname)
+#endif
+
+#ifndef CONFIG_PREEMPT_RT
 
 /*
  * Simple, straightforward mutexes with strict semantics:
@@ -53,7 +62,7 @@ struct ww_acquire_ctx;
  */
 struct mutex {
        atomic_long_t           owner;
-       spinlock_t              wait_lock;
+       raw_spinlock_t          wait_lock;
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
        struct optimistic_spin_queue osq; /* Spinner MCS lock */
 #endif
@@ -66,27 +75,6 @@ struct mutex {
 #endif
 };
 
-struct ww_mutex {
-       struct mutex base;
-       struct ww_acquire_ctx *ctx;
-#ifdef CONFIG_DEBUG_MUTEXES
-       struct ww_class *ww_class;
-#endif
-};
-
-/*
- * This is the control structure for tasks blocked on mutex,
- * which resides on the blocked task's kernel stack:
- */
-struct mutex_waiter {
-       struct list_head        list;
-       struct task_struct      *task;
-       struct ww_acquire_ctx   *ww_ctx;
-#ifdef CONFIG_DEBUG_MUTEXES
-       void                    *magic;
-#endif
-};
-
 #ifdef CONFIG_DEBUG_MUTEXES
 
 #define __DEBUG_MUTEX_INITIALIZER(lockname)                            \
@@ -117,19 +105,9 @@ do {                                                                       \
        __mutex_init((mutex), #mutex, &__key);                          \
 } while (0)
 
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define __DEP_MAP_MUTEX_INITIALIZER(lockname)                 \
-               , .dep_map = {                                  \
-                       .name = #lockname,                      \
-                       .wait_type_inner = LD_WAIT_SLEEP,       \
-               }
-#else
-# define __DEP_MAP_MUTEX_INITIALIZER(lockname)
-#endif
-
 #define __MUTEX_INITIALIZER(lockname) \
                { .owner = ATOMIC_LONG_INIT(0) \
-               , .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \
+               , .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(lockname.wait_lock) \
                , .wait_list = LIST_HEAD_INIT(lockname.wait_list) \
                __DEBUG_MUTEX_INITIALIZER(lockname) \
                __DEP_MAP_MUTEX_INITIALIZER(lockname) }
@@ -148,6 +126,50 @@ extern void __mutex_init(struct mutex *lock, const char *name,
  */
 extern bool mutex_is_locked(struct mutex *lock);
 
+#else /* !CONFIG_PREEMPT_RT */
+/*
+ * Preempt-RT variant based on rtmutexes.
+ */
+#include <linux/rtmutex.h>
+
+struct mutex {
+       struct rt_mutex_base    rtmutex;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+       struct lockdep_map      dep_map;
+#endif
+};
+
+#define __MUTEX_INITIALIZER(mutexname)                                 \
+{                                                                      \
+       .rtmutex = __RT_MUTEX_BASE_INITIALIZER(mutexname.rtmutex)       \
+       __DEP_MAP_MUTEX_INITIALIZER(mutexname)                          \
+}
+
+#define DEFINE_MUTEX(mutexname)                                                \
+       struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)
+
+extern void __mutex_rt_init(struct mutex *lock, const char *name,
+                           struct lock_class_key *key);
+extern int mutex_trylock(struct mutex *lock);
+
+static inline void mutex_destroy(struct mutex *lock) { }
+
+#define mutex_is_locked(l)     rt_mutex_base_is_locked(&(l)->rtmutex)
+
+#define __mutex_init(mutex, name, key)                 \
+do {                                                   \
+       rt_mutex_base_init(&(mutex)->rtmutex);          \
+       __mutex_rt_init((mutex), name, key);            \
+} while (0)
+
+#define mutex_init(mutex)                              \
+do {                                                   \
+       static struct lock_class_key __key;             \
+                                                       \
+       __mutex_init((mutex), #mutex, &__key);          \
+} while (0)
+#endif /* CONFIG_PREEMPT_RT */
+
 /*
  * See kernel/locking/mutex.c for detailed documentation of these APIs.
  * Also see Documentation/locking/mutex-design.rst.
index 10279c4..ada1296 100644 (file)
@@ -196,6 +196,9 @@ struct ip_set_region {
        u32 elements;           /* Number of elements vs timeout */
 };
 
+/* Max range where every element is added/deleted in one step */
+#define IPSET_MAX_RANGE                (1<<20)
+
 /* The max revision number supported by any set type + 1 */
 #define IPSET_REVISION_MAX     9
 
index 9225ee6..ae6f4eb 100644 (file)
@@ -7,7 +7,7 @@
 
 bool __do_once_start(bool *done, unsigned long *flags);
 void __do_once_done(bool *done, struct static_key_true *once_key,
-                   unsigned long *flags);
+                   unsigned long *flags, struct module *mod);
 
 /* Call a function exactly once. The idea of DO_ONCE() is to perform
  * a function call such as initialization of random seeds, etc, only
@@ -46,7 +46,7 @@ void __do_once_done(bool *done, struct static_key_true *once_key,
                        if (unlikely(___ret)) {                              \
                                func(__VA_ARGS__);                           \
                                __do_once_done(&___done, &___once_key,       \
-                                              &___flags);                   \
+                                              &___flags, THIS_MODULE);      \
                        }                                                    \
                }                                                            \
                ___ret;                                                      \
index a433f13..495b16b 100644 (file)
@@ -12,6 +12,7 @@
 #ifndef PADATA_H
 #define PADATA_H
 
+#include <linux/refcount.h>
 #include <linux/compiler_types.h>
 #include <linux/workqueue.h>
 #include <linux/spinlock.h>
@@ -96,7 +97,7 @@ struct parallel_data {
        struct padata_shell             *ps;
        struct padata_list              __percpu *reorder_list;
        struct padata_serial_queue      __percpu *squeue;
-       atomic_t                        refcnt;
+       refcount_t                      refcnt;
        unsigned int                    seq_nr;
        unsigned int                    processed;
        int                             cpu;
index 4bac183..60e2101 100644 (file)
 #define PCI_DEVICE_ID_3COM_3CR990SVR   0x990a
 
 #define PCI_VENDOR_ID_AL               0x10b9
+#define PCI_DEVICE_ID_AL_M1489         0x1489
 #define PCI_DEVICE_ID_AL_M1533         0x1533
 #define PCI_DEVICE_ID_AL_M1535         0x1535
 #define PCI_DEVICE_ID_AL_M1541         0x1541
 #define PCI_DEVICE_ID_INTEL_82375      0x0482
 #define PCI_DEVICE_ID_INTEL_82424      0x0483
 #define PCI_DEVICE_ID_INTEL_82378      0x0484
+#define PCI_DEVICE_ID_INTEL_82425      0x0486
 #define PCI_DEVICE_ID_INTEL_MRST_SD0   0x0807
 #define PCI_DEVICE_ID_INTEL_MRST_SD1   0x0808
 #define PCI_DEVICE_ID_INTEL_MFD_SD     0x0820
index fa10acb..af308e1 100644 (file)
@@ -78,6 +78,7 @@ struct file;
 
 extern struct pid *pidfd_pid(const struct file *file);
 struct pid *pidfd_get_pid(unsigned int fd, unsigned int *flags);
+int pidfd_create(struct pid *pid, unsigned int flags);
 
 static inline struct pid *get_pid(struct pid *pid)
 {
index 5d2705f..fc56424 100644 (file)
@@ -48,6 +48,7 @@ struct pipe_buffer {
  *     @files: number of struct file referring this pipe (protected by ->i_lock)
  *     @r_counter: reader counter
  *     @w_counter: writer counter
+ *     @poll_usage: is this pipe used for epoll, which has crazy wakeups?
  *     @fasync_readers: reader side fasync
  *     @fasync_writers: writer side fasync
  *     @bufs: the circular array of pipe buffers
@@ -70,6 +71,7 @@ struct pipe_inode_info {
        unsigned int files;
        unsigned int r_counter;
        unsigned int w_counter;
+       unsigned int poll_usage;
        struct page *tmp_page;
        struct fasync_struct *fasync_readers;
        struct fasync_struct *fasync_writers;
index 45f53af..271bd87 100644 (file)
@@ -4228,6 +4228,7 @@ enum ec_device_event {
        EC_DEVICE_EVENT_TRACKPAD,
        EC_DEVICE_EVENT_DSP,
        EC_DEVICE_EVENT_WIFI,
+       EC_DEVICE_EVENT_WLC,
 };
 
 enum ec_device_event_param {
@@ -5460,6 +5461,72 @@ struct ec_response_rollback_info {
 /* Issue AP reset */
 #define EC_CMD_AP_RESET 0x0125
 
+/**
+ * Get the number of peripheral charge ports
+ */
+#define EC_CMD_PCHG_COUNT 0x0134
+
+#define EC_PCHG_MAX_PORTS 8
+
+struct ec_response_pchg_count {
+       uint8_t port_count;
+} __ec_align1;
+
+/**
+ * Get the status of a peripheral charge port
+ */
+#define EC_CMD_PCHG 0x0135
+
+struct ec_params_pchg {
+       uint8_t port;
+} __ec_align1;
+
+struct ec_response_pchg {
+       uint32_t error;                 /* enum pchg_error */
+       uint8_t state;                  /* enum pchg_state state */
+       uint8_t battery_percentage;
+       uint8_t unused0;
+       uint8_t unused1;
+       /* Fields added in version 1 */
+       uint32_t fw_version;
+       uint32_t dropped_event_count;
+} __ec_align2;
+
+enum pchg_state {
+       /* Charger is reset and not initialized. */
+       PCHG_STATE_RESET = 0,
+       /* Charger is initialized or disabled. */
+       PCHG_STATE_INITIALIZED,
+       /* Charger is enabled and ready to detect a device. */
+       PCHG_STATE_ENABLED,
+       /* Device is in proximity. */
+       PCHG_STATE_DETECTED,
+       /* Device is being charged. */
+       PCHG_STATE_CHARGING,
+       /* Device is fully charged. It implies DETECTED (& not charging). */
+       PCHG_STATE_FULL,
+       /* In download (a.k.a. firmware update) mode */
+       PCHG_STATE_DOWNLOAD,
+       /* In download mode. Ready for receiving data. */
+       PCHG_STATE_DOWNLOADING,
+       /* Device is ready for data communication. */
+       PCHG_STATE_CONNECTED,
+       /* Put no more entry below */
+       PCHG_STATE_COUNT,
+};
+
+#define EC_PCHG_STATE_TEXT { \
+       [PCHG_STATE_RESET] = "RESET", \
+       [PCHG_STATE_INITIALIZED] = "INITIALIZED", \
+       [PCHG_STATE_ENABLED] = "ENABLED", \
+       [PCHG_STATE_DETECTED] = "DETECTED", \
+       [PCHG_STATE_CHARGING] = "CHARGING", \
+       [PCHG_STATE_FULL] = "FULL", \
+       [PCHG_STATE_DOWNLOAD] = "DOWNLOAD", \
+       [PCHG_STATE_DOWNLOADING] = "DOWNLOADING", \
+       [PCHG_STATE_CONNECTED] = "CONNECTED", \
+       }
+
 /*****************************************************************************/
 /* Voltage regulator controls */
 
index 65fd5ff..f0db674 100644 (file)
@@ -12,5 +12,6 @@
 /* Board specific platform_data */
 struct mtk_chip_config {
        u32 sample_sel;
+       u32 tick_delay;
 };
 #endif
index 896c16d..00fef00 100644 (file)
@@ -82,12 +82,19 @@ static inline bool cpu_timer_enqueue(struct timerqueue_head *head,
        return timerqueue_add(head, &ctmr->node);
 }
 
-static inline void cpu_timer_dequeue(struct cpu_timer *ctmr)
+static inline bool cpu_timer_queued(struct cpu_timer *ctmr)
 {
-       if (ctmr->head) {
+       return !!ctmr->head;
+}
+
+static inline bool cpu_timer_dequeue(struct cpu_timer *ctmr)
+{
+       if (cpu_timer_queued(ctmr)) {
                timerqueue_del(ctmr->head, &ctmr->node);
                ctmr->head = NULL;
+               return true;
        }
+       return false;
 }
 
 static inline u64 cpu_timer_getexpires(struct cpu_timer *ctmr)
index d55c746..dd24756 100644 (file)
@@ -69,7 +69,7 @@ enum max17042_register {
        MAX17042_RelaxCFG       = 0x2A,
        MAX17042_MiscCFG        = 0x2B,
        MAX17042_TGAIN          = 0x2C,
-       MAx17042_TOFF           = 0x2D,
+       MAX17042_TOFF           = 0x2D,
        MAX17042_CGAIN          = 0x2E,
        MAX17042_COFF           = 0x2F,
 
@@ -110,13 +110,14 @@ enum max17042_register {
        MAX17042_VFSOC          = 0xFF,
 };
 
+/* Registers specific to max17055 only */
 enum max17055_register {
        MAX17055_QRes           = 0x0C,
+       MAX17055_RCell          = 0x14,
        MAX17055_TTF            = 0x20,
-       MAX17055_V_empty        = 0x3A,
-       MAX17055_TIMER          = 0x3E,
+       MAX17055_DieTemp        = 0x34,
        MAX17055_USER_MEM       = 0x40,
-       MAX17055_RGAIN          = 0x42,
+       MAX17055_RGAIN          = 0x43,
 
        MAX17055_ConvgCfg       = 0x49,
        MAX17055_VFRemCap       = 0x4A,
@@ -155,13 +156,14 @@ enum max17055_register {
        MAX17055_AtAvCap        = 0xDF,
 };
 
-/* Registers specific to max17047/50 */
+/* Registers specific to max17047/50/55 */
 enum max17047_register {
        MAX17047_QRTbl00        = 0x12,
        MAX17047_FullSOCThr     = 0x13,
        MAX17047_QRTbl10        = 0x22,
        MAX17047_QRTbl20        = 0x32,
        MAX17047_V_empty        = 0x3A,
+       MAX17047_TIMER          = 0x3E,
        MAX17047_QRTbl30        = 0x42,
 };
 
index be20398..9ca1f12 100644 (file)
@@ -352,6 +352,7 @@ struct power_supply_resistance_temp_table {
  */
 
 struct power_supply_battery_info {
+       unsigned int technology;            /* from the enum above */
        int energy_full_design_uwh;         /* microWatt-hours */
        int charge_full_design_uah;         /* microAmp-hours */
        int voltage_min_design_uv;          /* microVolts */
index 9881eac..4d244e2 100644 (file)
 /*
  * The preempt_count offset after spin_lock()
  */
+#if !defined(CONFIG_PREEMPT_RT)
 #define PREEMPT_LOCK_OFFSET    PREEMPT_DISABLE_OFFSET
+#else
+#define PREEMPT_LOCK_OFFSET    0
+#endif
 
 /*
  * The preempt_count offset needed for things like:
index d31ecaf..235047d 100644 (file)
 #ifndef        _LINUX_RBTREE_H
 #define        _LINUX_RBTREE_H
 
+#include <linux/rbtree_types.h>
+
 #include <linux/kernel.h>
 #include <linux/stddef.h>
 #include <linux/rcupdate.h>
 
-struct rb_node {
-       unsigned long  __rb_parent_color;
-       struct rb_node *rb_right;
-       struct rb_node *rb_left;
-} __attribute__((aligned(sizeof(long))));
-    /* The alignment might seem pointless, but allegedly CRIS needs it */
-
-struct rb_root {
-       struct rb_node *rb_node;
-};
-
 #define rb_parent(r)   ((struct rb_node *)((r)->__rb_parent_color & ~3))
 
-#define RB_ROOT        (struct rb_root) { NULL, }
 #define        rb_entry(ptr, type, member) container_of(ptr, type, member)
 
 #define RB_EMPTY_ROOT(root)  (READ_ONCE((root)->rb_node) == NULL)
@@ -112,23 +102,6 @@ static inline void rb_link_node_rcu(struct rb_node *node, struct rb_node *parent
                        typeof(*pos), field); 1; }); \
             pos = n)
 
-/*
- * Leftmost-cached rbtrees.
- *
- * We do not cache the rightmost node based on footprint
- * size vs number of potential users that could benefit
- * from O(1) rb_last(). Just not worth it, users that want
- * this feature can always implement the logic explicitly.
- * Furthermore, users that want to cache both pointers may
- * find it a bit asymmetric, but that's ok.
- */
-struct rb_root_cached {
-       struct rb_root rb_root;
-       struct rb_node *rb_leftmost;
-};
-
-#define RB_ROOT_CACHED (struct rb_root_cached) { {NULL, }, NULL }
-
 /* Same as rb_first(), but O(1) */
 #define rb_first_cached(root) (root)->rb_leftmost
 
diff --git a/include/linux/rbtree_types.h b/include/linux/rbtree_types.h
new file mode 100644 (file)
index 0000000..45b6ecd
--- /dev/null
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _LINUX_RBTREE_TYPES_H
+#define _LINUX_RBTREE_TYPES_H
+
+struct rb_node {
+       unsigned long  __rb_parent_color;
+       struct rb_node *rb_right;
+       struct rb_node *rb_left;
+} __attribute__((aligned(sizeof(long))));
+/* The alignment might seem pointless, but allegedly CRIS needs it */
+
+struct rb_root {
+       struct rb_node *rb_node;
+};
+
+/*
+ * Leftmost-cached rbtrees.
+ *
+ * We do not cache the rightmost node based on footprint
+ * size vs number of potential users that could benefit
+ * from O(1) rb_last(). Just not worth it, users that want
+ * this feature can always implement the logic explicitly.
+ * Furthermore, users that want to cache both pointers may
+ * find it a bit asymmetric, but that's ok.
+ */
+struct rb_root_cached {
+       struct rb_root rb_root;
+       struct rb_node *rb_leftmost;
+};
+
+#define RB_ROOT (struct rb_root) { NULL, }
+#define RB_ROOT_CACHED (struct rb_root_cached) { {NULL, }, NULL }
+
+#endif
index f8633d3..d29740b 100644 (file)
 #include <linux/list.h>
 #include <linux/rcupdate.h>
 
-/*
- * Why is there no list_empty_rcu()?  Because list_empty() serves this
- * purpose.  The list_empty() function fetches the RCU-protected pointer
- * and compares it to the address of the list head, but neither dereferences
- * this pointer itself nor provides this pointer to the caller.  Therefore,
- * it is not necessary to use rcu_dereference(), so that list_empty() can
- * be used anywhere you would want to use a list_empty_rcu().
- */
-
 /*
  * INIT_LIST_HEAD_RCU - Initialize a list_head visible to RCU readers
  * @list: list to be initialized
@@ -318,21 +309,29 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
 /*
  * Where are list_empty_rcu() and list_first_entry_rcu()?
  *
- * Implementing those functions following their counterparts list_empty() and
- * list_first_entry() is not advisable because they lead to subtle race
- * conditions as the following snippet shows:
+ * They do not exist because they would lead to subtle race conditions:
  *
  * if (!list_empty_rcu(mylist)) {
  *     struct foo *bar = list_first_entry_rcu(mylist, struct foo, list_member);
  *     do_something(bar);
  * }
  *
- * The list may not be empty when list_empty_rcu checks it, but it may be when
- * list_first_entry_rcu rereads the ->next pointer.
- *
- * Rereading the ->next pointer is not a problem for list_empty() and
- * list_first_entry() because they would be protected by a lock that blocks
- * writers.
+ * The list might be non-empty when list_empty_rcu() checks it, but it
+ * might have become empty by the time that list_first_entry_rcu() rereads
+ * the ->next pointer, which would result in a SEGV.
+ *
+ * When not using RCU, it is OK for list_first_entry() to re-read that
+ * pointer because both functions should be protected by some lock that
+ * blocks writers.
+ *
+ * When using RCU, list_empty() uses READ_ONCE() to fetch the
+ * RCU-protected ->next pointer and then compares it to the address of the
+ * list head.  However, it neither dereferences this pointer nor provides
+ * this pointer to its caller.  Thus, READ_ONCE() suffices (that is,
+ * rcu_dereference() is not needed), which means that list_empty() can be
+ * used anywhere you would want to use list_empty_rcu().  Just don't
+ * expect anything useful to happen if you do a subsequent lockless
+ * call to list_first_entry_rcu()!!!
  *
  * See list_first_or_null_rcu for an alternative.
  */
index d9680b7..434d12f 100644 (file)
@@ -53,7 +53,7 @@ void __rcu_read_unlock(void);
  * nesting depth, but makes sense only if CONFIG_PREEMPT_RCU -- in other
  * types of kernel builds, the rcu_read_lock() nesting depth is unknowable.
  */
-#define rcu_preempt_depth() (current->rcu_read_lock_nesting)
+#define rcu_preempt_depth() READ_ONCE(current->rcu_read_lock_nesting)
 
 #else /* #ifdef CONFIG_PREEMPT_RCU */
 
@@ -167,7 +167,7 @@ void synchronize_rcu_tasks(void);
 # define synchronize_rcu_tasks synchronize_rcu
 # endif
 
-# ifdef CONFIG_TASKS_RCU_TRACE
+# ifdef CONFIG_TASKS_TRACE_RCU
 # define rcu_tasks_trace_qs(t)                                         \
        do {                                                            \
                if (!likely(READ_ONCE((t)->trc_reader_checked)) &&      \
index 953e70f..9be0153 100644 (file)
@@ -14,9 +14,6 @@
 
 #include <asm/param.h> /* for HZ */
 
-/* Never flag non-existent other CPUs! */
-static inline bool rcu_eqs_special_set(int cpu) { return false; }
-
 unsigned long get_state_synchronize_rcu(void);
 unsigned long start_poll_synchronize_rcu(void);
 bool poll_state_synchronize_rcu(unsigned long oldstate);
index f5f08dd..e3c9a25 100644 (file)
@@ -344,6 +344,7 @@ typedef void (*regmap_unlock)(void *);
  * @ranges: Array of configuration entries for virtual address ranges.
  * @num_ranges: Number of range configuration entries.
  * @use_hwlock: Indicate if a hardware spinlock should be used.
+ * @use_raw_spinlock: Indicate if a raw spinlock should be used.
  * @hwlock_id: Specify the hardware spinlock id.
  * @hwlock_mode: The hardware spinlock mode, should be HWLOCK_IRQSTATE,
  *              HWLOCK_IRQ or 0.
@@ -403,6 +404,7 @@ struct regmap_config {
        unsigned int num_ranges;
 
        bool use_hwlock;
+       bool use_raw_spinlock;
        unsigned int hwlock_id;
        unsigned int hwlock_mode;
 
@@ -1269,12 +1271,13 @@ void devm_regmap_field_free(struct device *dev, struct regmap_field *field);
 
 int regmap_field_bulk_alloc(struct regmap *regmap,
                             struct regmap_field **rm_field,
-                            struct reg_field *reg_field,
+                            const struct reg_field *reg_field,
                             int num_fields);
 void regmap_field_bulk_free(struct regmap_field *field);
 int devm_regmap_field_bulk_alloc(struct device *dev, struct regmap *regmap,
                                 struct regmap_field **field,
-                                struct reg_field *reg_field, int num_fields);
+                                const struct reg_field *reg_field,
+                                int num_fields);
 void devm_regmap_field_bulk_free(struct device *dev,
                                 struct regmap_field *field);
 
index f72ca73..bbf6590 100644 (file)
@@ -222,17 +222,12 @@ void regulator_bulk_unregister_supply_alias(struct device *dev,
 int devm_regulator_register_supply_alias(struct device *dev, const char *id,
                                         struct device *alias_dev,
                                         const char *alias_id);
-void devm_regulator_unregister_supply_alias(struct device *dev,
-                                           const char *id);
 
 int devm_regulator_bulk_register_supply_alias(struct device *dev,
                                              const char *const *id,
                                              struct device *alias_dev,
                                              const char *const *alias_id,
                                              int num_id);
-void devm_regulator_bulk_unregister_supply_alias(struct device *dev,
-                                                const char *const *id,
-                                                int num_id);
 
 /* regulator output control and status */
 int __must_check regulator_enable(struct regulator *regulator);
@@ -408,11 +403,6 @@ static inline int devm_regulator_register_supply_alias(struct device *dev,
        return 0;
 }
 
-static inline void devm_regulator_unregister_supply_alias(struct device *dev,
-                                                         const char *id)
-{
-}
-
 static inline int devm_regulator_bulk_register_supply_alias(struct device *dev,
                                                const char *const *id,
                                                struct device *alias_dev,
@@ -422,11 +412,6 @@ static inline int devm_regulator_bulk_register_supply_alias(struct device *dev,
        return 0;
 }
 
-static inline void devm_regulator_bulk_unregister_supply_alias(
-       struct device *dev, const char *const *id, int num_id)
-{
-}
-
 static inline int regulator_enable(struct regulator *regulator)
 {
        return 0;
index 4aec203..bd7a73d 100644 (file)
@@ -337,6 +337,12 @@ enum regulator_type {
  * @pull_down_val_on: Enabling value for control when using regmap
  *                     set_pull_down
  *
+ * @ramp_reg:          Register for controlling the regulator ramp-rate.
+ * @ramp_mask:         Bitmask for the ramp-rate control register.
+ * @ramp_delay_table:  Table for mapping the regulator ramp-rate values. Values
+ *                     should be given in units of V/S (uV/uS). See the
+ *                     regulator_set_ramp_delay_regmap().
+ *
  * @enable_time: Time taken for initial enable of regulator (in uS).
  * @off_on_delay: guard time (in uS), before re-enabling a regulator
  *
@@ -462,7 +468,7 @@ struct regulator_err_state {
 };
 
 /**
- * struct regulator_irq_data - regulator error/notification status date
+ * struct regulator_irq_data - regulator error/notification status data
  *
  * @states:    Status structs for each of the associated regulators.
  * @num_states:        Amount of associated regulators.
@@ -521,8 +527,8 @@ struct regulator_irq_data {
  *             active events as core does not clean the map data.
  *             REGULATOR_FAILED_RETRY can be returned to indicate that the
  *             status reading from IC failed. If this is repeated for
- *             fatal_cnt times the core will call die() callback or BUG()
- *             as a last resort to protect the HW.
+ *             fatal_cnt times the core will call die() callback or power-off
+ *             the system as a last resort to protect the HW.
  * @renable:   Optional callback to check status (if HW supports that) before
  *             re-enabling IRQ. If implemented this should clear the error
  *             flags so that errors fetched by regulator_get_error_flags()
@@ -531,7 +537,8 @@ struct regulator_irq_data {
  *             REGULATOR_FAILED_RETRY can be returned to
  *             indicate that the status reading from IC failed. If this is
  *             repeated for 'fatal_cnt' times the core will call die()
- *             callback or BUG() as a last resort to protect the HW.
+ *             callback or if die() is not populated then attempt to power-off
+ *             the system as a last resort to protect the HW.
  *             Returning zero indicates that the problem in HW has been solved
  *             and IRQ will be re-enabled. Returning REGULATOR_ERROR_ON
  *             indicates the error condition is still active and keeps IRQ
@@ -645,7 +652,6 @@ devm_regulator_register(struct device *dev,
                        const struct regulator_desc *regulator_desc,
                        const struct regulator_config *config);
 void regulator_unregister(struct regulator_dev *rdev);
-void devm_regulator_unregister(struct device *dev, struct regulator_dev *rdev);
 
 int regulator_notifier_call_chain(struct regulator_dev *rdev,
                                  unsigned long event, void *data);
index 68b4a51..621b7f4 100644 (file)
@@ -112,7 +112,7 @@ struct notification_limit {
  * @over_voltage_limits:       Limits for acting on over voltage.
  * @under_voltage_limits:      Limits for acting on under voltage.
  * @temp_limits:               Limits for acting on over temperature.
-
+ *
  * @max_spread: Max possible spread between coupled regulators
  * @max_uV_step: Max possible step change in voltage
  * @valid_modes_mask: Mask of modes which may be configured by consumers.
index 9b05af9..21deb52 100644 (file)
@@ -2,6 +2,8 @@
 #ifndef _RESCTRL_H
 #define _RESCTRL_H
 
+#include <linux/kernel.h>
+#include <linux/list.h>
 #include <linux/pid.h>
 
 #ifdef CONFIG_PROC_CPU_RESCTRL
@@ -13,4 +15,186 @@ int proc_resctrl_show(struct seq_file *m,
 
 #endif
 
+/**
+ * enum resctrl_conf_type - The type of configuration.
+ * @CDP_NONE:  No prioritisation, both code and data are controlled or monitored.
+ * @CDP_CODE:  Configuration applies to instruction fetches.
+ * @CDP_DATA:  Configuration applies to reads and writes.
+ */
+enum resctrl_conf_type {
+       CDP_NONE,
+       CDP_CODE,
+       CDP_DATA,
+};
+
+#define CDP_NUM_TYPES  (CDP_DATA + 1)
+
+/**
+ * struct resctrl_staged_config - parsed configuration to be applied
+ * @new_ctrl:          new ctrl value to be loaded
+ * @have_new_ctrl:     whether the user provided new_ctrl is valid
+ */
+struct resctrl_staged_config {
+       u32                     new_ctrl;
+       bool                    have_new_ctrl;
+};
+
+/**
+ * struct rdt_domain - group of CPUs sharing a resctrl resource
+ * @list:              all instances of this resource
+ * @id:                        unique id for this instance
+ * @cpu_mask:          which CPUs share this resource
+ * @rmid_busy_llc:     bitmap of which limbo RMIDs are above threshold
+ * @mbm_total:         saved state for MBM total bandwidth
+ * @mbm_local:         saved state for MBM local bandwidth
+ * @mbm_over:          worker to periodically read MBM h/w counters
+ * @cqm_limbo:         worker to periodically read CQM h/w counters
+ * @mbm_work_cpu:      worker CPU for MBM h/w counters
+ * @cqm_work_cpu:      worker CPU for CQM h/w counters
+ * @plr:               pseudo-locked region (if any) associated with domain
+ * @staged_config:     parsed configuration to be applied
+ */
+struct rdt_domain {
+       struct list_head                list;
+       int                             id;
+       struct cpumask                  cpu_mask;
+       unsigned long                   *rmid_busy_llc;
+       struct mbm_state                *mbm_total;
+       struct mbm_state                *mbm_local;
+       struct delayed_work             mbm_over;
+       struct delayed_work             cqm_limbo;
+       int                             mbm_work_cpu;
+       int                             cqm_work_cpu;
+       struct pseudo_lock_region       *plr;
+       struct resctrl_staged_config    staged_config[CDP_NUM_TYPES];
+};
+
+/**
+ * struct resctrl_cache - Cache allocation related data
+ * @cbm_len:           Length of the cache bit mask
+ * @min_cbm_bits:      Minimum number of consecutive bits to be set
+ * @shareable_bits:    Bitmask of shareable resource with other
+ *                     executing entities
+ * @arch_has_sparse_bitmaps:   True if a bitmap like f00f is valid.
+ * @arch_has_empty_bitmaps:    True if the '0' bitmap is valid.
+ * @arch_has_per_cpu_cfg:      True if QOS_CFG register for this cache
+ *                             level has CPU scope.
+ */
+struct resctrl_cache {
+       unsigned int    cbm_len;
+       unsigned int    min_cbm_bits;
+       unsigned int    shareable_bits;
+       bool            arch_has_sparse_bitmaps;
+       bool            arch_has_empty_bitmaps;
+       bool            arch_has_per_cpu_cfg;
+};
+
+/**
+ * enum membw_throttle_mode - System's memory bandwidth throttling mode
+ * @THREAD_THROTTLE_UNDEFINED: Not relevant to the system
+ * @THREAD_THROTTLE_MAX:       Memory bandwidth is throttled at the core
+ *                             always using smallest bandwidth percentage
+ *                             assigned to threads, aka "max throttling"
+ * @THREAD_THROTTLE_PER_THREAD:        Memory bandwidth is throttled at the thread
+ */
+enum membw_throttle_mode {
+       THREAD_THROTTLE_UNDEFINED = 0,
+       THREAD_THROTTLE_MAX,
+       THREAD_THROTTLE_PER_THREAD,
+};
+
+/**
+ * struct resctrl_membw - Memory bandwidth allocation related data
+ * @min_bw:            Minimum memory bandwidth percentage user can request
+ * @bw_gran:           Granularity at which the memory bandwidth is allocated
+ * @delay_linear:      True if memory B/W delay is in linear scale
+ * @arch_needs_linear: True if we can't configure non-linear resources
+ * @throttle_mode:     Bandwidth throttling mode when threads request
+ *                     different memory bandwidths
+ * @mba_sc:            True if MBA software controller(mba_sc) is enabled
+ * @mb_map:            Mapping of memory B/W percentage to memory B/W delay
+ */
+struct resctrl_membw {
+       u32                             min_bw;
+       u32                             bw_gran;
+       u32                             delay_linear;
+       bool                            arch_needs_linear;
+       enum membw_throttle_mode        throttle_mode;
+       bool                            mba_sc;
+       u32                             *mb_map;
+};
+
+struct rdt_parse_data;
+struct resctrl_schema;
+
+/**
+ * struct rdt_resource - attributes of a resctrl resource
+ * @rid:               The index of the resource
+ * @alloc_enabled:     Is allocation enabled on this machine
+ * @mon_enabled:       Is monitoring enabled for this feature
+ * @alloc_capable:     Is allocation available on this machine
+ * @mon_capable:       Is monitor feature available on this machine
+ * @num_rmid:          Number of RMIDs available
+ * @cache_level:       Which cache level defines scope of this resource
+ * @cache:             Cache allocation related data
+ * @membw:             If the component has bandwidth controls, their properties.
+ * @domains:           All domains for this resource
+ * @name:              Name to use in "schemata" file.
+ * @data_width:                Character width of data when displaying
+ * @default_ctrl:      Specifies default cache cbm or memory B/W percent.
+ * @format_str:                Per resource format string to show domain value
+ * @parse_ctrlval:     Per resource function pointer to parse control values
+ * @evt_list:          List of monitoring events
+ * @fflags:            flags to choose base and info files
+ * @cdp_capable:       Is the CDP feature available on this resource
+ */
+struct rdt_resource {
+       int                     rid;
+       bool                    alloc_enabled;
+       bool                    mon_enabled;
+       bool                    alloc_capable;
+       bool                    mon_capable;
+       int                     num_rmid;
+       int                     cache_level;
+       struct resctrl_cache    cache;
+       struct resctrl_membw    membw;
+       struct list_head        domains;
+       char                    *name;
+       int                     data_width;
+       u32                     default_ctrl;
+       const char              *format_str;
+       int                     (*parse_ctrlval)(struct rdt_parse_data *data,
+                                                struct resctrl_schema *s,
+                                                struct rdt_domain *d);
+       struct list_head        evt_list;
+       unsigned long           fflags;
+       bool                    cdp_capable;
+};
+
+/**
+ * struct resctrl_schema - configuration abilities of a resource presented to
+ *                        user-space
+ * @list:      Member of resctrl_schema_all.
+ * @name:      The name to use in the "schemata" file.
+ * @conf_type: Whether this schema is specific to code/data.
+ * @res:       The resource structure exported by the architecture to describe
+ *             the hardware that is configured by this schema.
+ * @num_closid:        The number of closid that can be used with this schema. When
+ *             features like CDP are enabled, this will be lower than the
+ *             hardware supports for the resource.
+ */
+struct resctrl_schema {
+       struct list_head                list;
+       char                            name[8];
+       enum resctrl_conf_type          conf_type;
+       struct rdt_resource             *res;
+       u32                             num_closid;
+};
+
+/* The number of closid supported by this resource regardless of CDP */
+u32 resctrl_arch_get_num_closid(struct rdt_resource *r);
+int resctrl_arch_update_domains(struct rdt_resource *r, u32 closid);
+u32 resctrl_arch_get_config(struct rdt_resource *r, struct rdt_domain *d,
+                           u32 closid, enum resctrl_conf_type type);
+
 #endif /* _RESCTRL_H */
index d1672de..9deedfe 100644 (file)
 #ifndef __LINUX_RT_MUTEX_H
 #define __LINUX_RT_MUTEX_H
 
+#include <linux/compiler.h>
 #include <linux/linkage.h>
-#include <linux/rbtree.h>
-#include <linux/spinlock_types.h>
+#include <linux/rbtree_types.h>
+#include <linux/spinlock_types_raw.h>
 
 extern int max_lock_depth; /* for sysctl */
 
+struct rt_mutex_base {
+       raw_spinlock_t          wait_lock;
+       struct rb_root_cached   waiters;
+       struct task_struct      *owner;
+};
+
+#define __RT_MUTEX_BASE_INITIALIZER(rtbasename)                                \
+{                                                                      \
+       .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(rtbasename.wait_lock),    \
+       .waiters = RB_ROOT_CACHED,                                      \
+       .owner = NULL                                                   \
+}
+
+/**
+ * rt_mutex_base_is_locked - is the rtmutex locked
+ * @lock: the mutex to be queried
+ *
+ * Returns true if the mutex is locked, false if unlocked.
+ */
+static inline bool rt_mutex_base_is_locked(struct rt_mutex_base *lock)
+{
+       return READ_ONCE(lock->owner) != NULL;
+}
+
+extern void rt_mutex_base_init(struct rt_mutex_base *rtb);
+
 /**
  * The rt_mutex structure
  *
@@ -28,9 +55,7 @@ extern int max_lock_depth; /* for sysctl */
  * @owner:     the mutex owner
  */
 struct rt_mutex {
-       raw_spinlock_t          wait_lock;
-       struct rb_root_cached   waiters;
-       struct task_struct      *owner;
+       struct rt_mutex_base    rtmutex;
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
        struct lockdep_map      dep_map;
 #endif
@@ -52,32 +77,24 @@ do { \
 } while (0)
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
-#define __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname) \
-       , .dep_map = { .name = #mutexname }
+#define __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname)      \
+       .dep_map = {                                    \
+               .name = #mutexname,                     \
+               .wait_type_inner = LD_WAIT_SLEEP,       \
+       }
 #else
 #define __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname)
 #endif
 
-#define __RT_MUTEX_INITIALIZER(mutexname) \
-       { .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
-       , .waiters = RB_ROOT_CACHED \
-       , .owner = NULL \
-       __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname)}
+#define __RT_MUTEX_INITIALIZER(mutexname)                              \
+{                                                                      \
+       .rtmutex = __RT_MUTEX_BASE_INITIALIZER(mutexname.rtmutex),      \
+       __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname)                       \
+}
 
 #define DEFINE_RT_MUTEX(mutexname) \
        struct rt_mutex mutexname = __RT_MUTEX_INITIALIZER(mutexname)
 
-/**
- * rt_mutex_is_locked - is the mutex locked
- * @lock: the mutex to be queried
- *
- * Returns 1 if the mutex is locked, 0 if unlocked.
- */
-static inline int rt_mutex_is_locked(struct rt_mutex *lock)
-{
-       return lock->owner != NULL;
-}
-
 extern void __rt_mutex_init(struct rt_mutex *lock, const char *name, struct lock_class_key *key);
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
diff --git a/include/linux/rwbase_rt.h b/include/linux/rwbase_rt.h
new file mode 100644 (file)
index 0000000..1d264dd
--- /dev/null
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#ifndef _LINUX_RWBASE_RT_H
+#define _LINUX_RWBASE_RT_H
+
+#include <linux/rtmutex.h>
+#include <linux/atomic.h>
+
+#define READER_BIAS            (1U << 31)
+#define WRITER_BIAS            (1U << 30)
+
+struct rwbase_rt {
+       atomic_t                readers;
+       struct rt_mutex_base    rtmutex;
+};
+
+#define __RWBASE_INITIALIZER(name)                             \
+{                                                              \
+       .readers = ATOMIC_INIT(READER_BIAS),                    \
+       .rtmutex = __RT_MUTEX_BASE_INITIALIZER(name.rtmutex),   \
+}
+
+#define init_rwbase_rt(rwbase)                                 \
+       do {                                                    \
+               rt_mutex_base_init(&(rwbase)->rtmutex);         \
+               atomic_set(&(rwbase)->readers, READER_BIAS);    \
+       } while (0)
+
+
+static __always_inline bool rw_base_is_locked(struct rwbase_rt *rwb)
+{
+       return atomic_read(&rwb->readers) != READER_BIAS;
+}
+
+static __always_inline bool rw_base_is_contended(struct rwbase_rt *rwb)
+{
+       return atomic_read(&rwb->readers) > 0;
+}
+
+#endif /* _LINUX_RWBASE_RT_H */
diff --git a/include/linux/rwlock_rt.h b/include/linux/rwlock_rt.h
new file mode 100644 (file)
index 0000000..49c1f38
--- /dev/null
@@ -0,0 +1,140 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#ifndef __LINUX_RWLOCK_RT_H
+#define __LINUX_RWLOCK_RT_H
+
+#ifndef __LINUX_SPINLOCK_RT_H
+#error Do not #include directly. Use <linux/spinlock.h>.
+#endif
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+extern void __rt_rwlock_init(rwlock_t *rwlock, const char *name,
+                            struct lock_class_key *key);
+#else
+static inline void __rt_rwlock_init(rwlock_t *rwlock, char *name,
+                                   struct lock_class_key *key)
+{
+}
+#endif
+
+#define rwlock_init(rwl)                               \
+do {                                                   \
+       static struct lock_class_key __key;             \
+                                                       \
+       init_rwbase_rt(&(rwl)->rwbase);                 \
+       __rt_rwlock_init(rwl, #rwl, &__key);            \
+} while (0)
+
+extern void rt_read_lock(rwlock_t *rwlock);
+extern int rt_read_trylock(rwlock_t *rwlock);
+extern void rt_read_unlock(rwlock_t *rwlock);
+extern void rt_write_lock(rwlock_t *rwlock);
+extern int rt_write_trylock(rwlock_t *rwlock);
+extern void rt_write_unlock(rwlock_t *rwlock);
+
+static __always_inline void read_lock(rwlock_t *rwlock)
+{
+       rt_read_lock(rwlock);
+}
+
+static __always_inline void read_lock_bh(rwlock_t *rwlock)
+{
+       local_bh_disable();
+       rt_read_lock(rwlock);
+}
+
+static __always_inline void read_lock_irq(rwlock_t *rwlock)
+{
+       rt_read_lock(rwlock);
+}
+
+#define read_lock_irqsave(lock, flags)                 \
+       do {                                            \
+               typecheck(unsigned long, flags);        \
+               rt_read_lock(lock);                     \
+               flags = 0;                              \
+       } while (0)
+
+#define read_trylock(lock)     __cond_lock(lock, rt_read_trylock(lock))
+
+static __always_inline void read_unlock(rwlock_t *rwlock)
+{
+       rt_read_unlock(rwlock);
+}
+
+static __always_inline void read_unlock_bh(rwlock_t *rwlock)
+{
+       rt_read_unlock(rwlock);
+       local_bh_enable();
+}
+
+static __always_inline void read_unlock_irq(rwlock_t *rwlock)
+{
+       rt_read_unlock(rwlock);
+}
+
+static __always_inline void read_unlock_irqrestore(rwlock_t *rwlock,
+                                                  unsigned long flags)
+{
+       rt_read_unlock(rwlock);
+}
+
+static __always_inline void write_lock(rwlock_t *rwlock)
+{
+       rt_write_lock(rwlock);
+}
+
+static __always_inline void write_lock_bh(rwlock_t *rwlock)
+{
+       local_bh_disable();
+       rt_write_lock(rwlock);
+}
+
+static __always_inline void write_lock_irq(rwlock_t *rwlock)
+{
+       rt_write_lock(rwlock);
+}
+
+#define write_lock_irqsave(lock, flags)                        \
+       do {                                            \
+               typecheck(unsigned long, flags);        \
+               rt_write_lock(lock);                    \
+               flags = 0;                              \
+       } while (0)
+
+#define write_trylock(lock)    __cond_lock(lock, rt_write_trylock(lock))
+
+#define write_trylock_irqsave(lock, flags)             \
+({                                                     \
+       int __locked;                                   \
+                                                       \
+       typecheck(unsigned long, flags);                \
+       flags = 0;                                      \
+       __locked = write_trylock(lock);                 \
+       __locked;                                       \
+})
+
+static __always_inline void write_unlock(rwlock_t *rwlock)
+{
+       rt_write_unlock(rwlock);
+}
+
+static __always_inline void write_unlock_bh(rwlock_t *rwlock)
+{
+       rt_write_unlock(rwlock);
+       local_bh_enable();
+}
+
+static __always_inline void write_unlock_irq(rwlock_t *rwlock)
+{
+       rt_write_unlock(rwlock);
+}
+
+static __always_inline void write_unlock_irqrestore(rwlock_t *rwlock,
+                                                   unsigned long flags)
+{
+       rt_write_unlock(rwlock);
+}
+
+#define rwlock_is_contended(lock)              (((void)(lock), 0))
+
+#endif /* __LINUX_RWLOCK_RT_H */
index 3bd03e1..1948442 100644 (file)
@@ -1,9 +1,23 @@
 #ifndef __LINUX_RWLOCK_TYPES_H
 #define __LINUX_RWLOCK_TYPES_H
 
+#if !defined(__LINUX_SPINLOCK_TYPES_H)
+# error "Do not include directly, include spinlock_types.h"
+#endif
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define RW_DEP_MAP_INIT(lockname)                                     \
+       .dep_map = {                                                    \
+               .name = #lockname,                                      \
+               .wait_type_inner = LD_WAIT_CONFIG,                      \
+       }
+#else
+# define RW_DEP_MAP_INIT(lockname)
+#endif
+
+#ifndef CONFIG_PREEMPT_RT
 /*
- * include/linux/rwlock_types.h - generic rwlock type definitions
- *                               and initializers
+ * generic rwlock type definitions and initializers
  *
  * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
  * Released under the General Public License (GPL).
@@ -21,16 +35,6 @@ typedef struct {
 
 #define RWLOCK_MAGIC           0xdeaf1eed
 
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define RW_DEP_MAP_INIT(lockname)                                     \
-       .dep_map = {                                                    \
-               .name = #lockname,                                      \
-               .wait_type_inner = LD_WAIT_CONFIG,                      \
-       }
-#else
-# define RW_DEP_MAP_INIT(lockname)
-#endif
-
 #ifdef CONFIG_DEBUG_SPINLOCK
 #define __RW_LOCK_UNLOCKED(lockname)                                   \
        (rwlock_t)      {       .raw_lock = __ARCH_RW_LOCK_UNLOCKED,    \
@@ -46,4 +50,29 @@ typedef struct {
 
 #define DEFINE_RWLOCK(x)       rwlock_t x = __RW_LOCK_UNLOCKED(x)
 
+#else /* !CONFIG_PREEMPT_RT */
+
+#include <linux/rwbase_rt.h>
+
+typedef struct {
+       struct rwbase_rt        rwbase;
+       atomic_t                readers;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+       struct lockdep_map      dep_map;
+#endif
+} rwlock_t;
+
+#define __RWLOCK_RT_INITIALIZER(name)                                  \
+{                                                                      \
+       .rwbase = __RWBASE_INITIALIZER(name),                           \
+       RW_DEP_MAP_INIT(name)                                           \
+}
+
+#define __RW_LOCK_UNLOCKED(name) __RWLOCK_RT_INITIALIZER(name)
+
+#define DEFINE_RWLOCK(name)                                            \
+       rwlock_t name = __RW_LOCK_UNLOCKED(name)
+
+#endif /* CONFIG_PREEMPT_RT */
+
 #endif /* __LINUX_RWLOCK_TYPES_H */
index a66038d..426e98e 100644 (file)
 #include <linux/spinlock.h>
 #include <linux/atomic.h>
 #include <linux/err.h>
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define __RWSEM_DEP_MAP_INIT(lockname)                        \
+       .dep_map = {                                    \
+               .name = #lockname,                      \
+               .wait_type_inner = LD_WAIT_SLEEP,       \
+       },
+#else
+# define __RWSEM_DEP_MAP_INIT(lockname)
+#endif
+
+#ifndef CONFIG_PREEMPT_RT
+
 #ifdef CONFIG_RWSEM_SPIN_ON_OWNER
 #include <linux/osq_lock.h>
 #endif
@@ -64,16 +77,6 @@ static inline int rwsem_is_locked(struct rw_semaphore *sem)
 
 /* Common initializer macros and functions */
 
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define __RWSEM_DEP_MAP_INIT(lockname)                        \
-       .dep_map = {                                    \
-               .name = #lockname,                      \
-               .wait_type_inner = LD_WAIT_SLEEP,       \
-       },
-#else
-# define __RWSEM_DEP_MAP_INIT(lockname)
-#endif
-
 #ifdef CONFIG_DEBUG_RWSEMS
 # define __RWSEM_DEBUG_INIT(lockname) .magic = &lockname,
 #else
@@ -119,6 +122,61 @@ static inline int rwsem_is_contended(struct rw_semaphore *sem)
        return !list_empty(&sem->wait_list);
 }
 
+#else /* !CONFIG_PREEMPT_RT */
+
+#include <linux/rwbase_rt.h>
+
+struct rw_semaphore {
+       struct rwbase_rt        rwbase;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+       struct lockdep_map      dep_map;
+#endif
+};
+
+#define __RWSEM_INITIALIZER(name)                              \
+       {                                                       \
+               .rwbase = __RWBASE_INITIALIZER(name),           \
+               __RWSEM_DEP_MAP_INIT(name)                      \
+       }
+
+#define DECLARE_RWSEM(lockname) \
+       struct rw_semaphore lockname = __RWSEM_INITIALIZER(lockname)
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+extern void  __rwsem_init(struct rw_semaphore *rwsem, const char *name,
+                         struct lock_class_key *key);
+#else
+static inline void  __rwsem_init(struct rw_semaphore *rwsem, const char *name,
+                                struct lock_class_key *key)
+{
+}
+#endif
+
+#define init_rwsem(sem)                                                \
+do {                                                           \
+       static struct lock_class_key __key;                     \
+                                                               \
+       init_rwbase_rt(&(sem)->rwbase);                 \
+       __rwsem_init((sem), #sem, &__key);                      \
+} while (0)
+
+static __always_inline int rwsem_is_locked(struct rw_semaphore *sem)
+{
+       return rw_base_is_locked(&sem->rwbase);
+}
+
+static __always_inline int rwsem_is_contended(struct rw_semaphore *sem)
+{
+       return rw_base_is_contended(&sem->rwbase);
+}
+
+#endif /* CONFIG_PREEMPT_RT */
+
+/*
+ * The functions below are the same for all rwsem implementations including
+ * the RT specific variant.
+ */
+
 /*
  * lock for reading
  */
index ec8d07d..1780260 100644 (file)
@@ -95,7 +95,9 @@ struct task_group;
 #define TASK_WAKING                    0x0200
 #define TASK_NOLOAD                    0x0400
 #define TASK_NEW                       0x0800
-#define TASK_STATE_MAX                 0x1000
+/* RT specific auxiliary flag to mark RT lock waiters */
+#define TASK_RTLOCK_WAIT               0x1000
+#define TASK_STATE_MAX                 0x2000
 
 /* Convenience macros for the sake of set_current_state: */
 #define TASK_KILLABLE                  (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
@@ -121,8 +123,6 @@ struct task_group;
 
 #define task_is_stopped_or_traced(task)        ((READ_ONCE(task->__state) & (__TASK_STOPPED | __TASK_TRACED)) != 0)
 
-#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
-
 /*
  * Special states are those that do not use the normal wait-loop pattern. See
  * the comment with set_special_state().
@@ -130,30 +130,37 @@ struct task_group;
 #define is_special_task_state(state)                           \
        ((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_PARKED | TASK_DEAD))
 
-#define __set_current_state(state_value)                       \
-       do {                                                    \
-               WARN_ON_ONCE(is_special_task_state(state_value));\
-               current->task_state_change = _THIS_IP_;         \
-               WRITE_ONCE(current->__state, (state_value));    \
-       } while (0)
-
-#define set_current_state(state_value)                         \
-       do {                                                    \
-               WARN_ON_ONCE(is_special_task_state(state_value));\
-               current->task_state_change = _THIS_IP_;         \
-               smp_store_mb(current->__state, (state_value));  \
+#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
+# define debug_normal_state_change(state_value)                                \
+       do {                                                            \
+               WARN_ON_ONCE(is_special_task_state(state_value));       \
+               current->task_state_change = _THIS_IP_;                 \
        } while (0)
 
-#define set_special_state(state_value)                                 \
+# define debug_special_state_change(state_value)                       \
        do {                                                            \
-               unsigned long flags; /* may shadow */                   \
                WARN_ON_ONCE(!is_special_task_state(state_value));      \
-               raw_spin_lock_irqsave(&current->pi_lock, flags);        \
                current->task_state_change = _THIS_IP_;                 \
-               WRITE_ONCE(current->__state, (state_value));            \
-               raw_spin_unlock_irqrestore(&current->pi_lock, flags);   \
        } while (0)
+
+# define debug_rtlock_wait_set_state()                                 \
+       do {                                                             \
+               current->saved_state_change = current->task_state_change;\
+               current->task_state_change = _THIS_IP_;                  \
+       } while (0)
+
+# define debug_rtlock_wait_restore_state()                             \
+       do {                                                             \
+               current->task_state_change = current->saved_state_change;\
+       } while (0)
+
 #else
+# define debug_normal_state_change(cond)       do { } while (0)
+# define debug_special_state_change(cond)      do { } while (0)
+# define debug_rtlock_wait_set_state()         do { } while (0)
+# define debug_rtlock_wait_restore_state()     do { } while (0)
+#endif
+
 /*
  * set_current_state() includes a barrier so that the write of current->state
  * is correctly serialised wrt the caller's subsequent test of whether to
@@ -192,26 +199,77 @@ struct task_group;
  * Also see the comments of try_to_wake_up().
  */
 #define __set_current_state(state_value)                               \
-       WRITE_ONCE(current->__state, (state_value))
+       do {                                                            \
+               debug_normal_state_change((state_value));               \
+               WRITE_ONCE(current->__state, (state_value));            \
+       } while (0)
 
 #define set_current_state(state_value)                                 \
-       smp_store_mb(current->__state, (state_value))
+       do {                                                            \
+               debug_normal_state_change((state_value));               \
+               smp_store_mb(current->__state, (state_value));          \
+       } while (0)
 
 /*
  * set_special_state() should be used for those states when the blocking task
  * can not use the regular condition based wait-loop. In that case we must
- * serialize against wakeups such that any possible in-flight TASK_RUNNING stores
- * will not collide with our state change.
+ * serialize against wakeups such that any possible in-flight TASK_RUNNING
+ * stores will not collide with our state change.
  */
 #define set_special_state(state_value)                                 \
        do {                                                            \
                unsigned long flags; /* may shadow */                   \
+                                                                       \
                raw_spin_lock_irqsave(&current->pi_lock, flags);        \
+               debug_special_state_change((state_value));              \
                WRITE_ONCE(current->__state, (state_value));            \
                raw_spin_unlock_irqrestore(&current->pi_lock, flags);   \
        } while (0)
 
-#endif
+/*
+ * PREEMPT_RT specific variants for "sleeping" spin/rwlocks
+ *
+ * RT's spin/rwlock substitutions are state preserving. The state of the
+ * task when blocking on the lock is saved in task_struct::saved_state and
+ * restored after the lock has been acquired.  These operations are
+ * serialized by task_struct::pi_lock against try_to_wake_up(). Any non RT
+ * lock related wakeups while the task is blocked on the lock are
+ * redirected to operate on task_struct::saved_state to ensure that these
+ * are not dropped. On restore task_struct::saved_state is set to
+ * TASK_RUNNING so any wakeup attempt redirected to saved_state will fail.
+ *
+ * The lock operation looks like this:
+ *
+ *     current_save_and_set_rtlock_wait_state();
+ *     for (;;) {
+ *             if (try_lock())
+ *                     break;
+ *             raw_spin_unlock_irq(&lock->wait_lock);
+ *             schedule_rtlock();
+ *             raw_spin_lock_irq(&lock->wait_lock);
+ *             set_current_state(TASK_RTLOCK_WAIT);
+ *     }
+ *     current_restore_rtlock_saved_state();
+ */
+#define current_save_and_set_rtlock_wait_state()                       \
+       do {                                                            \
+               lockdep_assert_irqs_disabled();                         \
+               raw_spin_lock(&current->pi_lock);                       \
+               current->saved_state = current->__state;                \
+               debug_rtlock_wait_set_state();                          \
+               WRITE_ONCE(current->__state, TASK_RTLOCK_WAIT);         \
+               raw_spin_unlock(&current->pi_lock);                     \
+       } while (0);
+
+#define current_restore_rtlock_saved_state()                           \
+       do {                                                            \
+               lockdep_assert_irqs_disabled();                         \
+               raw_spin_lock(&current->pi_lock);                       \
+               debug_rtlock_wait_restore_state();                      \
+               WRITE_ONCE(current->__state, current->saved_state);     \
+               current->saved_state = TASK_RUNNING;                    \
+               raw_spin_unlock(&current->pi_lock);                     \
+       } while (0);
 
 #define get_current_state()    READ_ONCE(current->__state)
 
@@ -230,6 +288,9 @@ extern long schedule_timeout_idle(long timeout);
 asmlinkage void schedule(void);
 extern void schedule_preempt_disabled(void);
 asmlinkage void preempt_schedule_irq(void);
+#ifdef CONFIG_PREEMPT_RT
+ extern void schedule_rtlock(void);
+#endif
 
 extern int __must_check io_schedule_prepare(void);
 extern void io_schedule_finish(int token);
@@ -668,6 +729,11 @@ struct task_struct {
 #endif
        unsigned int                    __state;
 
+#ifdef CONFIG_PREEMPT_RT
+       /* saved state for "spinlock sleepers" */
+       unsigned int                    saved_state;
+#endif
+
        /*
         * This begins the randomizable portion of task_struct. Only
         * scheduling-critical items should be added above here.
@@ -748,6 +814,7 @@ struct task_struct {
        unsigned int                    policy;
        int                             nr_cpus_allowed;
        const cpumask_t                 *cpus_ptr;
+       cpumask_t                       *user_cpus_ptr;
        cpumask_t                       cpus_mask;
        void                            *migration_pending;
 #ifdef CONFIG_SMP
@@ -863,6 +930,10 @@ struct task_struct {
        /* Used by page_owner=on to detect recursion in page tracking. */
        unsigned                        in_page_owner:1;
 #endif
+#ifdef CONFIG_EVENTFD
+       /* Recursion prevention for eventfd_signal() */
+       unsigned                        in_eventfd_signal:1;
+#endif
 
        unsigned long                   atomic_flags; /* Flags requiring atomic access. */
 
@@ -1357,6 +1428,9 @@ struct task_struct {
        struct kmap_ctrl                kmap_ctrl;
 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
        unsigned long                   task_state_change;
+# ifdef CONFIG_PREEMPT_RT
+       unsigned long                   saved_state_change;
+# endif
 #endif
        int                             pagefault_disabled;
 #ifdef CONFIG_MMU
@@ -1400,6 +1474,16 @@ struct task_struct {
        struct llist_head               kretprobe_instances;
 #endif
 
+#ifdef CONFIG_ARCH_HAS_PARANOID_L1D_FLUSH
+       /*
+        * If L1D flush is supported on mm context switch
+        * then we use this callback head to queue kill work
+        * to kill tasks that are not running on SMT disabled
+        * cores
+        */
+       struct callback_head            l1d_flush_kill;
+#endif
+
        /*
         * New fields for task_struct should be added above here, so that
         * they are included in the randomized portion of task_struct.
@@ -1705,6 +1789,11 @@ extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_
 #ifdef CONFIG_SMP
 extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask);
 extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask);
+extern int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src, int node);
+extern void release_user_cpus_ptr(struct task_struct *p);
+extern int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask);
+extern void force_compatible_cpus_allowed_ptr(struct task_struct *p);
+extern void relax_compatible_cpus_allowed_ptr(struct task_struct *p);
 #else
 static inline void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
 {
@@ -1715,6 +1804,21 @@ static inline int set_cpus_allowed_ptr(struct task_struct *p, const struct cpuma
                return -EINVAL;
        return 0;
 }
+static inline int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src, int node)
+{
+       if (src->user_cpus_ptr)
+               return -EINVAL;
+       return 0;
+}
+static inline void release_user_cpus_ptr(struct task_struct *p)
+{
+       WARN_ON(p->user_cpus_ptr);
+}
+
+static inline int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask)
+{
+       return 0;
+}
 #endif
 
 extern int yield_to(struct task_struct *p, bool preempt);
index b9126fe..0310a5a 100644 (file)
@@ -714,6 +714,12 @@ static inline void unlock_task_sighand(struct task_struct *task,
        spin_unlock_irqrestore(&task->sighand->siglock, *flags);
 }
 
+#ifdef CONFIG_LOCKDEP
+extern void lockdep_assert_task_sighand_held(struct task_struct *task);
+#else
+static inline void lockdep_assert_task_sighand_held(struct task_struct *task) { }
+#endif
+
 static inline unsigned long task_rlimit(const struct task_struct *task,
                unsigned int limit)
 {
index db2c0f3..304f431 100644 (file)
@@ -28,30 +28,12 @@ enum { sysctl_hung_task_timeout_secs = 0 };
 
 extern unsigned int sysctl_sched_child_runs_first;
 
-extern unsigned int sysctl_sched_latency;
-extern unsigned int sysctl_sched_min_granularity;
-extern unsigned int sysctl_sched_wakeup_granularity;
-
 enum sched_tunable_scaling {
        SCHED_TUNABLESCALING_NONE,
        SCHED_TUNABLESCALING_LOG,
        SCHED_TUNABLESCALING_LINEAR,
        SCHED_TUNABLESCALING_END,
 };
-extern unsigned int sysctl_sched_tunable_scaling;
-
-extern unsigned int sysctl_numa_balancing_scan_delay;
-extern unsigned int sysctl_numa_balancing_scan_period_min;
-extern unsigned int sysctl_numa_balancing_scan_period_max;
-extern unsigned int sysctl_numa_balancing_scan_size;
-
-#ifdef CONFIG_SCHED_DEBUG
-extern __read_mostly unsigned int sysctl_sched_migration_cost;
-extern __read_mostly unsigned int sysctl_sched_nr_migrate;
-
-extern int sysctl_resched_latency_warn_ms;
-extern int sysctl_resched_latency_warn_once;
-#endif
 
 /*
  *  control realtime throttling:
index 26a2013..06cd8fb 100644 (file)
@@ -42,8 +42,11 @@ struct wake_q_head {
 
 #define WAKE_Q_TAIL ((struct wake_q_node *) 0x01)
 
-#define DEFINE_WAKE_Q(name)                            \
-       struct wake_q_head name = { WAKE_Q_TAIL, &name.first }
+#define WAKE_Q_HEAD_INITIALIZER(name)                          \
+       { WAKE_Q_TAIL, &name.first }
+
+#define DEFINE_WAKE_Q(name)                                    \
+       struct wake_q_head name = WAKE_Q_HEAD_INITIALIZER(name)
 
 static inline void wake_q_init(struct wake_q_head *head)
 {
index 24eda04..5b72885 100644 (file)
@@ -120,10 +120,11 @@ enum lockdown_reason {
        LOCKDOWN_MMIOTRACE,
        LOCKDOWN_DEBUGFS,
        LOCKDOWN_XMON_WR,
+       LOCKDOWN_BPF_WRITE_USER,
        LOCKDOWN_INTEGRITY_MAX,
        LOCKDOWN_KCORE,
        LOCKDOWN_KPROBES,
-       LOCKDOWN_BPF_READ,
+       LOCKDOWN_BPF_READ_KERNEL,
        LOCKDOWN_PERF,
        LOCKDOWN_TRACEFS,
        LOCKDOWN_XMON_RW,
index 52d7fb9..c58cc14 100644 (file)
@@ -518,6 +518,25 @@ static inline void uart_unlock_and_check_sysrq(struct uart_port *port)
        if (sysrq_ch)
                handle_sysrq(sysrq_ch);
 }
+
+static inline void uart_unlock_and_check_sysrq_irqrestore(struct uart_port *port,
+               unsigned long flags)
+{
+       int sysrq_ch;
+
+       if (!port->has_sysrq) {
+               spin_unlock_irqrestore(&port->lock, flags);
+               return;
+       }
+
+       sysrq_ch = port->sysrq_ch;
+       port->sysrq_ch = 0;
+
+       spin_unlock_irqrestore(&port->lock, flags);
+
+       if (sysrq_ch)
+               handle_sysrq(sysrq_ch);
+}
 #else  /* CONFIG_MAGIC_SYSRQ_SERIAL */
 static inline int uart_handle_sysrq_char(struct uart_port *port, unsigned int ch)
 {
@@ -531,6 +550,11 @@ static inline void uart_unlock_and_check_sysrq(struct uart_port *port)
 {
        spin_unlock(&port->lock);
 }
+static inline void uart_unlock_and_check_sysrq_irqrestore(struct uart_port *port,
+               unsigned long flags)
+{
+       spin_unlock_irqrestore(&port->lock, flags);
+}
 #endif /* CONFIG_MAGIC_SYSRQ_SERIAL */
 
 /*
index 96f3190..14ab0c0 100644 (file)
@@ -285,11 +285,45 @@ static inline struct sk_psock *sk_psock(const struct sock *sk)
        return rcu_dereference_sk_user_data(sk);
 }
 
+static inline void sk_psock_set_state(struct sk_psock *psock,
+                                     enum sk_psock_state_bits bit)
+{
+       set_bit(bit, &psock->state);
+}
+
+static inline void sk_psock_clear_state(struct sk_psock *psock,
+                                       enum sk_psock_state_bits bit)
+{
+       clear_bit(bit, &psock->state);
+}
+
+static inline bool sk_psock_test_state(const struct sk_psock *psock,
+                                      enum sk_psock_state_bits bit)
+{
+       return test_bit(bit, &psock->state);
+}
+
+static inline void sock_drop(struct sock *sk, struct sk_buff *skb)
+{
+       sk_drops_add(sk, skb);
+       kfree_skb(skb);
+}
+
+static inline void drop_sk_msg(struct sk_psock *psock, struct sk_msg *msg)
+{
+       if (msg->skb)
+               sock_drop(psock->sk, msg->skb);
+       kfree(msg);
+}
+
 static inline void sk_psock_queue_msg(struct sk_psock *psock,
                                      struct sk_msg *msg)
 {
        spin_lock_bh(&psock->ingress_lock);
-       list_add_tail(&msg->list, &psock->ingress_msg);
+       if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
+               list_add_tail(&msg->list, &psock->ingress_msg);
+       else
+               drop_sk_msg(psock, msg);
        spin_unlock_bh(&psock->ingress_lock);
 }
 
@@ -406,24 +440,6 @@ static inline void sk_psock_restore_proto(struct sock *sk,
                psock->psock_update_sk_prot(sk, psock, true);
 }
 
-static inline void sk_psock_set_state(struct sk_psock *psock,
-                                     enum sk_psock_state_bits bit)
-{
-       set_bit(bit, &psock->state);
-}
-
-static inline void sk_psock_clear_state(struct sk_psock *psock,
-                                       enum sk_psock_state_bits bit)
-{
-       clear_bit(bit, &psock->state);
-}
-
-static inline bool sk_psock_test_state(const struct sk_psock *psock,
-                                      enum sk_psock_state_bits bit)
-{
-       return test_bit(bit, &psock->state);
-}
-
 static inline struct sk_psock *sk_psock_get(struct sock *sk)
 {
        struct sk_psock *psock;
index 97b8d12..8371bca 100644 (file)
@@ -147,7 +147,11 @@ extern int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer);
  *     not using a GPIO line)
  * @word_delay: delay to be inserted between consecutive
  *     words of a transfer
- *
+ * @cs_setup: delay to be introduced by the controller after CS is asserted
+ * @cs_hold: delay to be introduced by the controller before CS is deasserted
+ * @cs_inactive: delay to be introduced by the controller after CS is
+ *     deasserted. If @cs_change_delay is used from @spi_transfer, then the
+ *     two delays will be added up.
  * @statistics: statistics for the spi_device
  *
  * A @spi_device is used to interchange data between an SPI slave
@@ -188,6 +192,10 @@ struct spi_device {
        int                     cs_gpio;        /* LEGACY: chip select gpio */
        struct gpio_desc        *cs_gpiod;      /* chip select gpio desc */
        struct spi_delay        word_delay; /* inter-word delay */
+       /* CS delays */
+       struct spi_delay        cs_setup;
+       struct spi_delay        cs_hold;
+       struct spi_delay        cs_inactive;
 
        /* the statistics */
        struct spi_statistics   statistics;
@@ -339,6 +347,7 @@ extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 ch
  * @max_speed_hz: Highest supported transfer speed
  * @flags: other constraints relevant to this driver
  * @slave: indicates that this is an SPI slave controller
+ * @devm_allocated: whether the allocation of this struct is devres-managed
  * @max_transfer_size: function that returns the max transfer size for
  *     a &spi_device; may be %NULL, so the default %SIZE_MAX will be used.
  * @max_message_size: function that returns the max message size for
@@ -412,11 +421,6 @@ extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 ch
  *          controller has native support for memory like operations.
  * @unprepare_message: undo any work done by prepare_message().
  * @slave_abort: abort the ongoing transfer request on an SPI slave controller
- * @cs_setup: delay to be introduced by the controller after CS is asserted
- * @cs_hold: delay to be introduced by the controller before CS is deasserted
- * @cs_inactive: delay to be introduced by the controller after CS is
- *     deasserted. If @cs_change_delay is used from @spi_transfer, then the
- *     two delays will be added up.
  * @cs_gpios: LEGACY: array of GPIO descs to use as chip select lines; one per
  *     CS number. Any individual value may be -ENOENT for CS lines that
  *     are not GPIOs (driven by the SPI controller itself). Use the cs_gpiods
@@ -511,7 +515,7 @@ struct spi_controller {
 
 #define SPI_MASTER_GPIO_SS             BIT(5)  /* GPIO CS must select slave */
 
-       /* flag indicating this is a non-devres managed controller */
+       /* flag indicating if the allocation of this struct is devres-managed */
        bool                    devm_allocated;
 
        /* flag indicating this is an SPI slave controller */
@@ -550,8 +554,7 @@ struct spi_controller {
         * to configure specific CS timing through spi_set_cs_timing() after
         * spi_setup().
         */
-       int (*set_cs_timing)(struct spi_device *spi, struct spi_delay *setup,
-                            struct spi_delay *hold, struct spi_delay *inactive);
+       int (*set_cs_timing)(struct spi_device *spi);
 
        /* bidirectional bulk transfers
         *
@@ -638,11 +641,6 @@ struct spi_controller {
        /* Optimized handlers for SPI memory-like operations. */
        const struct spi_controller_mem_ops *mem_ops;
 
-       /* CS delays */
-       struct spi_delay        cs_setup;
-       struct spi_delay        cs_hold;
-       struct spi_delay        cs_inactive;
-
        /* gpio chip select */
        int                     *cs_gpios;
        struct gpio_desc        **cs_gpiods;
index 7989784..45310ea 100644 (file)
@@ -12,6 +12,8 @@
  *  asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
  *                        initializers
  *
+ *  linux/spinlock_types_raw:
+ *                       The raw types and initializers
  *  linux/spinlock_types.h:
  *                        defines the generic type and initializers
  *
@@ -31,6 +33,8 @@
  *                        contains the generic, simplified UP spinlock type.
  *                        (which is an empty structure on non-debug builds)
  *
+ *  linux/spinlock_types_raw:
+ *                       The raw RT types and initializers
  *  linux/spinlock_types.h:
  *                        defines the generic type and initializers
  *
@@ -308,8 +312,10 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
        1 : ({ local_irq_restore(flags); 0; }); \
 })
 
-/* Include rwlock functions */
+#ifndef CONFIG_PREEMPT_RT
+/* Include rwlock functions for !RT */
 #include <linux/rwlock.h>
+#endif
 
 /*
  * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
@@ -320,6 +326,9 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
 # include <linux/spinlock_api_up.h>
 #endif
 
+/* Non PREEMPT_RT kernel, map to raw spinlocks: */
+#ifndef CONFIG_PREEMPT_RT
+
 /*
  * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
  */
@@ -454,6 +463,10 @@ static __always_inline int spin_is_contended(spinlock_t *lock)
 
 #define assert_spin_locked(lock)       assert_raw_spin_locked(&(lock)->rlock)
 
+#else  /* !CONFIG_PREEMPT_RT */
+# include <linux/spinlock_rt.h>
+#endif /* CONFIG_PREEMPT_RT */
+
 /*
  * Pull the atomic_t declaration:
  * (asm-mips/atomic.h needs above definitions)
index 19a9be9..6b8e1a0 100644 (file)
@@ -187,6 +187,9 @@ static inline int __raw_spin_trylock_bh(raw_spinlock_t *lock)
        return 0;
 }
 
+/* PREEMPT_RT has its own rwlock implementation */
+#ifndef CONFIG_PREEMPT_RT
 #include <linux/rwlock_api_smp.h>
+#endif
 
 #endif /* __LINUX_SPINLOCK_API_SMP_H */
diff --git a/include/linux/spinlock_rt.h b/include/linux/spinlock_rt.h
new file mode 100644 (file)
index 0000000..835aeda
--- /dev/null
@@ -0,0 +1,159 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#ifndef __LINUX_SPINLOCK_RT_H
+#define __LINUX_SPINLOCK_RT_H
+
+#ifndef __LINUX_SPINLOCK_H
+#error Do not include directly. Use spinlock.h
+#endif
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+extern void __rt_spin_lock_init(spinlock_t *lock, const char *name,
+                               struct lock_class_key *key, bool percpu);
+#else
+static inline void __rt_spin_lock_init(spinlock_t *lock, const char *name,
+                               struct lock_class_key *key, bool percpu)
+{
+}
+#endif
+
+#define spin_lock_init(slock)                                  \
+do {                                                           \
+       static struct lock_class_key __key;                     \
+                                                               \
+       rt_mutex_base_init(&(slock)->lock);                     \
+       __rt_spin_lock_init(slock, #slock, &__key, false);      \
+} while (0)
+
+#define local_spin_lock_init(slock)                            \
+do {                                                           \
+       static struct lock_class_key __key;                     \
+                                                               \
+       rt_mutex_base_init(&(slock)->lock);                     \
+       __rt_spin_lock_init(slock, #slock, &__key, true);       \
+} while (0)
+
+extern void rt_spin_lock(spinlock_t *lock);
+extern void rt_spin_lock_nested(spinlock_t *lock, int subclass);
+extern void rt_spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *nest_lock);
+extern void rt_spin_unlock(spinlock_t *lock);
+extern void rt_spin_lock_unlock(spinlock_t *lock);
+extern int rt_spin_trylock_bh(spinlock_t *lock);
+extern int rt_spin_trylock(spinlock_t *lock);
+
+static __always_inline void spin_lock(spinlock_t *lock)
+{
+       rt_spin_lock(lock);
+}
+
+#ifdef CONFIG_LOCKDEP
+# define __spin_lock_nested(lock, subclass)                            \
+       rt_spin_lock_nested(lock, subclass)
+
+# define __spin_lock_nest_lock(lock, nest_lock)                                \
+       do {                                                            \
+               typecheck(struct lockdep_map *, &(nest_lock)->dep_map); \
+               rt_spin_lock_nest_lock(lock, &(nest_lock)->dep_map);    \
+       } while (0)
+# define __spin_lock_irqsave_nested(lock, flags, subclass)     \
+       do {                                                    \
+               typecheck(unsigned long, flags);                \
+               flags = 0;                                      \
+               __spin_lock_nested(lock, subclass);             \
+       } while (0)
+
+#else
+ /*
+  * Always evaluate the 'subclass' argument to avoid that the compiler
+  * warns about set-but-not-used variables when building with
+  * CONFIG_DEBUG_LOCK_ALLOC=n and with W=1.
+  */
+# define __spin_lock_nested(lock, subclass)    spin_lock(((void)(subclass), (lock)))
+# define __spin_lock_nest_lock(lock, subclass) spin_lock(((void)(subclass), (lock)))
+# define __spin_lock_irqsave_nested(lock, flags, subclass)     \
+       spin_lock_irqsave(((void)(subclass), (lock)), flags)
+#endif
+
+#define spin_lock_nested(lock, subclass)               \
+       __spin_lock_nested(lock, subclass)
+
+#define spin_lock_nest_lock(lock, nest_lock)           \
+       __spin_lock_nest_lock(lock, nest_lock)
+
+#define spin_lock_irqsave_nested(lock, flags, subclass)        \
+       __spin_lock_irqsave_nested(lock, flags, subclass)
+
+static __always_inline void spin_lock_bh(spinlock_t *lock)
+{
+       /* Investigate: Drop bh when blocking ? */
+       local_bh_disable();
+       rt_spin_lock(lock);
+}
+
+static __always_inline void spin_lock_irq(spinlock_t *lock)
+{
+       rt_spin_lock(lock);
+}
+
+#define spin_lock_irqsave(lock, flags)                  \
+       do {                                             \
+               typecheck(unsigned long, flags);         \
+               flags = 0;                               \
+               spin_lock(lock);                         \
+       } while (0)
+
+static __always_inline void spin_unlock(spinlock_t *lock)
+{
+       rt_spin_unlock(lock);
+}
+
+static __always_inline void spin_unlock_bh(spinlock_t *lock)
+{
+       rt_spin_unlock(lock);
+       local_bh_enable();
+}
+
+static __always_inline void spin_unlock_irq(spinlock_t *lock)
+{
+       rt_spin_unlock(lock);
+}
+
+static __always_inline void spin_unlock_irqrestore(spinlock_t *lock,
+                                                  unsigned long flags)
+{
+       rt_spin_unlock(lock);
+}
+
+#define spin_trylock(lock)                             \
+       __cond_lock(lock, rt_spin_trylock(lock))
+
+#define spin_trylock_bh(lock)                          \
+       __cond_lock(lock, rt_spin_trylock_bh(lock))
+
+#define spin_trylock_irq(lock)                         \
+       __cond_lock(lock, rt_spin_trylock(lock))
+
+#define __spin_trylock_irqsave(lock, flags)            \
+({                                                     \
+       int __locked;                                   \
+                                                       \
+       typecheck(unsigned long, flags);                \
+       flags = 0;                                      \
+       __locked = spin_trylock(lock);                  \
+       __locked;                                       \
+})
+
+#define spin_trylock_irqsave(lock, flags)              \
+       __cond_lock(lock, __spin_trylock_irqsave(lock, flags))
+
+#define spin_is_contended(lock)                (((void)(lock), 0))
+
+static inline int spin_is_locked(spinlock_t *lock)
+{
+       return rt_mutex_base_is_locked(&lock->lock);
+}
+
+#define assert_spin_locked(lock) BUG_ON(!spin_is_locked(lock))
+
+#include <linux/rwlock_rt.h>
+
+#endif
index b981caa..2dfa35f 100644 (file)
@@ -9,65 +9,11 @@
  * Released under the General Public License (GPL).
  */
 
-#if defined(CONFIG_SMP)
-# include <asm/spinlock_types.h>
-#else
-# include <linux/spinlock_types_up.h>
-#endif
-
-#include <linux/lockdep_types.h>
+#include <linux/spinlock_types_raw.h>
 
-typedef struct raw_spinlock {
-       arch_spinlock_t raw_lock;
-#ifdef CONFIG_DEBUG_SPINLOCK
-       unsigned int magic, owner_cpu;
-       void *owner;
-#endif
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-       struct lockdep_map dep_map;
-#endif
-} raw_spinlock_t;
-
-#define SPINLOCK_MAGIC         0xdead4ead
-
-#define SPINLOCK_OWNER_INIT    ((void *)-1L)
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define RAW_SPIN_DEP_MAP_INIT(lockname)               \
-       .dep_map = {                                    \
-               .name = #lockname,                      \
-               .wait_type_inner = LD_WAIT_SPIN,        \
-       }
-# define SPIN_DEP_MAP_INIT(lockname)                   \
-       .dep_map = {                                    \
-               .name = #lockname,                      \
-               .wait_type_inner = LD_WAIT_CONFIG,      \
-       }
-#else
-# define RAW_SPIN_DEP_MAP_INIT(lockname)
-# define SPIN_DEP_MAP_INIT(lockname)
-#endif
-
-#ifdef CONFIG_DEBUG_SPINLOCK
-# define SPIN_DEBUG_INIT(lockname)             \
-       .magic = SPINLOCK_MAGIC,                \
-       .owner_cpu = -1,                        \
-       .owner = SPINLOCK_OWNER_INIT,
-#else
-# define SPIN_DEBUG_INIT(lockname)
-#endif
-
-#define __RAW_SPIN_LOCK_INITIALIZER(lockname)  \
-       {                                       \
-       .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED,  \
-       SPIN_DEBUG_INIT(lockname)               \
-       RAW_SPIN_DEP_MAP_INIT(lockname) }
-
-#define __RAW_SPIN_LOCK_UNLOCKED(lockname)     \
-       (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname)
-
-#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x)
+#ifndef CONFIG_PREEMPT_RT
 
+/* Non PREEMPT_RT kernels map spinlock to raw_spinlock */
 typedef struct spinlock {
        union {
                struct raw_spinlock rlock;
@@ -96,6 +42,35 @@ typedef struct spinlock {
 
 #define DEFINE_SPINLOCK(x)     spinlock_t x = __SPIN_LOCK_UNLOCKED(x)
 
+#else /* !CONFIG_PREEMPT_RT */
+
+/* PREEMPT_RT kernels map spinlock to rt_mutex */
+#include <linux/rtmutex.h>
+
+typedef struct spinlock {
+       struct rt_mutex_base    lock;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+       struct lockdep_map      dep_map;
+#endif
+} spinlock_t;
+
+#define __SPIN_LOCK_UNLOCKED(name)                             \
+       {                                                       \
+               .lock = __RT_MUTEX_BASE_INITIALIZER(name.lock), \
+               SPIN_DEP_MAP_INIT(name)                         \
+       }
+
+#define __LOCAL_SPIN_LOCK_UNLOCKED(name)                       \
+       {                                                       \
+               .lock = __RT_MUTEX_BASE_INITIALIZER(name.lock), \
+               LOCAL_SPIN_DEP_MAP_INIT(name)                   \
+       }
+
+#define DEFINE_SPINLOCK(name)                                  \
+       spinlock_t name = __SPIN_LOCK_UNLOCKED(name)
+
+#endif /* CONFIG_PREEMPT_RT */
+
 #include <linux/rwlock_types.h>
 
 #endif /* __LINUX_SPINLOCK_TYPES_H */
diff --git a/include/linux/spinlock_types_raw.h b/include/linux/spinlock_types_raw.h
new file mode 100644 (file)
index 0000000..91cb36b
--- /dev/null
@@ -0,0 +1,73 @@
+#ifndef __LINUX_SPINLOCK_TYPES_RAW_H
+#define __LINUX_SPINLOCK_TYPES_RAW_H
+
+#include <linux/types.h>
+
+#if defined(CONFIG_SMP)
+# include <asm/spinlock_types.h>
+#else
+# include <linux/spinlock_types_up.h>
+#endif
+
+#include <linux/lockdep_types.h>
+
+typedef struct raw_spinlock {
+       arch_spinlock_t raw_lock;
+#ifdef CONFIG_DEBUG_SPINLOCK
+       unsigned int magic, owner_cpu;
+       void *owner;
+#endif
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+       struct lockdep_map dep_map;
+#endif
+} raw_spinlock_t;
+
+#define SPINLOCK_MAGIC         0xdead4ead
+
+#define SPINLOCK_OWNER_INIT    ((void *)-1L)
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define RAW_SPIN_DEP_MAP_INIT(lockname)               \
+       .dep_map = {                                    \
+               .name = #lockname,                      \
+               .wait_type_inner = LD_WAIT_SPIN,        \
+       }
+# define SPIN_DEP_MAP_INIT(lockname)                   \
+       .dep_map = {                                    \
+               .name = #lockname,                      \
+               .wait_type_inner = LD_WAIT_CONFIG,      \
+       }
+
+# define LOCAL_SPIN_DEP_MAP_INIT(lockname)             \
+       .dep_map = {                                    \
+               .name = #lockname,                      \
+               .wait_type_inner = LD_WAIT_CONFIG,      \
+               .lock_type = LD_LOCK_PERCPU,            \
+       }
+#else
+# define RAW_SPIN_DEP_MAP_INIT(lockname)
+# define SPIN_DEP_MAP_INIT(lockname)
+# define LOCAL_SPIN_DEP_MAP_INIT(lockname)
+#endif
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+# define SPIN_DEBUG_INIT(lockname)             \
+       .magic = SPINLOCK_MAGIC,                \
+       .owner_cpu = -1,                        \
+       .owner = SPINLOCK_OWNER_INIT,
+#else
+# define SPIN_DEBUG_INIT(lockname)
+#endif
+
+#define __RAW_SPIN_LOCK_INITIALIZER(lockname)  \
+{                                              \
+       .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED,  \
+       SPIN_DEBUG_INIT(lockname)               \
+       RAW_SPIN_DEP_MAP_INIT(lockname) }
+
+#define __RAW_SPIN_LOCK_UNLOCKED(lockname)     \
+       (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname)
+
+#define DEFINE_RAW_SPINLOCK(x)  raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x)
+
+#endif /* __LINUX_SPINLOCK_TYPES_RAW_H */
index 0e0cf4d..6cfaa0a 100644 (file)
@@ -61,7 +61,7 @@ static inline int __srcu_read_lock(struct srcu_struct *ssp)
        int idx;
 
        idx = ((READ_ONCE(ssp->srcu_idx) + 1) & 0x2) >> 1;
-       WRITE_ONCE(ssp->srcu_lock_nesting[idx], ssp->srcu_lock_nesting[idx] + 1);
+       WRITE_ONCE(ssp->srcu_lock_nesting[idx], READ_ONCE(ssp->srcu_lock_nesting[idx]) + 1);
        return idx;
 }
 
@@ -81,11 +81,11 @@ static inline void srcu_torture_stats_print(struct srcu_struct *ssp,
 {
        int idx;
 
-       idx = ((READ_ONCE(ssp->srcu_idx) + 1) & 0x2) >> 1;
+       idx = ((data_race(READ_ONCE(ssp->srcu_idx)) + 1) & 0x2) >> 1;
        pr_alert("%s%s Tiny SRCU per-CPU(idx=%d): (%hd,%hd)\n",
                 tt, tf, idx,
-                READ_ONCE(ssp->srcu_lock_nesting[!idx]),
-                READ_ONCE(ssp->srcu_lock_nesting[idx]));
+                data_race(READ_ONCE(ssp->srcu_lock_nesting[!idx])),
+                data_race(READ_ONCE(ssp->srcu_lock_nesting[idx])));
 }
 
 #endif
index fc94faa..3e56a97 100644 (file)
  *   DECLARE_STATIC_CALL(name, func);
  *   DEFINE_STATIC_CALL(name, func);
  *   DEFINE_STATIC_CALL_NULL(name, typename);
+ *   DEFINE_STATIC_CALL_RET0(name, typename);
+ *
+ *   __static_call_return0;
+ *
  *   static_call(name)(args...);
  *   static_call_cond(name)(args...);
  *   static_call_update(name, func);
  *   static_call_query(name);
  *
+ *   EXPORT_STATIC_CALL{,_TRAMP}{,_GPL}()
+ *
  * Usage example:
  *
  *   # Start with the following functions (with identical prototypes):
  *   To query which function is currently set to be called, use:
  *
  *   func = static_call_query(name);
+ *
+ *
+ * DEFINE_STATIC_CALL_RET0 / __static_call_return0:
+ *
+ *   Just like how DEFINE_STATIC_CALL_NULL() / static_call_cond() optimize the
+ *   conditional void function call, DEFINE_STATIC_CALL_RET0 /
+ *   __static_call_return0 optimize the do nothing return 0 function.
+ *
+ *   This feature is strictly UB per the C standard (since it casts a function
+ *   pointer to a different signature) and relies on the architecture ABI to
+ *   make things work. In particular it relies on Caller Stack-cleanup and the
+ *   whole return register being clobbered for short return values. All normal
+ *   CDECL style ABIs conform.
+ *
+ *   In particular the x86_64 implementation replaces the 5 byte CALL
+ *   instruction at the callsite with a 5 byte clear of the RAX register,
+ *   completely eliding any function call overhead.
+ *
+ *   Notably argument setup is unconditional.
+ *
+ *
+ * EXPORT_STATIC_CALL() vs EXPORT_STATIC_CALL_TRAMP():
+ *
+ *   The difference is that the _TRAMP variant tries to only export the
+ *   trampoline with the result that a module can use static_call{,_cond}() but
+ *   not static_call_update().
+ *
  */
 
 #include <linux/types.h>
index 54269e4..3ebfea0 100644 (file)
@@ -27,6 +27,7 @@
 #define TEE_SHM_USER_MAPPED    BIT(4)  /* Memory mapped in user space */
 #define TEE_SHM_POOL           BIT(5)  /* Memory allocated from pool */
 #define TEE_SHM_KERNEL_MAPPED  BIT(6)  /* Memory mapped in kernel space */
+#define TEE_SHM_PRIV           BIT(7)  /* Memory private to TEE driver */
 
 struct device;
 struct tee_device;
@@ -332,6 +333,7 @@ void *tee_get_drvdata(struct tee_device *teedev);
  * @returns a pointer to 'struct tee_shm'
  */
 struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags);
+struct tee_shm *tee_shm_alloc_kernel_buf(struct tee_context *ctx, size_t size);
 
 /**
  * tee_shm_register() - Register shared memory buffer
index 3aee78d..784659d 100644 (file)
@@ -196,6 +196,7 @@ struct otg_fsm {
        struct mutex lock;
        u8 *host_req_flag;
        struct delayed_work hnp_polling_work;
+       bool hnp_work_inited;
        bool state_changed;
 };
 
index 3357ac9..8cfe49d 100644 (file)
@@ -277,6 +277,17 @@ struct vdpa_device *__vdpa_alloc_device(struct device *parent,
                                        const struct vdpa_config_ops *config,
                                        size_t size, const char *name);
 
+/**
+ * vdpa_alloc_device - allocate and initilaize a vDPA device
+ *
+ * @dev_struct: the type of the parent structure
+ * @member: the name of struct vdpa_device within the @dev_struct
+ * @parent: the parent device
+ * @config: the bus operations that is supported by this device
+ * @name: name of the vdpa device
+ *
+ * Return allocated data structure or ERR_PTR upon error
+ */
 #define vdpa_alloc_device(dev_struct, member, parent, config, name)   \
                          container_of(__vdpa_alloc_device( \
                                       parent, config, \
index b1894e0..41edbc0 100644 (file)
@@ -110,6 +110,7 @@ struct virtio_device {
        bool config_enabled;
        bool config_change_pending;
        spinlock_t config_lock;
+       spinlock_t vqs_list_lock; /* Protects VQs list access */
        struct device dev;
        struct virtio_device_id id;
        const struct virtio_config_ops *config;
index 84db7b8..212892c 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/virtio_byteorder.h>
 #include <linux/uio.h>
 #include <linux/slab.h>
+#include <linux/spinlock.h>
 #if IS_REACHABLE(CONFIG_VHOST_IOTLB)
 #include <linux/dma-direction.h>
 #include <linux/vhost_iotlb.h>
index 6598ae3..93dab0e 100644 (file)
@@ -56,7 +56,7 @@ struct task_struct;
 
 #define __WAIT_QUEUE_HEAD_INITIALIZER(name) {                                  \
        .lock           = __SPIN_LOCK_UNLOCKED(name.lock),                      \
-       .head           = { &(name).head, &(name).head } }
+       .head           = LIST_HEAD_INIT(name.head) }
 
 #define DECLARE_WAIT_QUEUE_HEAD(name) \
        struct wait_queue_head name = __WAIT_QUEUE_HEAD_INITIALIZER(name)
index 667e86c..270677d 100644 (file)
@@ -336,14 +336,9 @@ static inline void cgroup_writeback_umount(void)
 /*
  * mm/page-writeback.c
  */
-#ifdef CONFIG_BLOCK
 void laptop_io_completion(struct backing_dev_info *info);
 void laptop_sync_completion(void);
-void laptop_mode_sync(struct work_struct *work);
 void laptop_mode_timer_fn(struct timer_list *t);
-#else
-static inline void laptop_sync_completion(void) { }
-#endif
 bool node_dirty_ok(struct pglist_data *pgdat);
 int wb_domain_init(struct wb_domain *dom, gfp_t gfp);
 #ifdef CONFIG_CGROUP_WRITEBACK
index b77f39f..29db736 100644 (file)
 #define __LINUX_WW_MUTEX_H
 
 #include <linux/mutex.h>
+#include <linux/rtmutex.h>
+
+#if defined(CONFIG_DEBUG_MUTEXES) || \
+   (defined(CONFIG_PREEMPT_RT) && defined(CONFIG_DEBUG_RT_MUTEXES))
+#define DEBUG_WW_MUTEXES
+#endif
+
+#ifndef CONFIG_PREEMPT_RT
+#define WW_MUTEX_BASE                  mutex
+#define ww_mutex_base_init(l,n,k)      __mutex_init(l,n,k)
+#define ww_mutex_base_trylock(l)       mutex_trylock(l)
+#define ww_mutex_base_is_locked(b)     mutex_is_locked((b))
+#else
+#define WW_MUTEX_BASE                  rt_mutex
+#define ww_mutex_base_init(l,n,k)      __rt_mutex_init(l,n,k)
+#define ww_mutex_base_trylock(l)       rt_mutex_trylock(l)
+#define ww_mutex_base_is_locked(b)     rt_mutex_base_is_locked(&(b)->rtmutex)
+#endif
 
 struct ww_class {
        atomic_long_t stamp;
@@ -28,16 +46,24 @@ struct ww_class {
        unsigned int is_wait_die;
 };
 
+struct ww_mutex {
+       struct WW_MUTEX_BASE base;
+       struct ww_acquire_ctx *ctx;
+#ifdef DEBUG_WW_MUTEXES
+       struct ww_class *ww_class;
+#endif
+};
+
 struct ww_acquire_ctx {
        struct task_struct *task;
        unsigned long stamp;
        unsigned int acquired;
        unsigned short wounded;
        unsigned short is_wait_die;
-#ifdef CONFIG_DEBUG_MUTEXES
+#ifdef DEBUG_WW_MUTEXES
        unsigned int done_acquire;
        struct ww_class *ww_class;
-       struct ww_mutex *contending_lock;
+       void *contending_lock;
 #endif
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
        struct lockdep_map dep_map;
@@ -74,9 +100,9 @@ struct ww_acquire_ctx {
 static inline void ww_mutex_init(struct ww_mutex *lock,
                                 struct ww_class *ww_class)
 {
-       __mutex_init(&lock->base, ww_class->mutex_name, &ww_class->mutex_key);
+       ww_mutex_base_init(&lock->base, ww_class->mutex_name, &ww_class->mutex_key);
        lock->ctx = NULL;
-#ifdef CONFIG_DEBUG_MUTEXES
+#ifdef DEBUG_WW_MUTEXES
        lock->ww_class = ww_class;
 #endif
 }
@@ -113,7 +139,7 @@ static inline void ww_acquire_init(struct ww_acquire_ctx *ctx,
        ctx->acquired = 0;
        ctx->wounded = false;
        ctx->is_wait_die = ww_class->is_wait_die;
-#ifdef CONFIG_DEBUG_MUTEXES
+#ifdef DEBUG_WW_MUTEXES
        ctx->ww_class = ww_class;
        ctx->done_acquire = 0;
        ctx->contending_lock = NULL;
@@ -143,7 +169,7 @@ static inline void ww_acquire_init(struct ww_acquire_ctx *ctx,
  */
 static inline void ww_acquire_done(struct ww_acquire_ctx *ctx)
 {
-#ifdef CONFIG_DEBUG_MUTEXES
+#ifdef DEBUG_WW_MUTEXES
        lockdep_assert_held(ctx);
 
        DEBUG_LOCKS_WARN_ON(ctx->done_acquire);
@@ -163,7 +189,7 @@ static inline void ww_acquire_fini(struct ww_acquire_ctx *ctx)
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
        mutex_release(&ctx->dep_map, _THIS_IP_);
 #endif
-#ifdef CONFIG_DEBUG_MUTEXES
+#ifdef DEBUG_WW_MUTEXES
        DEBUG_LOCKS_WARN_ON(ctx->acquired);
        if (!IS_ENABLED(CONFIG_PROVE_LOCKING))
                /*
@@ -269,7 +295,7 @@ static inline void
 ww_mutex_lock_slow(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 {
        int ret;
-#ifdef CONFIG_DEBUG_MUTEXES
+#ifdef DEBUG_WW_MUTEXES
        DEBUG_LOCKS_WARN_ON(!ctx->contending_lock);
 #endif
        ret = ww_mutex_lock(lock, ctx);
@@ -305,7 +331,7 @@ static inline int __must_check
 ww_mutex_lock_slow_interruptible(struct ww_mutex *lock,
                                 struct ww_acquire_ctx *ctx)
 {
-#ifdef CONFIG_DEBUG_MUTEXES
+#ifdef DEBUG_WW_MUTEXES
        DEBUG_LOCKS_WARN_ON(!ctx->contending_lock);
 #endif
        return ww_mutex_lock_interruptible(lock, ctx);
@@ -322,7 +348,7 @@ extern void ww_mutex_unlock(struct ww_mutex *lock);
  */
 static inline int __must_check ww_mutex_trylock(struct ww_mutex *lock)
 {
-       return mutex_trylock(&lock->base);
+       return ww_mutex_base_trylock(&lock->base);
 }
 
 /***
@@ -335,7 +361,9 @@ static inline int __must_check ww_mutex_trylock(struct ww_mutex *lock)
  */
 static inline void ww_mutex_destroy(struct ww_mutex *lock)
 {
+#ifndef CONFIG_PREEMPT_RT
        mutex_destroy(&lock->base);
+#endif
 }
 
 /**
@@ -346,7 +374,7 @@ static inline void ww_mutex_destroy(struct ww_mutex *lock)
  */
 static inline bool ww_mutex_is_locked(struct ww_mutex *lock)
 {
-       return mutex_is_locked(&lock->base);
+       return ww_mutex_base_is_locked(&lock->base);
 }
 
 #endif
index a53e944..db4312e 100644 (file)
@@ -1230,6 +1230,7 @@ struct hci_dev *hci_alloc_dev(void);
 void hci_free_dev(struct hci_dev *hdev);
 int hci_register_dev(struct hci_dev *hdev);
 void hci_unregister_dev(struct hci_dev *hdev);
+void hci_cleanup_dev(struct hci_dev *hdev);
 int hci_suspend_dev(struct hci_dev *hdev);
 int hci_resume_dev(struct hci_dev *hdev);
 int hci_reset_dev(struct hci_dev *hdev);
index 69c9eab..1b9d75a 100644 (file)
@@ -293,7 +293,7 @@ static inline bool flow_action_has_entries(const struct flow_action *action)
 }
 
 /**
- * flow_action_has_one_action() - check if exactly one action is present
+ * flow_offload_has_one_action() - check if exactly one action is present
  * @action: tc filter flow offload action
  *
  * Returns true if exactly one action is present.
@@ -319,14 +319,12 @@ flow_action_mixed_hw_stats_check(const struct flow_action *action,
        if (flow_offload_has_one_action(action))
                return true;
 
-       if (action) {
-               flow_action_for_each(i, action_entry, action) {
-                       if (i && action_entry->hw_stats != last_hw_stats) {
-                               NL_SET_ERR_MSG_MOD(extack, "Mixing HW stats types for actions is not supported");
-                               return false;
-                       }
-                       last_hw_stats = action_entry->hw_stats;
+       flow_action_for_each(i, action_entry, action) {
+               if (i && action_entry->hw_stats != last_hw_stats) {
+                       NL_SET_ERR_MSG_MOD(extack, "Mixing HW stats types for actions is not supported");
+                       return false;
                }
+               last_hw_stats = action_entry->hw_stats;
        }
        return true;
 }
index 15b7fbe..c412dde 100644 (file)
@@ -267,7 +267,7 @@ static inline bool fib6_check_expired(const struct fib6_info *f6i)
        return false;
 }
 
-/* Function to safely get fn->sernum for passed in rt
+/* Function to safely get fn->fn_sernum for passed in rt
  * and store result in passed in cookie.
  * Return true if we can get cookie safely
  * Return false if not
@@ -282,7 +282,7 @@ static inline bool fib6_get_cookie_safe(const struct fib6_info *f6i,
 
        if (fn) {
                *cookie = fn->fn_sernum;
-               /* pairs with smp_wmb() in fib6_update_sernum_upto_root() */
+               /* pairs with smp_wmb() in __fib6_update_sernum_upto_root() */
                smp_rmb();
                status = true;
        }
index 625a38c..0bf09a9 100644 (file)
@@ -265,7 +265,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
 
 static inline unsigned int ip6_skb_dst_mtu(struct sk_buff *skb)
 {
-       int mtu;
+       unsigned int mtu;
 
        struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ?
                                inet6_sk(skb->sk) : NULL;
index c0f0a13..49aa79c 100644 (file)
 #include <linux/if_ether.h>
 
 /* Lengths of frame formats */
-#define LLC_PDU_LEN_I  4       /* header and 2 control bytes */
-#define LLC_PDU_LEN_S  4
-#define LLC_PDU_LEN_U  3       /* header and 1 control byte */
+#define LLC_PDU_LEN_I          4       /* header and 2 control bytes */
+#define LLC_PDU_LEN_S          4
+#define LLC_PDU_LEN_U          3       /* header and 1 control byte */
+/* header and 1 control byte and XID info */
+#define LLC_PDU_LEN_U_XID      (LLC_PDU_LEN_U + sizeof(struct llc_xid_info))
 /* Known SAP addresses */
 #define LLC_GLOBAL_SAP 0xFF
 #define LLC_NULL_SAP   0x00    /* not network-layer visible */
 #define LLC_PDU_TYPE_U_MASK    0x03    /* 8-bit control field */
 #define LLC_PDU_TYPE_MASK      0x03
 
-#define LLC_PDU_TYPE_I 0       /* first bit */
-#define LLC_PDU_TYPE_S 1       /* first two bits */
-#define LLC_PDU_TYPE_U 3       /* first two bits */
+#define LLC_PDU_TYPE_I         0       /* first bit */
+#define LLC_PDU_TYPE_S         1       /* first two bits */
+#define LLC_PDU_TYPE_U         3       /* first two bits */
+#define LLC_PDU_TYPE_U_XID     4       /* private type for detecting XID commands */
 
 #define LLC_PDU_TYPE_IS_I(pdu) \
        ((!(pdu->ctrl_1 & LLC_PDU_TYPE_I_MASK)) ? 1 : 0)
@@ -230,9 +233,18 @@ static inline struct llc_pdu_un *llc_pdu_un_hdr(struct sk_buff *skb)
 static inline void llc_pdu_header_init(struct sk_buff *skb, u8 type,
                                       u8 ssap, u8 dsap, u8 cr)
 {
-       const int hlen = type == LLC_PDU_TYPE_U ? 3 : 4;
+       int hlen = 4; /* default value for I and S types */
        struct llc_pdu_un *pdu;
 
+       switch (type) {
+       case LLC_PDU_TYPE_U:
+               hlen = 3;
+               break;
+       case LLC_PDU_TYPE_U_XID:
+               hlen = 6;
+               break;
+       }
+
        skb_push(skb, hlen);
        skb_reset_network_header(skb);
        pdu = llc_pdu_un_hdr(skb);
@@ -374,7 +386,10 @@ static inline void llc_pdu_init_as_xid_cmd(struct sk_buff *skb,
        xid_info->fmt_id = LLC_XID_FMT_ID;      /* 0x81 */
        xid_info->type   = svcs_supported;
        xid_info->rw     = rx_window << 1;      /* size of receive window */
-       skb_put(skb, sizeof(struct llc_xid_info));
+
+       /* no need to push/put since llc_pdu_header_init() has already
+        * pushed 3 + 3 bytes
+        */
 }
 
 /**
index 37e5300..fefd38d 100644 (file)
@@ -30,7 +30,6 @@ struct nf_tcp_net {
        u8 tcp_ignore_invalid_rst;
 #if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
        unsigned int offload_timeout;
-       unsigned int offload_pickup;
 #endif
 };
 
@@ -44,7 +43,6 @@ struct nf_udp_net {
        unsigned int timeouts[UDP_CT_MAX];
 #if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
        unsigned int offload_timeout;
-       unsigned int offload_pickup;
 #endif
 };
 
index e946366..1f4e181 100644 (file)
@@ -75,6 +75,7 @@ struct netns_xfrm {
 #endif
        spinlock_t              xfrm_state_lock;
        seqcount_spinlock_t     xfrm_state_hash_generation;
+       seqcount_spinlock_t     xfrm_policy_hash_generation;
 
        spinlock_t xfrm_policy_lock;
        struct mutex xfrm_cfg_mutex;
index ec78239..298a8d1 100644 (file)
@@ -337,6 +337,9 @@ int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts);
 
 /**
  * struct tcf_pkt_info - packet information
+ *
+ * @ptr: start of the pkt data
+ * @nexthdr: offset of the next header
  */
 struct tcf_pkt_info {
        unsigned char *         ptr;
@@ -355,6 +358,7 @@ struct tcf_ematch_ops;
  * @ops: the operations lookup table of the corresponding ematch module
  * @datalen: length of the ematch specific configuration data
  * @data: ematch specific data
+ * @net: the network namespace
  */
 struct tcf_ematch {
        struct tcf_ematch_ops * ops;
index e328c51..0509d2d 100644 (file)
@@ -31,6 +31,8 @@ struct psample_group *psample_group_get(struct net *net, u32 group_num);
 void psample_group_take(struct psample_group *group);
 void psample_group_put(struct psample_group *group);
 
+struct sk_buff;
+
 #if IS_ENABLED(CONFIG_PSAMPLE)
 
 void psample_sample_packet(struct psample_group *group, struct sk_buff *skb,
index 32fc4a3..651bba6 100644 (file)
@@ -984,6 +984,7 @@ struct sctp_transport {
        } cacc;
 
        struct {
+               __u32 last_rtx_chunks;
                __u16 pmtu;
                __u16 probe_size;
                __u16 probe_high;
@@ -1024,8 +1025,8 @@ bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu);
 void sctp_transport_immediate_rtx(struct sctp_transport *);
 void sctp_transport_dst_release(struct sctp_transport *t);
 void sctp_transport_dst_confirm(struct sctp_transport *t);
-void sctp_transport_pl_send(struct sctp_transport *t);
-void sctp_transport_pl_recv(struct sctp_transport *t);
+bool sctp_transport_pl_send(struct sctp_transport *t);
+bool sctp_transport_pl_recv(struct sctp_transport *t);
 
 
 /* This is the structure we use to queue packets as they come into
index f980256..491098a 100644 (file)
@@ -30,7 +30,7 @@ TRACE_EVENT(kyber_latency,
        ),
 
        TP_fast_assign(
-               __entry->dev            = disk_devt(queue_to_disk(q));
+               __entry->dev            = disk_devt(q->disk);
                strlcpy(__entry->domain, domain, sizeof(__entry->domain));
                strlcpy(__entry->type, type, sizeof(__entry->type));
                __entry->percentile     = percentile;
@@ -59,7 +59,7 @@ TRACE_EVENT(kyber_adjust,
        ),
 
        TP_fast_assign(
-               __entry->dev            = disk_devt(queue_to_disk(q));
+               __entry->dev            = disk_devt(q->disk);
                strlcpy(__entry->domain, domain, sizeof(__entry->domain));
                __entry->depth          = depth;
        ),
@@ -81,7 +81,7 @@ TRACE_EVENT(kyber_throttled,
        ),
 
        TP_fast_assign(
-               __entry->dev            = disk_devt(queue_to_disk(q));
+               __entry->dev            = disk_devt(q->disk);
                strlcpy(__entry->domain, domain, sizeof(__entry->domain));
        ),
 
index 390270e..f160484 100644 (file)
@@ -48,7 +48,9 @@
        {(unsigned long)__GFP_WRITE,            "__GFP_WRITE"},         \
        {(unsigned long)__GFP_RECLAIM,          "__GFP_RECLAIM"},       \
        {(unsigned long)__GFP_DIRECT_RECLAIM,   "__GFP_DIRECT_RECLAIM"},\
-       {(unsigned long)__GFP_KSWAPD_RECLAIM,   "__GFP_KSWAPD_RECLAIM"}\
+       {(unsigned long)__GFP_KSWAPD_RECLAIM,   "__GFP_KSWAPD_RECLAIM"},\
+       {(unsigned long)__GFP_ZEROTAGS,         "__GFP_ZEROTAGS"},      \
+       {(unsigned long)__GFP_SKIP_KASAN_POISON,"__GFP_SKIP_KASAN_POISON"}\
 
 #define show_gfp_flags(flags)                                          \
        (flags) ? __print_flags(flags, "|",                             \
diff --git a/include/uapi/linux/dvb/audio.h b/include/uapi/linux/dvb/audio.h
new file mode 100644 (file)
index 0000000..2f869da
--- /dev/null
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: LGPL-2.1+ WITH Linux-syscall-note */
+/*
+ * audio.h - DEPRECATED MPEG-TS audio decoder API
+ *
+ * NOTE: should not be used on future drivers
+ *
+ * Copyright (C) 2000 Ralph  Metzler <ralph@convergence.de>
+ *                  & Marcus Metzler <marcus@convergence.de>
+ *                    for convergence integrated media GmbH
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Lesser Public License
+ * as published by the Free Software Foundation; either version 2.1
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ *
+ */
+
+#ifndef _DVBAUDIO_H_
+#define _DVBAUDIO_H_
+
+#include <linux/types.h>
+
+typedef enum {
+       AUDIO_SOURCE_DEMUX, /* Select the demux as the main source */
+       AUDIO_SOURCE_MEMORY /* Select internal memory as the main source */
+} audio_stream_source_t;
+
+
+typedef enum {
+       AUDIO_STOPPED,      /* Device is stopped */
+       AUDIO_PLAYING,      /* Device is currently playing */
+       AUDIO_PAUSED        /* Device is paused */
+} audio_play_state_t;
+
+
+typedef enum {
+       AUDIO_STEREO,
+       AUDIO_MONO_LEFT,
+       AUDIO_MONO_RIGHT,
+       AUDIO_MONO,
+       AUDIO_STEREO_SWAPPED
+} audio_channel_select_t;
+
+
+typedef struct audio_mixer {
+       unsigned int volume_left;
+       unsigned int volume_right;
+  /* what else do we need? bass, pass-through, ... */
+} audio_mixer_t;
+
+
+typedef struct audio_status {
+       int                    AV_sync_state;  /* sync audio and video? */
+       int                    mute_state;     /* audio is muted */
+       audio_play_state_t     play_state;     /* current playback state */
+       audio_stream_source_t  stream_source;  /* current stream source */
+       audio_channel_select_t channel_select; /* currently selected channel */
+       int                    bypass_mode;    /* pass on audio data to */
+       audio_mixer_t          mixer_state;    /* current mixer state */
+} audio_status_t;                              /* separate decoder hardware */
+
+
+/* for GET_CAPABILITIES and SET_FORMAT, the latter should only set one bit */
+#define AUDIO_CAP_DTS    1
+#define AUDIO_CAP_LPCM   2
+#define AUDIO_CAP_MP1    4
+#define AUDIO_CAP_MP2    8
+#define AUDIO_CAP_MP3   16
+#define AUDIO_CAP_AAC   32
+#define AUDIO_CAP_OGG   64
+#define AUDIO_CAP_SDDS 128
+#define AUDIO_CAP_AC3  256
+
+#define AUDIO_STOP                 _IO('o', 1)
+#define AUDIO_PLAY                 _IO('o', 2)
+#define AUDIO_PAUSE                _IO('o', 3)
+#define AUDIO_CONTINUE             _IO('o', 4)
+#define AUDIO_SELECT_SOURCE        _IO('o', 5)
+#define AUDIO_SET_MUTE             _IO('o', 6)
+#define AUDIO_SET_AV_SYNC          _IO('o', 7)
+#define AUDIO_SET_BYPASS_MODE      _IO('o', 8)
+#define AUDIO_CHANNEL_SELECT       _IO('o', 9)
+#define AUDIO_GET_STATUS           _IOR('o', 10, audio_status_t)
+
+#define AUDIO_GET_CAPABILITIES     _IOR('o', 11, unsigned int)
+#define AUDIO_CLEAR_BUFFER         _IO('o',  12)
+#define AUDIO_SET_ID               _IO('o', 13)
+#define AUDIO_SET_MIXER            _IOW('o', 14, audio_mixer_t)
+#define AUDIO_SET_STREAMTYPE       _IO('o', 15)
+#define AUDIO_BILINGUAL_CHANNEL_SELECT _IO('o', 20)
+
+#endif /* _DVBAUDIO_H_ */
diff --git a/include/uapi/linux/dvb/osd.h b/include/uapi/linux/dvb/osd.h
new file mode 100644 (file)
index 0000000..858997c
--- /dev/null
@@ -0,0 +1,181 @@
+/* SPDX-License-Identifier: LGPL-2.1+ WITH Linux-syscall-note */
+/*
+ * osd.h - DEPRECATED On Screen Display API
+ *
+ * NOTE: should not be used on future drivers
+ *
+ * Copyright (C) 2001 Ralph  Metzler <ralph@convergence.de>
+ *                  & Marcus Metzler <marcus@convergence.de>
+ *                    for convergence integrated media GmbH
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Lesser Public License
+ * as published by the Free Software Foundation; either version 2.1
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ *
+ */
+
+#ifndef _DVBOSD_H_
+#define _DVBOSD_H_
+
+#include <linux/compiler.h>
+
+typedef enum {
+       /* All functions return -2 on "not open" */
+       OSD_Close = 1,  /* () */
+       /*
+        * Disables OSD and releases the buffers
+        * returns 0 on success
+        */
+       OSD_Open,       /* (x0,y0,x1,y1,BitPerPixel[2/4/8](color&0x0F),mix[0..15](color&0xF0)) */
+       /*
+        * Opens OSD with this size and bit depth
+        * returns 0 on success, -1 on DRAM allocation error, -2 on "already open"
+        */
+       OSD_Show,       /* () */
+       /*
+        * enables OSD mode
+        * returns 0 on success
+        */
+       OSD_Hide,       /* () */
+       /*
+        * disables OSD mode
+        * returns 0 on success
+        */
+       OSD_Clear,      /* () */
+       /*
+        * Sets all pixel to color 0
+        * returns 0 on success
+        */
+       OSD_Fill,       /* (color) */
+       /*
+        * Sets all pixel to color <col>
+        * returns 0 on success
+        */
+       OSD_SetColor,   /* (color,R{x0},G{y0},B{x1},opacity{y1}) */
+       /*
+        * set palette entry <num> to <r,g,b>, <mix> and <trans> apply
+        * R,G,B: 0..255
+        * R=Red, G=Green, B=Blue
+        * opacity=0:      pixel opacity 0% (only video pixel shows)
+        * opacity=1..254: pixel opacity as specified in header
+        * opacity=255:    pixel opacity 100% (only OSD pixel shows)
+        * returns 0 on success, -1 on error
+        */
+       OSD_SetPalette, /* (firstcolor{color},lastcolor{x0},data) */
+       /*
+        * Set a number of entries in the palette
+        * sets the entries "firstcolor" through "lastcolor" from the array "data"
+        * data has 4 byte for each color:
+        * R,G,B, and a opacity value: 0->transparent, 1..254->mix, 255->pixel
+        */
+       OSD_SetTrans,   /* (transparency{color}) */
+       /*
+        * Sets transparency of mixed pixel (0..15)
+        * returns 0 on success
+        */
+       OSD_SetPixel,   /* (x0,y0,color) */
+       /*
+        * sets pixel <x>,<y> to color number <col>
+        * returns 0 on success, -1 on error
+        */
+       OSD_GetPixel,   /* (x0,y0) */
+       /* returns color number of pixel <x>,<y>,  or -1 */
+       OSD_SetRow,     /* (x0,y0,x1,data) */
+       /*
+        * fills pixels x0,y through  x1,y with the content of data[]
+        * returns 0 on success, -1 on clipping all pixel (no pixel drawn)
+        */
+       OSD_SetBlock,   /* (x0,y0,x1,y1,increment{color},data) */
+       /*
+        * fills pixels x0,y0 through  x1,y1 with the content of data[]
+        * inc contains the width of one line in the data block,
+        * inc<=0 uses blockwidth as linewidth
+        * returns 0 on success, -1 on clipping all pixel
+        */
+       OSD_FillRow,    /* (x0,y0,x1,color) */
+       /*
+        * fills pixels x0,y through  x1,y with the color <col>
+        * returns 0 on success, -1 on clipping all pixel
+        */
+       OSD_FillBlock,  /* (x0,y0,x1,y1,color) */
+       /*
+        * fills pixels x0,y0 through  x1,y1 with the color <col>
+        * returns 0 on success, -1 on clipping all pixel
+        */
+       OSD_Line,       /* (x0,y0,x1,y1,color) */
+       /*
+        * draw a line from x0,y0 to x1,y1 with the color <col>
+        * returns 0 on success
+        */
+       OSD_Query,      /* (x0,y0,x1,y1,xasp{color}}), yasp=11 */
+       /*
+        * fills parameters with the picture dimensions and the pixel aspect ratio
+        * returns 0 on success
+        */
+       OSD_Test,       /* () */
+       /*
+        * draws a test picture. for debugging purposes only
+        * returns 0 on success
+        * TODO: remove "test" in final version
+        */
+       OSD_Text,       /* (x0,y0,size,color,text) */
+       OSD_SetWindow,  /* (x0) set window with number 0<x0<8 as current */
+       OSD_MoveWindow, /* move current window to (x0, y0) */
+       OSD_OpenRaw,    /* Open other types of OSD windows */
+} OSD_Command;
+
+typedef struct osd_cmd_s {
+       OSD_Command cmd;
+       int x0;
+       int y0;
+       int x1;
+       int y1;
+       int color;
+       void __user *data;
+} osd_cmd_t;
+
+/* OSD_OpenRaw: set 'color' to desired window type */
+typedef enum {
+       OSD_BITMAP1,           /* 1 bit bitmap */
+       OSD_BITMAP2,           /* 2 bit bitmap */
+       OSD_BITMAP4,           /* 4 bit bitmap */
+       OSD_BITMAP8,           /* 8 bit bitmap */
+       OSD_BITMAP1HR,         /* 1 Bit bitmap half resolution */
+       OSD_BITMAP2HR,         /* 2 bit bitmap half resolution */
+       OSD_BITMAP4HR,         /* 4 bit bitmap half resolution */
+       OSD_BITMAP8HR,         /* 8 bit bitmap half resolution */
+       OSD_YCRCB422,          /* 4:2:2 YCRCB Graphic Display */
+       OSD_YCRCB444,          /* 4:4:4 YCRCB Graphic Display */
+       OSD_YCRCB444HR,        /* 4:4:4 YCRCB graphic half resolution */
+       OSD_VIDEOTSIZE,        /* True Size Normal MPEG Video Display */
+       OSD_VIDEOHSIZE,        /* MPEG Video Display Half Resolution */
+       OSD_VIDEOQSIZE,        /* MPEG Video Display Quarter Resolution */
+       OSD_VIDEODSIZE,        /* MPEG Video Display Double Resolution */
+       OSD_VIDEOTHSIZE,       /* True Size MPEG Video Display Half Resolution */
+       OSD_VIDEOTQSIZE,       /* True Size MPEG Video Display Quarter Resolution*/
+       OSD_VIDEOTDSIZE,       /* True Size MPEG Video Display Double Resolution */
+       OSD_VIDEONSIZE,        /* Full Size MPEG Video Display */
+       OSD_CURSOR             /* Cursor */
+} osd_raw_window_t;
+
+typedef struct osd_cap_s {
+       int  cmd;
+#define OSD_CAP_MEMSIZE         1  /* memory size */
+       long val;
+} osd_cap_t;
+
+
+#define OSD_SEND_CMD            _IOW('o', 160, osd_cmd_t)
+#define OSD_GET_CAPABILITY      _IOR('o', 161, osd_cap_t)
+
+#endif
diff --git a/include/uapi/linux/dvb/video.h b/include/uapi/linux/dvb/video.h
new file mode 100644 (file)
index 0000000..179f1ec
--- /dev/null
@@ -0,0 +1,220 @@
+/* SPDX-License-Identifier: LGPL-2.1+ WITH Linux-syscall-note */
+/*
+ * video.h - DEPRECATED MPEG-TS video decoder API
+ *
+ * NOTE: should not be used on future drivers
+ *
+ * Copyright (C) 2000 Marcus Metzler <marcus@convergence.de>
+ *                  & Ralph  Metzler <ralph@convergence.de>
+ *                    for convergence integrated media GmbH
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public License
+ * as published by the Free Software Foundation; either version 2.1
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ *
+ */
+
+#ifndef _UAPI_DVBVIDEO_H_
+#define _UAPI_DVBVIDEO_H_
+
+#include <linux/types.h>
+#ifndef __KERNEL__
+#include <time.h>
+#endif
+
+typedef enum {
+       VIDEO_FORMAT_4_3,     /* Select 4:3 format */
+       VIDEO_FORMAT_16_9,    /* Select 16:9 format. */
+       VIDEO_FORMAT_221_1    /* 2.21:1 */
+} video_format_t;
+
+
+typedef enum {
+       VIDEO_PAN_SCAN,       /* use pan and scan format */
+       VIDEO_LETTER_BOX,     /* use letterbox format */
+       VIDEO_CENTER_CUT_OUT  /* use center cut out format */
+} video_displayformat_t;
+
+typedef struct {
+       int w;
+       int h;
+       video_format_t aspect_ratio;
+} video_size_t;
+
+typedef enum {
+       VIDEO_SOURCE_DEMUX, /* Select the demux as the main source */
+       VIDEO_SOURCE_MEMORY /* If this source is selected, the stream
+                              comes from the user through the write
+                              system call */
+} video_stream_source_t;
+
+
+typedef enum {
+       VIDEO_STOPPED, /* Video is stopped */
+       VIDEO_PLAYING, /* Video is currently playing */
+       VIDEO_FREEZED  /* Video is freezed */
+} video_play_state_t;
+
+
+/* Decoder commands */
+#define VIDEO_CMD_PLAY        (0)
+#define VIDEO_CMD_STOP        (1)
+#define VIDEO_CMD_FREEZE      (2)
+#define VIDEO_CMD_CONTINUE    (3)
+
+/* Flags for VIDEO_CMD_FREEZE */
+#define VIDEO_CMD_FREEZE_TO_BLACK      (1 << 0)
+
+/* Flags for VIDEO_CMD_STOP */
+#define VIDEO_CMD_STOP_TO_BLACK                (1 << 0)
+#define VIDEO_CMD_STOP_IMMEDIATELY     (1 << 1)
+
+/* Play input formats: */
+/* The decoder has no special format requirements */
+#define VIDEO_PLAY_FMT_NONE         (0)
+/* The decoder requires full GOPs */
+#define VIDEO_PLAY_FMT_GOP          (1)
+
+/* The structure must be zeroed before use by the application
+   This ensures it can be extended safely in the future. */
+struct video_command {
+       __u32 cmd;
+       __u32 flags;
+       union {
+               struct {
+                       __u64 pts;
+               } stop;
+
+               struct {
+                       /* 0 or 1000 specifies normal speed,
+                          1 specifies forward single stepping,
+                          -1 specifies backward single stepping,
+                          >1: playback at speed/1000 of the normal speed,
+                          <-1: reverse playback at (-speed/1000) of the normal speed. */
+                       __s32 speed;
+                       __u32 format;
+               } play;
+
+               struct {
+                       __u32 data[16];
+               } raw;
+       };
+};
+
+/* FIELD_UNKNOWN can be used if the hardware does not know whether
+   the Vsync is for an odd, even or progressive (i.e. non-interlaced)
+   field. */
+#define VIDEO_VSYNC_FIELD_UNKNOWN      (0)
+#define VIDEO_VSYNC_FIELD_ODD          (1)
+#define VIDEO_VSYNC_FIELD_EVEN         (2)
+#define VIDEO_VSYNC_FIELD_PROGRESSIVE  (3)
+
+struct video_event {
+       __s32 type;
+#define VIDEO_EVENT_SIZE_CHANGED       1
+#define VIDEO_EVENT_FRAME_RATE_CHANGED 2
+#define VIDEO_EVENT_DECODER_STOPPED    3
+#define VIDEO_EVENT_VSYNC              4
+       /* unused, make sure to use atomic time for y2038 if it ever gets used */
+       long timestamp;
+       union {
+               video_size_t size;
+               unsigned int frame_rate;        /* in frames per 1000sec */
+               unsigned char vsync_field;      /* unknown/odd/even/progressive */
+       } u;
+};
+
+
+struct video_status {
+       int                   video_blank;   /* blank video on freeze? */
+       video_play_state_t    play_state;    /* current state of playback */
+       video_stream_source_t stream_source; /* current source (demux/memory) */
+       video_format_t        video_format;  /* current aspect ratio of stream*/
+       video_displayformat_t display_format;/* selected cropping mode */
+};
+
+
+struct video_still_picture {
+       char __user *iFrame;        /* pointer to a single iframe in memory */
+       __s32 size;
+};
+
+
+typedef __u16 video_attributes_t;
+/*   bits: descr. */
+/*   15-14 Video compression mode (0=MPEG-1, 1=MPEG-2) */
+/*   13-12 TV system (0=525/60, 1=625/50) */
+/*   11-10 Aspect ratio (0=4:3, 3=16:9) */
+/*    9- 8 permitted display mode on 4:3 monitor (0=both, 1=only pan-sca */
+/*    7    line 21-1 data present in GOP (1=yes, 0=no) */
+/*    6    line 21-2 data present in GOP (1=yes, 0=no) */
+/*    5- 3 source resolution (0=720x480/576, 1=704x480/576, 2=352x480/57 */
+/*    2    source letterboxed (1=yes, 0=no) */
+/*    0    film/camera mode (0=
+ *camera, 1=film (625/50 only)) */
+
+
+/* bit definitions for capabilities: */
+/* can the hardware decode MPEG1 and/or MPEG2? */
+#define VIDEO_CAP_MPEG1   1
+#define VIDEO_CAP_MPEG2   2
+/* can you send a system and/or program stream to video device?
+   (you still have to open the video and the audio device but only
+    send the stream to the video device) */
+#define VIDEO_CAP_SYS     4
+#define VIDEO_CAP_PROG    8
+/* can the driver also handle SPU, NAVI and CSS encoded data?
+   (CSS API is not present yet) */
+#define VIDEO_CAP_SPU    16
+#define VIDEO_CAP_NAVI   32
+#define VIDEO_CAP_CSS    64
+
+
+#define VIDEO_STOP                 _IO('o', 21)
+#define VIDEO_PLAY                 _IO('o', 22)
+#define VIDEO_FREEZE               _IO('o', 23)
+#define VIDEO_CONTINUE             _IO('o', 24)
+#define VIDEO_SELECT_SOURCE        _IO('o', 25)
+#define VIDEO_SET_BLANK            _IO('o', 26)
+#define VIDEO_GET_STATUS           _IOR('o', 27, struct video_status)
+#define VIDEO_GET_EVENT            _IOR('o', 28, struct video_event)
+#define VIDEO_SET_DISPLAY_FORMAT   _IO('o', 29)
+#define VIDEO_STILLPICTURE         _IOW('o', 30, struct video_still_picture)
+#define VIDEO_FAST_FORWARD         _IO('o', 31)
+#define VIDEO_SLOWMOTION           _IO('o', 32)
+#define VIDEO_GET_CAPABILITIES     _IOR('o', 33, unsigned int)
+#define VIDEO_CLEAR_BUFFER         _IO('o',  34)
+#define VIDEO_SET_STREAMTYPE       _IO('o', 36)
+#define VIDEO_SET_FORMAT           _IO('o', 37)
+#define VIDEO_GET_SIZE             _IOR('o', 55, video_size_t)
+
+/**
+ * VIDEO_GET_PTS
+ *
+ * Read the 33 bit presentation time stamp as defined
+ * in ITU T-REC-H.222.0 / ISO/IEC 13818-1.
+ *
+ * The PTS should belong to the currently played
+ * frame if possible, but may also be a value close to it
+ * like the PTS of the last decoded frame or the last PTS
+ * extracted by the PES parser.
+ */
+#define VIDEO_GET_PTS              _IOR('o', 57, __u64)
+
+/* Read the number of displayed frames since the decoder was started */
+#define VIDEO_GET_FRAME_COUNT     _IOR('o', 58, __u64)
+
+#define VIDEO_COMMAND             _IOWR('o', 59, struct video_command)
+#define VIDEO_TRY_COMMAND         _IOWR('o', 60, struct video_command)
+
+#endif /* _UAPI_DVBVIDEO_H_ */
index fbf9c5c..64553df 100644 (file)
@@ -51,6 +51,7 @@
 #define FAN_ENABLE_AUDIT       0x00000040
 
 /* Flags to determine fanotify event format */
+#define FAN_REPORT_PIDFD       0x00000080      /* Report pidfd for event->pid */
 #define FAN_REPORT_TID         0x00000100      /* event->pid is thread id */
 #define FAN_REPORT_FID         0x00000200      /* Report unique file id */
 #define FAN_REPORT_DIR_FID     0x00000400      /* Report unique directory id */
@@ -123,6 +124,7 @@ struct fanotify_event_metadata {
 #define FAN_EVENT_INFO_TYPE_FID                1
 #define FAN_EVENT_INFO_TYPE_DFID_NAME  2
 #define FAN_EVENT_INFO_TYPE_DFID       3
+#define FAN_EVENT_INFO_TYPE_PIDFD      4
 
 /* Variable length info record following event metadata */
 struct fanotify_event_info_header {
@@ -148,6 +150,15 @@ struct fanotify_event_info_fid {
        unsigned char handle[0];
 };
 
+/*
+ * This structure is used for info records of type FAN_EVENT_INFO_TYPE_PIDFD.
+ * It holds a pidfd for the pid that was responsible for generating an event.
+ */
+struct fanotify_event_info_pidfd {
+       struct fanotify_event_info_header hdr;
+       __s32 pidfd;
+};
+
 struct fanotify_response {
        __s32 fd;
        __u32 response;
@@ -160,6 +171,8 @@ struct fanotify_response {
 
 /* No fd set in event */
 #define FAN_NOFD       -1
+#define FAN_NOPIDFD    FAN_NOFD
+#define FAN_EPIDFD     -2
 
 /* Helper functions to deal with fanotify_event_metadata buffers */
 #define FAN_EVENT_METADATA_LEN (sizeof(struct fanotify_event_metadata))
index 4c32e97..bdf7b40 100644 (file)
@@ -184,6 +184,7 @@ struct fsxattr {
 #define BLKSECDISCARD _IO(0x12,125)
 #define BLKROTATIONAL _IO(0x12,126)
 #define BLKZEROOUT _IO(0x12,127)
+#define BLKGETDISKSEQ _IOR(0x12,128,__u64)
 /*
  * A jump here: 130-136 are reserved for zoned block devices
  * (see uapi/linux/blkzoned.h)
index e33997b..edc346a 100644 (file)
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/* SPDX-License-Identifier: LGPL-2.1 WITH Linux-syscall-note */
 /* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
 #ifndef _USR_IDXD_H_
 #define _USR_IDXD_H_
diff --git a/include/uapi/linux/ioprio.h b/include/uapi/linux/ioprio.h
new file mode 100644 (file)
index 0000000..f70f259
--- /dev/null
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_LINUX_IOPRIO_H
+#define _UAPI_LINUX_IOPRIO_H
+
+/*
+ * Gives us 8 prio classes with 13-bits of data for each class
+ */
+#define IOPRIO_CLASS_SHIFT     13
+#define IOPRIO_CLASS_MASK      0x07
+#define IOPRIO_PRIO_MASK       ((1UL << IOPRIO_CLASS_SHIFT) - 1)
+
+#define IOPRIO_PRIO_CLASS(ioprio)      \
+       (((ioprio) >> IOPRIO_CLASS_SHIFT) & IOPRIO_CLASS_MASK)
+#define IOPRIO_PRIO_DATA(ioprio)       ((ioprio) & IOPRIO_PRIO_MASK)
+#define IOPRIO_PRIO_VALUE(class, data) \
+       ((((class) & IOPRIO_CLASS_MASK) << IOPRIO_CLASS_SHIFT) | \
+        ((data) & IOPRIO_PRIO_MASK))
+
+/*
+ * These are the io priority groups as implemented by the BFQ and mq-deadline
+ * schedulers. RT is the realtime class, it always gets premium service. For
+ * ATA disks supporting NCQ IO priority, RT class IOs will be processed using
+ * high priority NCQ commands. BE is the best-effort scheduling class, the
+ * default for any process. IDLE is the idle scheduling class, it is only
+ * served when no one else is using the disk.
+ */
+enum {
+       IOPRIO_CLASS_NONE,
+       IOPRIO_CLASS_RT,
+       IOPRIO_CLASS_BE,
+       IOPRIO_CLASS_IDLE,
+};
+
+/*
+ * The RT and BE priority classes both support up to 8 priority levels.
+ */
+#define IOPRIO_NR_LEVELS       8
+#define IOPRIO_BE_NR           IOPRIO_NR_LEVELS
+
+enum {
+       IOPRIO_WHO_PROCESS = 1,
+       IOPRIO_WHO_PGRP,
+       IOPRIO_WHO_USER,
+};
+
+/*
+ * Fallback BE priority level.
+ */
+#define IOPRIO_NORM    4
+#define IOPRIO_BE_NORM IOPRIO_NORM
+
+#endif /* _UAPI_LINUX_IOPRIO_H */
diff --git a/include/uapi/linux/lightnvm.h b/include/uapi/linux/lightnvm.h
deleted file mode 100644 (file)
index 2745afd..0000000
+++ /dev/null
@@ -1,224 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/*
- * Copyright (C) 2015 CNEX Labs.  All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; see the file COPYING.  If not, write to
- * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
- * USA.
- */
-
-#ifndef _UAPI_LINUX_LIGHTNVM_H
-#define _UAPI_LINUX_LIGHTNVM_H
-
-#ifdef __KERNEL__
-#include <linux/const.h>
-#else /* __KERNEL__ */
-#include <stdio.h>
-#include <sys/ioctl.h>
-#define DISK_NAME_LEN 32
-#endif /* __KERNEL__ */
-
-#include <linux/types.h>
-#include <linux/ioctl.h>
-
-#define NVM_TTYPE_NAME_MAX 48
-#define NVM_TTYPE_MAX 63
-#define NVM_MMTYPE_LEN 8
-
-#define NVM_CTRL_FILE "/dev/lightnvm/control"
-
-struct nvm_ioctl_info_tgt {
-       __u32 version[3];
-       __u32 reserved;
-       char tgtname[NVM_TTYPE_NAME_MAX];
-};
-
-struct nvm_ioctl_info {
-       __u32 version[3];       /* in/out - major, minor, patch */
-       __u16 tgtsize;          /* number of targets */
-       __u16 reserved16;       /* pad to 4K page */
-       __u32 reserved[12];
-       struct nvm_ioctl_info_tgt tgts[NVM_TTYPE_MAX];
-};
-
-enum {
-       NVM_DEVICE_ACTIVE = 1 << 0,
-};
-
-struct nvm_ioctl_device_info {
-       char devname[DISK_NAME_LEN];
-       char bmname[NVM_TTYPE_NAME_MAX];
-       __u32 bmversion[3];
-       __u32 flags;
-       __u32 reserved[8];
-};
-
-struct nvm_ioctl_get_devices {
-       __u32 nr_devices;
-       __u32 reserved[31];
-       struct nvm_ioctl_device_info info[31];
-};
-
-struct nvm_ioctl_create_simple {
-       __u32 lun_begin;
-       __u32 lun_end;
-};
-
-struct nvm_ioctl_create_extended {
-       __u16 lun_begin;
-       __u16 lun_end;
-       __u16 op;
-       __u16 rsv;
-};
-
-enum {
-       NVM_CONFIG_TYPE_SIMPLE = 0,
-       NVM_CONFIG_TYPE_EXTENDED = 1,
-};
-
-struct nvm_ioctl_create_conf {
-       __u32 type;
-       union {
-               struct nvm_ioctl_create_simple s;
-               struct nvm_ioctl_create_extended e;
-       };
-};
-
-enum {
-       NVM_TARGET_FACTORY = 1 << 0,    /* Init target in factory mode */
-};
-
-struct nvm_ioctl_create {
-       char dev[DISK_NAME_LEN];                /* open-channel SSD device */
-       char tgttype[NVM_TTYPE_NAME_MAX];       /* target type name */
-       char tgtname[DISK_NAME_LEN];            /* dev to expose target as */
-
-       __u32 flags;
-
-       struct nvm_ioctl_create_conf conf;
-};
-
-struct nvm_ioctl_remove {
-       char tgtname[DISK_NAME_LEN];
-
-       __u32 flags;
-};
-
-struct nvm_ioctl_dev_init {
-       char dev[DISK_NAME_LEN];                /* open-channel SSD device */
-       char mmtype[NVM_MMTYPE_LEN];            /* register to media manager */
-
-       __u32 flags;
-};
-
-enum {
-       NVM_FACTORY_ERASE_ONLY_USER     = 1 << 0, /* erase only blocks used as
-                                                  * host blks or grown blks */
-       NVM_FACTORY_RESET_HOST_BLKS     = 1 << 1, /* remove host blk marks */
-       NVM_FACTORY_RESET_GRWN_BBLKS    = 1 << 2, /* remove grown blk marks */
-       NVM_FACTORY_NR_BITS             = 1 << 3, /* stops here */
-};
-
-struct nvm_ioctl_dev_factory {
-       char dev[DISK_NAME_LEN];
-
-       __u32 flags;
-};
-
-struct nvm_user_vio {
-       __u8 opcode;
-       __u8 flags;
-       __u16 control;
-       __u16 nppas;
-       __u16 rsvd;
-       __u64 metadata;
-       __u64 addr;
-       __u64 ppa_list;
-       __u32 metadata_len;
-       __u32 data_len;
-       __u64 status;
-       __u32 result;
-       __u32 rsvd3[3];
-};
-
-struct nvm_passthru_vio {
-       __u8 opcode;
-       __u8 flags;
-       __u8 rsvd[2];
-       __u32 nsid;
-       __u32 cdw2;
-       __u32 cdw3;
-       __u64 metadata;
-       __u64 addr;
-       __u32 metadata_len;
-       __u32 data_len;
-       __u64 ppa_list;
-       __u16 nppas;
-       __u16 control;
-       __u32 cdw13;
-       __u32 cdw14;
-       __u32 cdw15;
-       __u64 status;
-       __u32 result;
-       __u32 timeout_ms;
-};
-
-/* The ioctl type, 'L', 0x20 - 0x2F documented in ioctl-number.txt */
-enum {
-       /* top level cmds */
-       NVM_INFO_CMD = 0x20,
-       NVM_GET_DEVICES_CMD,
-
-       /* device level cmds */
-       NVM_DEV_CREATE_CMD,
-       NVM_DEV_REMOVE_CMD,
-
-       /* Init a device to support LightNVM media managers */
-       NVM_DEV_INIT_CMD,
-
-       /* Factory reset device */
-       NVM_DEV_FACTORY_CMD,
-
-       /* Vector user I/O */
-       NVM_DEV_VIO_ADMIN_CMD = 0x41,
-       NVM_DEV_VIO_CMD = 0x42,
-       NVM_DEV_VIO_USER_CMD = 0x43,
-};
-
-#define NVM_IOCTL 'L' /* 0x4c */
-
-#define NVM_INFO               _IOWR(NVM_IOCTL, NVM_INFO_CMD, \
-                                               struct nvm_ioctl_info)
-#define NVM_GET_DEVICES                _IOR(NVM_IOCTL, NVM_GET_DEVICES_CMD, \
-                                               struct nvm_ioctl_get_devices)
-#define NVM_DEV_CREATE         _IOW(NVM_IOCTL, NVM_DEV_CREATE_CMD, \
-                                               struct nvm_ioctl_create)
-#define NVM_DEV_REMOVE         _IOW(NVM_IOCTL, NVM_DEV_REMOVE_CMD, \
-                                               struct nvm_ioctl_remove)
-#define NVM_DEV_INIT           _IOW(NVM_IOCTL, NVM_DEV_INIT_CMD, \
-                                               struct nvm_ioctl_dev_init)
-#define NVM_DEV_FACTORY                _IOW(NVM_IOCTL, NVM_DEV_FACTORY_CMD, \
-                                               struct nvm_ioctl_dev_factory)
-
-#define NVME_NVM_IOCTL_IO_VIO          _IOWR(NVM_IOCTL, NVM_DEV_VIO_USER_CMD, \
-                                               struct nvm_passthru_vio)
-#define NVME_NVM_IOCTL_ADMIN_VIO       _IOWR(NVM_IOCTL, NVM_DEV_VIO_ADMIN_CMD,\
-                                               struct nvm_passthru_vio)
-#define NVME_NVM_IOCTL_SUBMIT_VIO      _IOWR(NVM_IOCTL, NVM_DEV_VIO_CMD,\
-                                               struct nvm_user_vio)
-
-#define NVM_VERSION_MAJOR      1
-#define NVM_VERSION_MINOR      0
-#define NVM_VERSION_PATCHLEVEL 0
-
-#endif
index dc8b722..00a6069 100644 (file)
@@ -66,8 +66,11 @@ enum {
 #define NUD_NONE       0x00
 
 /* NUD_NOARP & NUD_PERMANENT are pseudostates, they never change
-   and make no address resolution or NUD.
-   NUD_PERMANENT also cannot be deleted by garbage collectors.
+ * and make no address resolution or NUD.
+ * NUD_PERMANENT also cannot be deleted by garbage collectors.
+ * When NTF_EXT_LEARNED is set for a bridge fdb entry the different cache entry
+ * states don't make sense and thus are ignored. Such entries don't age and
+ * can roam.
  */
 
 struct nda_cacheinfo {
index 912ec60..bbcd285 100644 (file)
@@ -43,6 +43,15 @@ enum nfnl_hook_chain_info_attributes {
 };
 #define NFNLA_HOOK_INFO_MAX (__NFNLA_HOOK_INFO_MAX - 1)
 
+enum nfnl_hook_chain_desc_attributes {
+       NFNLA_CHAIN_UNSPEC,
+       NFNLA_CHAIN_TABLE,
+       NFNLA_CHAIN_FAMILY,
+       NFNLA_CHAIN_NAME,
+       __NFNLA_CHAIN_MAX,
+};
+#define NFNLA_CHAIN_MAX (__NFNLA_CHAIN_MAX - 1)
+
 /**
  * enum nfnl_hook_chaintype - chain type
  *
index 967d9c5..964c41e 100644 (file)
@@ -213,6 +213,7 @@ struct prctl_mm_map {
 /* Speculation control variants */
 # define PR_SPEC_STORE_BYPASS          0
 # define PR_SPEC_INDIRECT_BRANCH       1
+# define PR_SPEC_L1D_FLUSH             2
 /* Return and control values for PR_SET/GET_SPECULATION_CTRL */
 # define PR_SPEC_NOT_AFFECTED          0
 # define PR_SPEC_PRCTL                 (1UL << 0)
index 74aede8..b691d68 100644 (file)
@@ -432,10 +432,6 @@ retry:
                printk("Please append a correct \"root=\" boot option; here are the available partitions:\n");
 
                printk_all_partitions();
-#ifdef CONFIG_DEBUG_BLOCK_EXT_DEVT
-               printk("DEBUG_BLOCK_EXT_DEVT is enabled, you need to specify "
-                      "explicit textual name for \"root=\" boot option.\n");
-#endif
                panic("VFS: Unable to mount root fs on %s", b);
        }
        if (!(flags & SB_RDONLY)) {
index 562f2ef..2d02406 100644 (file)
@@ -80,6 +80,7 @@ struct task_struct init_task
        .normal_prio    = MAX_PRIO - 20,
        .policy         = SCHED_NORMAL,
        .cpus_ptr       = &init_task.cpus_mask,
+       .user_cpus_ptr  = NULL,
        .cpus_mask      = CPU_MASK_ALL,
        .nr_cpus_allowed= NR_CPUS,
        .mm             = NULL,
index f5b8246..8d97aba 100644 (file)
@@ -397,6 +397,12 @@ static int __init bootconfig_params(char *param, char *val,
        return 0;
 }
 
+static int __init warn_bootconfig(char *str)
+{
+       /* The 'bootconfig' has been handled by bootconfig_params(). */
+       return 0;
+}
+
 static void __init setup_boot_config(void)
 {
        static char tmp_cmdline[COMMAND_LINE_SIZE] __initdata;
@@ -475,9 +481,8 @@ static int __init warn_bootconfig(char *str)
        pr_warn("WARNING: 'bootconfig' found on the kernel command line but CONFIG_BOOT_CONFIG is not set.\n");
        return 0;
 }
-early_param("bootconfig", warn_bootconfig);
-
 #endif
+early_param("bootconfig", warn_bootconfig);
 
 /* Change NUL term back to "=", to make "param" the whole string. */
 static void __init repair_env_string(char *param, char *val)
index 3de8fd1..4198f02 100644 (file)
@@ -251,7 +251,7 @@ config ARCH_USE_QUEUED_RWLOCKS
 
 config QUEUED_RWLOCKS
        def_bool y if ARCH_USE_QUEUED_RWLOCKS
-       depends on SMP
+       depends on SMP && !PREEMPT_RT
 
 config ARCH_HAS_MMIOWB
        bool
index 9b15774..0a28a80 100644 (file)
@@ -32,6 +32,8 @@
 #include <linux/perf_event.h>
 #include <linux/extable.h>
 #include <linux/log2.h>
+
+#include <asm/barrier.h>
 #include <asm/unaligned.h>
 
 /* Registers */
@@ -1360,11 +1362,13 @@ u64 __weak bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr)
 }
 
 /**
- *     __bpf_prog_run - run eBPF program on a given context
+ *     ___bpf_prog_run - run eBPF program on a given context
  *     @regs: is the array of MAX_BPF_EXT_REG eBPF pseudo-registers
  *     @insn: is the array of eBPF instructions
  *
  * Decode and execute eBPF instructions.
+ *
+ * Return: whatever value is in %BPF_R0 at program exit
  */
 static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn)
 {
@@ -1377,6 +1381,7 @@ static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn)
                /* Non-UAPI available opcodes. */
                [BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS,
                [BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
+               [BPF_ST  | BPF_NOSPEC] = &&ST_NOSPEC,
                [BPF_LDX | BPF_PROBE_MEM | BPF_B] = &&LDX_PROBE_MEM_B,
                [BPF_LDX | BPF_PROBE_MEM | BPF_H] = &&LDX_PROBE_MEM_H,
                [BPF_LDX | BPF_PROBE_MEM | BPF_W] = &&LDX_PROBE_MEM_W,
@@ -1621,7 +1626,21 @@ out:
        COND_JMP(s, JSGE, >=)
        COND_JMP(s, JSLE, <=)
 #undef COND_JMP
-       /* STX and ST and LDX*/
+       /* ST, STX and LDX*/
+       ST_NOSPEC:
+               /* Speculation barrier for mitigating Speculative Store Bypass.
+                * In case of arm64, we rely on the firmware mitigation as
+                * controlled via the ssbd kernel parameter. Whenever the
+                * mitigation is enabled, it works for all of the kernel code
+                * with no need to provide any additional instructions here.
+                * In case of x86, we use 'lfence' insn for mitigation. We
+                * reuse preexisting logic from Spectre v1 mitigation that
+                * happens to produce the required code on x86 for v4 as well.
+                */
+#ifdef CONFIG_X86
+               barrier_nospec();
+#endif
+               CONT;
 #define LDST(SIZEOP, SIZE)                                             \
        STX_MEM_##SIZEOP:                                               \
                *(SIZE *)(unsigned long) (DST + insn->off) = SRC;       \
@@ -1861,6 +1880,9 @@ static void bpf_prog_select_func(struct bpf_prog *fp)
  *
  * Try to JIT eBPF program, if JIT is not available, use interpreter.
  * The BPF program will be executed via BPF_PROG_RUN() macro.
+ *
+ * Return: the &fp argument along with &err set to 0 for success or
+ * a negative errno code on failure
  */
 struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
 {
index bbfc6bb..ca3cd9a 100644 (file)
@@ -206,15 +206,17 @@ void print_bpf_insn(const struct bpf_insn_cbs *cbs,
                        verbose(cbs->private_data, "BUG_%02x\n", insn->code);
                }
        } else if (class == BPF_ST) {
-               if (BPF_MODE(insn->code) != BPF_MEM) {
+               if (BPF_MODE(insn->code) == BPF_MEM) {
+                       verbose(cbs->private_data, "(%02x) *(%s *)(r%d %+d) = %d\n",
+                               insn->code,
+                               bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
+                               insn->dst_reg,
+                               insn->off, insn->imm);
+               } else if (BPF_MODE(insn->code) == 0xc0 /* BPF_NOSPEC, no UAPI */) {
+                       verbose(cbs->private_data, "(%02x) nospec\n", insn->code);
+               } else {
                        verbose(cbs->private_data, "BUG_st_%02x\n", insn->code);
-                       return;
                }
-               verbose(cbs->private_data, "(%02x) *(%s *)(r%d %+d) = %d\n",
-                       insn->code,
-                       bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
-                       insn->dst_reg,
-                       insn->off, insn->imm);
        } else if (class == BPF_LDX) {
                if (BPF_MODE(insn->code) != BPF_MEM) {
                        verbose(cbs->private_data, "BUG_ldx_%02x\n", insn->code);
index 72c58cc..9c011f3 100644 (file)
@@ -1565,8 +1565,8 @@ alloc:
        /* We cannot do copy_from_user or copy_to_user inside
         * the rcu_read_lock. Allocate enough space here.
         */
-       keys = kvmalloc(key_size * bucket_size, GFP_USER | __GFP_NOWARN);
-       values = kvmalloc(value_size * bucket_size, GFP_USER | __GFP_NOWARN);
+       keys = kvmalloc_array(key_size, bucket_size, GFP_USER | __GFP_NOWARN);
+       values = kvmalloc_array(value_size, bucket_size, GFP_USER | __GFP_NOWARN);
        if (!keys || !values) {
                ret = -ENOMEM;
                goto after_loop;
index 62cf003..55f83ea 100644 (file)
@@ -353,9 +353,15 @@ const struct bpf_func_proto bpf_jiffies64_proto = {
 #ifdef CONFIG_CGROUPS
 BPF_CALL_0(bpf_get_current_cgroup_id)
 {
-       struct cgroup *cgrp = task_dfl_cgroup(current);
+       struct cgroup *cgrp;
+       u64 cgrp_id;
 
-       return cgroup_id(cgrp);
+       rcu_read_lock();
+       cgrp = task_dfl_cgroup(current);
+       cgrp_id = cgroup_id(cgrp);
+       rcu_read_unlock();
+
+       return cgrp_id;
 }
 
 const struct bpf_func_proto bpf_get_current_cgroup_id_proto = {
@@ -366,13 +372,17 @@ const struct bpf_func_proto bpf_get_current_cgroup_id_proto = {
 
 BPF_CALL_1(bpf_get_current_ancestor_cgroup_id, int, ancestor_level)
 {
-       struct cgroup *cgrp = task_dfl_cgroup(current);
+       struct cgroup *cgrp;
        struct cgroup *ancestor;
+       u64 cgrp_id;
 
+       rcu_read_lock();
+       cgrp = task_dfl_cgroup(current);
        ancestor = cgroup_ancestor(cgrp, ancestor_level);
-       if (!ancestor)
-               return 0;
-       return cgroup_id(ancestor);
+       cgrp_id = ancestor ? cgroup_id(ancestor) : 0;
+       rcu_read_unlock();
+
+       return cgrp_id;
 }
 
 const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto = {
@@ -397,8 +407,8 @@ BPF_CALL_2(bpf_get_local_storage, struct bpf_map *, map, u64, flags)
        void *ptr;
        int i;
 
-       for (i = 0; i < BPF_CGROUP_STORAGE_NEST_MAX; i++) {
-               if (unlikely(this_cpu_read(bpf_cgroup_storage_info[i].task) != current))
+       for (i = BPF_CGROUP_STORAGE_NEST_MAX - 1; i >= 0; i--) {
+               if (likely(this_cpu_read(bpf_cgroup_storage_info[i].task) != current))
                        continue;
 
                storage = this_cpu_read(bpf_cgroup_storage_info[i].storage[stype]);
@@ -1070,12 +1080,12 @@ bpf_base_func_proto(enum bpf_func_id func_id)
        case BPF_FUNC_probe_read_user:
                return &bpf_probe_read_user_proto;
        case BPF_FUNC_probe_read_kernel:
-               return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
+               return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
                       NULL : &bpf_probe_read_kernel_proto;
        case BPF_FUNC_probe_read_user_str:
                return &bpf_probe_read_user_str_proto;
        case BPF_FUNC_probe_read_kernel_str:
-               return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
+               return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
                       NULL : &bpf_probe_read_kernel_str_proto;
        case BPF_FUNC_snprintf_btf:
                return &bpf_snprintf_btf_proto;
index 9de3c9c..49f07e2 100644 (file)
@@ -2610,6 +2610,19 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
        cur = env->cur_state->frame[env->cur_state->curframe];
        if (value_regno >= 0)
                reg = &cur->regs[value_regno];
+       if (!env->bypass_spec_v4) {
+               bool sanitize = reg && is_spillable_regtype(reg->type);
+
+               for (i = 0; i < size; i++) {
+                       if (state->stack[spi].slot_type[i] == STACK_INVALID) {
+                               sanitize = true;
+                               break;
+                       }
+               }
+
+               if (sanitize)
+                       env->insn_aux_data[insn_idx].sanitize_stack_spill = true;
+       }
 
        if (reg && size == BPF_REG_SIZE && register_is_bounded(reg) &&
            !register_is_null(reg) && env->bpf_capable) {
@@ -2632,47 +2645,10 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
                        verbose(env, "invalid size of register spill\n");
                        return -EACCES;
                }
-
                if (state != cur && reg->type == PTR_TO_STACK) {
                        verbose(env, "cannot spill pointers to stack into stack frame of the caller\n");
                        return -EINVAL;
                }
-
-               if (!env->bypass_spec_v4) {
-                       bool sanitize = false;
-
-                       if (state->stack[spi].slot_type[0] == STACK_SPILL &&
-                           register_is_const(&state->stack[spi].spilled_ptr))
-                               sanitize = true;
-                       for (i = 0; i < BPF_REG_SIZE; i++)
-                               if (state->stack[spi].slot_type[i] == STACK_MISC) {
-                                       sanitize = true;
-                                       break;
-                               }
-                       if (sanitize) {
-                               int *poff = &env->insn_aux_data[insn_idx].sanitize_stack_off;
-                               int soff = (-spi - 1) * BPF_REG_SIZE;
-
-                               /* detected reuse of integer stack slot with a pointer
-                                * which means either llvm is reusing stack slot or
-                                * an attacker is trying to exploit CVE-2018-3639
-                                * (speculative store bypass)
-                                * Have to sanitize that slot with preemptive
-                                * store of zero.
-                                */
-                               if (*poff && *poff != soff) {
-                                       /* disallow programs where single insn stores
-                                        * into two different stack slots, since verifier
-                                        * cannot sanitize them
-                                        */
-                                       verbose(env,
-                                               "insn %d cannot access two stack slots fp%d and fp%d",
-                                               insn_idx, *poff, soff);
-                                       return -EINVAL;
-                               }
-                               *poff = soff;
-                       }
-               }
                save_register_state(state, spi, reg);
        } else {
                u8 type = STACK_MISC;
@@ -5174,8 +5150,6 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
        case BPF_MAP_TYPE_RINGBUF:
                if (func_id != BPF_FUNC_ringbuf_output &&
                    func_id != BPF_FUNC_ringbuf_reserve &&
-                   func_id != BPF_FUNC_ringbuf_submit &&
-                   func_id != BPF_FUNC_ringbuf_discard &&
                    func_id != BPF_FUNC_ringbuf_query)
                        goto error;
                break;
@@ -5284,6 +5258,12 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
                if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY)
                        goto error;
                break;
+       case BPF_FUNC_ringbuf_output:
+       case BPF_FUNC_ringbuf_reserve:
+       case BPF_FUNC_ringbuf_query:
+               if (map->map_type != BPF_MAP_TYPE_RINGBUF)
+                       goto error;
+               break;
        case BPF_FUNC_get_stackid:
                if (map->map_type != BPF_MAP_TYPE_STACK_TRACE)
                        goto error;
@@ -6561,6 +6541,12 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
                alu_state |= off_is_imm ? BPF_ALU_IMMEDIATE : 0;
                alu_state |= ptr_is_dst_reg ?
                             BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST;
+
+               /* Limit pruning on unknown scalars to enable deep search for
+                * potential masking differences from other program paths.
+                */
+               if (!off_is_imm)
+                       env->explore_alu_limits = true;
        }
 
        err = update_alu_sanitation_state(aux, alu_state, alu_limit);
@@ -9936,8 +9922,8 @@ next:
 }
 
 /* Returns true if (rold safe implies rcur safe) */
-static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
-                   struct bpf_id_pair *idmap)
+static bool regsafe(struct bpf_verifier_env *env, struct bpf_reg_state *rold,
+                   struct bpf_reg_state *rcur, struct bpf_id_pair *idmap)
 {
        bool equal;
 
@@ -9963,6 +9949,8 @@ static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
                return false;
        switch (rold->type) {
        case SCALAR_VALUE:
+               if (env->explore_alu_limits)
+                       return false;
                if (rcur->type == SCALAR_VALUE) {
                        if (!rold->precise && !rcur->precise)
                                return true;
@@ -10053,9 +10041,8 @@ static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
        return false;
 }
 
-static bool stacksafe(struct bpf_func_state *old,
-                     struct bpf_func_state *cur,
-                     struct bpf_id_pair *idmap)
+static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old,
+                     struct bpf_func_state *cur, struct bpf_id_pair *idmap)
 {
        int i, spi;
 
@@ -10100,9 +10087,8 @@ static bool stacksafe(struct bpf_func_state *old,
                        continue;
                if (old->stack[spi].slot_type[0] != STACK_SPILL)
                        continue;
-               if (!regsafe(&old->stack[spi].spilled_ptr,
-                            &cur->stack[spi].spilled_ptr,
-                            idmap))
+               if (!regsafe(env, &old->stack[spi].spilled_ptr,
+                            &cur->stack[spi].spilled_ptr, idmap))
                        /* when explored and current stack slot are both storing
                         * spilled registers, check that stored pointers types
                         * are the same as well.
@@ -10159,10 +10145,11 @@ static bool func_states_equal(struct bpf_verifier_env *env, struct bpf_func_stat
 
        memset(env->idmap_scratch, 0, sizeof(env->idmap_scratch));
        for (i = 0; i < MAX_BPF_REG; i++)
-               if (!regsafe(&old->regs[i], &cur->regs[i], env->idmap_scratch))
+               if (!regsafe(env, &old->regs[i], &cur->regs[i],
+                            env->idmap_scratch))
                        return false;
 
-       if (!stacksafe(old, cur, env->idmap_scratch))
+       if (!stacksafe(env, old, cur, env->idmap_scratch))
                return false;
 
        if (!refsafe(old, cur))
@@ -11680,6 +11667,7 @@ static void sanitize_dead_code(struct bpf_verifier_env *env)
                if (aux_data[i].seen)
                        continue;
                memcpy(insn + i, &trap, sizeof(trap));
+               aux_data[i].zext_dst = false;
        }
 }
 
@@ -11906,35 +11894,33 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
 
        for (i = 0; i < insn_cnt; i++, insn++) {
                bpf_convert_ctx_access_t convert_ctx_access;
+               bool ctx_access;
 
                if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) ||
                    insn->code == (BPF_LDX | BPF_MEM | BPF_H) ||
                    insn->code == (BPF_LDX | BPF_MEM | BPF_W) ||
-                   insn->code == (BPF_LDX | BPF_MEM | BPF_DW))
+                   insn->code == (BPF_LDX | BPF_MEM | BPF_DW)) {
                        type = BPF_READ;
-               else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) ||
-                        insn->code == (BPF_STX | BPF_MEM | BPF_H) ||
-                        insn->code == (BPF_STX | BPF_MEM | BPF_W) ||
-                        insn->code == (BPF_STX | BPF_MEM | BPF_DW))
+                       ctx_access = true;
+               } else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) ||
+                          insn->code == (BPF_STX | BPF_MEM | BPF_H) ||
+                          insn->code == (BPF_STX | BPF_MEM | BPF_W) ||
+                          insn->code == (BPF_STX | BPF_MEM | BPF_DW) ||
+                          insn->code == (BPF_ST | BPF_MEM | BPF_B) ||
+                          insn->code == (BPF_ST | BPF_MEM | BPF_H) ||
+                          insn->code == (BPF_ST | BPF_MEM | BPF_W) ||
+                          insn->code == (BPF_ST | BPF_MEM | BPF_DW)) {
                        type = BPF_WRITE;
-               else
+                       ctx_access = BPF_CLASS(insn->code) == BPF_STX;
+               } else {
                        continue;
+               }
 
                if (type == BPF_WRITE &&
-                   env->insn_aux_data[i + delta].sanitize_stack_off) {
+                   env->insn_aux_data[i + delta].sanitize_stack_spill) {
                        struct bpf_insn patch[] = {
-                               /* Sanitize suspicious stack slot with zero.
-                                * There are no memory dependencies for this store,
-                                * since it's only using frame pointer and immediate
-                                * constant of zero
-                                */
-                               BPF_ST_MEM(BPF_DW, BPF_REG_FP,
-                                          env->insn_aux_data[i + delta].sanitize_stack_off,
-                                          0),
-                               /* the original STX instruction will immediately
-                                * overwrite the same stack slot with appropriate value
-                                */
                                *insn,
+                               BPF_ST_NOSPEC(),
                        };
 
                        cnt = ARRAY_SIZE(patch);
@@ -11948,6 +11934,9 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
                        continue;
                }
 
+               if (!ctx_access)
+                       continue;
+
                switch (env->insn_aux_data[i + delta].ptr_type) {
                case PTR_TO_CTX:
                        if (!ops->convert_ctx_access)
@@ -12752,37 +12741,6 @@ static void free_states(struct bpf_verifier_env *env)
        }
 }
 
-/* The verifier is using insn_aux_data[] to store temporary data during
- * verification and to store information for passes that run after the
- * verification like dead code sanitization. do_check_common() for subprogram N
- * may analyze many other subprograms. sanitize_insn_aux_data() clears all
- * temporary data after do_check_common() finds that subprogram N cannot be
- * verified independently. pass_cnt counts the number of times
- * do_check_common() was run and insn->aux->seen tells the pass number
- * insn_aux_data was touched. These variables are compared to clear temporary
- * data from failed pass. For testing and experiments do_check_common() can be
- * run multiple times even when prior attempt to verify is unsuccessful.
- *
- * Note that special handling is needed on !env->bypass_spec_v1 if this is
- * ever called outside of error path with subsequent program rejection.
- */
-static void sanitize_insn_aux_data(struct bpf_verifier_env *env)
-{
-       struct bpf_insn *insn = env->prog->insnsi;
-       struct bpf_insn_aux_data *aux;
-       int i, class;
-
-       for (i = 0; i < env->prog->len; i++) {
-               class = BPF_CLASS(insn[i].code);
-               if (class != BPF_LDX && class != BPF_STX)
-                       continue;
-               aux = &env->insn_aux_data[i];
-               if (aux->seen != env->pass_cnt)
-                       continue;
-               memset(aux, 0, offsetof(typeof(*aux), orig_idx));
-       }
-}
-
 static int do_check_common(struct bpf_verifier_env *env, int subprog)
 {
        bool pop_log = !(env->log.level & BPF_LOG_LEVEL2);
@@ -12859,9 +12817,6 @@ out:
        if (!ret && pop_log)
                bpf_vlog_reset(&env->log, 0);
        free_states(env);
-       if (ret)
-               /* clean aux data in case subprog was rejected */
-               sanitize_insn_aux_data(env);
        return ret;
 }
 
index e17a566..9594cfd 100644 (file)
@@ -248,9 +248,9 @@ static inline cfi_check_fn find_shadow_check_fn(unsigned long ptr)
 {
        cfi_check_fn fn;
 
-       rcu_read_lock_sched();
+       rcu_read_lock_sched_notrace();
        fn = ptr_to_check_fn(rcu_dereference_sched(cfi_shadow), ptr);
-       rcu_read_unlock_sched();
+       rcu_read_unlock_sched_notrace();
 
        return fn;
 }
@@ -269,11 +269,11 @@ static inline cfi_check_fn find_module_check_fn(unsigned long ptr)
        cfi_check_fn fn = NULL;
        struct module *mod;
 
-       rcu_read_lock_sched();
+       rcu_read_lock_sched_notrace();
        mod = __module_address(ptr);
        if (mod)
                fn = mod->cfi_check;
-       rcu_read_unlock_sched();
+       rcu_read_unlock_sched_notrace();
 
        return fn;
 }
index adb5190..6500cbe 100644 (file)
@@ -372,18 +372,29 @@ static inline bool is_in_v2_mode(void)
 }
 
 /*
- * Return in pmask the portion of a cpusets's cpus_allowed that
- * are online.  If none are online, walk up the cpuset hierarchy
- * until we find one that does have some online cpus.
+ * Return in pmask the portion of a task's cpusets's cpus_allowed that
+ * are online and are capable of running the task.  If none are found,
+ * walk up the cpuset hierarchy until we find one that does have some
+ * appropriate cpus.
  *
  * One way or another, we guarantee to return some non-empty subset
  * of cpu_online_mask.
  *
  * Call with callback_lock or cpuset_mutex held.
  */
-static void guarantee_online_cpus(struct cpuset *cs, struct cpumask *pmask)
+static void guarantee_online_cpus(struct task_struct *tsk,
+                                 struct cpumask *pmask)
 {
-       while (!cpumask_intersects(cs->effective_cpus, cpu_online_mask)) {
+       const struct cpumask *possible_mask = task_cpu_possible_mask(tsk);
+       struct cpuset *cs;
+
+       if (WARN_ON(!cpumask_and(pmask, possible_mask, cpu_online_mask)))
+               cpumask_copy(pmask, cpu_online_mask);
+
+       rcu_read_lock();
+       cs = task_cs(tsk);
+
+       while (!cpumask_intersects(cs->effective_cpus, pmask)) {
                cs = parent_cs(cs);
                if (unlikely(!cs)) {
                        /*
@@ -393,11 +404,13 @@ static void guarantee_online_cpus(struct cpuset *cs, struct cpumask *pmask)
                         * cpuset's effective_cpus is on its way to be
                         * identical to cpu_online_mask.
                         */
-                       cpumask_copy(pmask, cpu_online_mask);
-                       return;
+                       goto out_unlock;
                }
        }
-       cpumask_and(pmask, cs->effective_cpus, cpu_online_mask);
+       cpumask_and(pmask, pmask, cs->effective_cpus);
+
+out_unlock:
+       rcu_read_unlock();
 }
 
 /*
@@ -2199,15 +2212,13 @@ static void cpuset_attach(struct cgroup_taskset *tset)
 
        percpu_down_write(&cpuset_rwsem);
 
-       /* prepare for attach */
-       if (cs == &top_cpuset)
-               cpumask_copy(cpus_attach, cpu_possible_mask);
-       else
-               guarantee_online_cpus(cs, cpus_attach);
-
        guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
 
        cgroup_taskset_for_each(task, css, tset) {
+               if (cs != &top_cpuset)
+                       guarantee_online_cpus(task, cpus_attach);
+               else
+                       cpumask_copy(cpus_attach, task_cpu_possible_mask(task));
                /*
                 * can_attach beforehand should guarantee that this doesn't
                 * fail.  TODO: have a better way to handle failure here
@@ -3302,9 +3313,7 @@ void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
        unsigned long flags;
 
        spin_lock_irqsave(&callback_lock, flags);
-       rcu_read_lock();
-       guarantee_online_cpus(task_cs(tsk), pmask);
-       rcu_read_unlock();
+       guarantee_online_cpus(tsk, pmask);
        spin_unlock_irqrestore(&callback_lock, flags);
 }
 
@@ -3318,13 +3327,22 @@ void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
  * which will not contain a sane cpumask during cases such as cpu hotplugging.
  * This is the absolute last resort for the scheduler and it is only used if
  * _every_ other avenue has been traveled.
+ *
+ * Returns true if the affinity of @tsk was changed, false otherwise.
  **/
 
-void cpuset_cpus_allowed_fallback(struct task_struct *tsk)
+bool cpuset_cpus_allowed_fallback(struct task_struct *tsk)
 {
+       const struct cpumask *possible_mask = task_cpu_possible_mask(tsk);
+       const struct cpumask *cs_mask;
+       bool changed = false;
+
        rcu_read_lock();
-       do_set_cpus_allowed(tsk, is_in_v2_mode() ?
-               task_cs(tsk)->cpus_allowed : cpu_possible_mask);
+       cs_mask = task_cs(tsk)->cpus_allowed;
+       if (is_in_v2_mode() && cpumask_subset(cs_mask, possible_mask)) {
+               do_set_cpus_allowed(tsk, cs_mask);
+               changed = true;
+       }
        rcu_read_unlock();
 
        /*
@@ -3344,6 +3362,7 @@ void cpuset_cpus_allowed_fallback(struct task_struct *tsk)
         * select_fallback_rq() will fix things ups and set cpu_possible_mask
         * if required.
         */
+       return changed;
 }
 
 void __init cpuset_init_current_mems_allowed(void)
index 7f0e589..b264ab5 100644 (file)
@@ -347,19 +347,20 @@ static void cgroup_base_stat_flush(struct cgroup *cgrp, int cpu)
 }
 
 static struct cgroup_rstat_cpu *
-cgroup_base_stat_cputime_account_begin(struct cgroup *cgrp)
+cgroup_base_stat_cputime_account_begin(struct cgroup *cgrp, unsigned long *flags)
 {
        struct cgroup_rstat_cpu *rstatc;
 
        rstatc = get_cpu_ptr(cgrp->rstat_cpu);
-       u64_stats_update_begin(&rstatc->bsync);
+       *flags = u64_stats_update_begin_irqsave(&rstatc->bsync);
        return rstatc;
 }
 
 static void cgroup_base_stat_cputime_account_end(struct cgroup *cgrp,
-                                                struct cgroup_rstat_cpu *rstatc)
+                                                struct cgroup_rstat_cpu *rstatc,
+                                                unsigned long flags)
 {
-       u64_stats_update_end(&rstatc->bsync);
+       u64_stats_update_end_irqrestore(&rstatc->bsync, flags);
        cgroup_rstat_updated(cgrp, smp_processor_id());
        put_cpu_ptr(rstatc);
 }
@@ -367,18 +368,20 @@ static void cgroup_base_stat_cputime_account_end(struct cgroup *cgrp,
 void __cgroup_account_cputime(struct cgroup *cgrp, u64 delta_exec)
 {
        struct cgroup_rstat_cpu *rstatc;
+       unsigned long flags;
 
-       rstatc = cgroup_base_stat_cputime_account_begin(cgrp);
+       rstatc = cgroup_base_stat_cputime_account_begin(cgrp, &flags);
        rstatc->bstat.cputime.sum_exec_runtime += delta_exec;
-       cgroup_base_stat_cputime_account_end(cgrp, rstatc);
+       cgroup_base_stat_cputime_account_end(cgrp, rstatc, flags);
 }
 
 void __cgroup_account_cputime_field(struct cgroup *cgrp,
                                    enum cpu_usage_stat index, u64 delta_exec)
 {
        struct cgroup_rstat_cpu *rstatc;
+       unsigned long flags;
 
-       rstatc = cgroup_base_stat_cputime_account_begin(cgrp);
+       rstatc = cgroup_base_stat_cputime_account_begin(cgrp, &flags);
 
        switch (index) {
        case CPUTIME_USER:
@@ -394,7 +397,7 @@ void __cgroup_account_cputime_field(struct cgroup *cgrp,
                break;
        }
 
-       cgroup_base_stat_cputime_account_end(cgrp, rstatc);
+       cgroup_base_stat_cputime_account_end(cgrp, rstatc, flags);
 }
 
 /*
index 804b847..192e43a 100644 (file)
 #include "smpboot.h"
 
 /**
- * cpuhp_cpu_state - Per cpu hotplug state storage
+ * struct cpuhp_cpu_state - Per cpu hotplug state storage
  * @state:     The current cpu state
  * @target:    The target state
+ * @fail:      Current CPU hotplug callback state
  * @thread:    Pointer to the hotplug thread
  * @should_run:        Thread should execute
  * @rollback:  Perform a rollback
  * @single:    Single callback invocation
  * @bringup:   Single callback bringup or teardown selector
+ * @cpu:       CPU number
+ * @node:      Remote CPU node; for multi-instance, do a
+ *             single entry callback for install/remove
+ * @last:      For multi-instance rollback, remember how far we got
  * @cb_state:  The state for a single callback (install/uninstall)
  * @result:    Result of the operation
  * @done_up:   Signal completion to the issuer of the task for cpu-up
@@ -106,11 +111,12 @@ static inline void cpuhp_lock_release(bool bringup) { }
 #endif
 
 /**
- * cpuhp_step - Hotplug state machine step
+ * struct cpuhp_step - Hotplug state machine step
  * @name:      Name of the step
  * @startup:   Startup function of the step
  * @teardown:  Teardown function of the step
  * @cant_stop: Bringup/teardown can't be stopped at this step
+ * @multi_instance:    State has multiple instances which get added afterwards
  */
 struct cpuhp_step {
        const char              *name;
@@ -124,7 +130,9 @@ struct cpuhp_step {
                int             (*multi)(unsigned int cpu,
                                         struct hlist_node *node);
        } teardown;
+       /* private: */
        struct hlist_head       list;
+       /* public: */
        bool                    cant_stop;
        bool                    multi_instance;
 };
@@ -143,7 +151,7 @@ static bool cpuhp_step_empty(bool bringup, struct cpuhp_step *step)
 }
 
 /**
- * cpuhp_invoke_callback _ Invoke the callbacks for a given state
+ * cpuhp_invoke_callback - Invoke the callbacks for a given state
  * @cpu:       The cpu for which the callback should be invoked
  * @state:     The state to do callbacks for
  * @bringup:   True if the bringup callback should be invoked
@@ -151,6 +159,8 @@ static bool cpuhp_step_empty(bool bringup, struct cpuhp_step *step)
  * @lastp:     For multi-instance rollback, remember how far we got
  *
  * Called from cpu hotplug and from the state register machinery.
+ *
+ * Return: %0 on success or a negative errno code
  */
 static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
                                 bool bringup, struct hlist_node *node,
@@ -682,6 +692,10 @@ static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
 
        ret = cpuhp_invoke_callback_range(true, cpu, st, target);
        if (ret) {
+               pr_debug("CPU UP failed (%d) CPU %u state %s (%d)\n",
+                        ret, cpu, cpuhp_get_step(st->state)->name,
+                        st->state);
+
                cpuhp_reset_state(st, prev_state);
                if (can_rollback_cpu(st))
                        WARN_ON(cpuhp_invoke_callback_range(false, cpu, st,
@@ -1081,6 +1095,9 @@ static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
 
        ret = cpuhp_invoke_callback_range(false, cpu, st, target);
        if (ret) {
+               pr_debug("CPU DOWN failed (%d) CPU %u state %s (%d)\n",
+                        ret, cpu, cpuhp_get_step(st->state)->name,
+                        st->state);
 
                cpuhp_reset_state(st, prev_state);
 
@@ -1183,6 +1200,8 @@ static int cpu_down(unsigned int cpu, enum cpuhp_state target)
  * This function is meant to be used by device core cpu subsystem only.
  *
  * Other subsystems should use remove_cpu() instead.
+ *
+ * Return: %0 on success or a negative errno code
  */
 int cpu_device_down(struct device *dev)
 {
@@ -1395,6 +1414,8 @@ out:
  * This function is meant to be used by device core cpu subsystem only.
  *
  * Other subsystems should use add_cpu() instead.
+ *
+ * Return: %0 on success or a negative errno code
  */
 int cpu_device_up(struct device *dev)
 {
@@ -1420,6 +1441,8 @@ EXPORT_SYMBOL_GPL(add_cpu);
  * On some architectures like arm64, we can hibernate on any CPU, but on
  * wake up the CPU we hibernated on might be offline as a side effect of
  * using maxcpus= for example.
+ *
+ * Return: %0 on success or a negative errno code
  */
 int bringup_hibernate_cpu(unsigned int sleep_cpu)
 {
@@ -1976,6 +1999,7 @@ EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance);
 /**
  * __cpuhp_setup_state_cpuslocked - Setup the callbacks for an hotplug machine state
  * @state:             The state to setup
+ * @name:              Name of the step
  * @invoke:            If true, the startup function is invoked for cpus where
  *                     cpu state >= @state
  * @startup:           startup callback function
@@ -1984,9 +2008,9 @@ EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance);
  *                     added afterwards.
  *
  * The caller needs to hold cpus read locked while calling this function.
- * Returns:
+ * Return:
  *   On success:
- *      Positive state number if @state is CPUHP_AP_ONLINE_DYN
+ *      Positive state number if @state is CPUHP_AP_ONLINE_DYN;
  *      0 for all other states
  *   On failure: proper (negative) error code
  */
@@ -2232,18 +2256,17 @@ int cpuhp_smt_enable(void)
 #endif
 
 #if defined(CONFIG_SYSFS) && defined(CONFIG_HOTPLUG_CPU)
-static ssize_t show_cpuhp_state(struct device *dev,
-                               struct device_attribute *attr, char *buf)
+static ssize_t state_show(struct device *dev,
+                         struct device_attribute *attr, char *buf)
 {
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
 
        return sprintf(buf, "%d\n", st->state);
 }
-static DEVICE_ATTR(state, 0444, show_cpuhp_state, NULL);
+static DEVICE_ATTR_RO(state);
 
-static ssize_t write_cpuhp_target(struct device *dev,
-                                 struct device_attribute *attr,
-                                 const char *buf, size_t count)
+static ssize_t target_store(struct device *dev, struct device_attribute *attr,
+                           const char *buf, size_t count)
 {
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
        struct cpuhp_step *sp;
@@ -2281,19 +2304,17 @@ out:
        return ret ? ret : count;
 }
 
-static ssize_t show_cpuhp_target(struct device *dev,
-                                struct device_attribute *attr, char *buf)
+static ssize_t target_show(struct device *dev,
+                          struct device_attribute *attr, char *buf)
 {
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
 
        return sprintf(buf, "%d\n", st->target);
 }
-static DEVICE_ATTR(target, 0644, show_cpuhp_target, write_cpuhp_target);
-
+static DEVICE_ATTR_RW(target);
 
-static ssize_t write_cpuhp_fail(struct device *dev,
-                               struct device_attribute *attr,
-                               const char *buf, size_t count)
+static ssize_t fail_store(struct device *dev, struct device_attribute *attr,
+                         const char *buf, size_t count)
 {
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
        struct cpuhp_step *sp;
@@ -2342,15 +2363,15 @@ static ssize_t write_cpuhp_fail(struct device *dev,
        return count;
 }
 
-static ssize_t show_cpuhp_fail(struct device *dev,
-                              struct device_attribute *attr, char *buf)
+static ssize_t fail_show(struct device *dev,
+                        struct device_attribute *attr, char *buf)
 {
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
 
        return sprintf(buf, "%d\n", st->fail);
 }
 
-static DEVICE_ATTR(fail, 0644, show_cpuhp_fail, write_cpuhp_fail);
+static DEVICE_ATTR_RW(fail);
 
 static struct attribute *cpuhp_cpu_attrs[] = {
        &dev_attr_state.attr,
@@ -2365,7 +2386,7 @@ static const struct attribute_group cpuhp_cpu_attr_group = {
        NULL
 };
 
-static ssize_t show_cpuhp_states(struct device *dev,
+static ssize_t states_show(struct device *dev,
                                 struct device_attribute *attr, char *buf)
 {
        ssize_t cur, res = 0;
@@ -2384,7 +2405,7 @@ static ssize_t show_cpuhp_states(struct device *dev,
        mutex_unlock(&cpuhp_state_mutex);
        return res;
 }
-static DEVICE_ATTR(states, 0444, show_cpuhp_states, NULL);
+static DEVICE_ATTR_RO(states);
 
 static struct attribute *cpuhp_cpu_root_attrs[] = {
        &dev_attr_states.attr,
@@ -2457,28 +2478,27 @@ static const char *smt_states[] = {
        [CPU_SMT_NOT_IMPLEMENTED]       = "notimplemented",
 };
 
-static ssize_t
-show_smt_control(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t control_show(struct device *dev,
+                           struct device_attribute *attr, char *buf)
 {
        const char *state = smt_states[cpu_smt_control];
 
        return snprintf(buf, PAGE_SIZE - 2, "%s\n", state);
 }
 
-static ssize_t
-store_smt_control(struct device *dev, struct device_attribute *attr,
-                 const char *buf, size_t count)
+static ssize_t control_store(struct device *dev, struct device_attribute *attr,
+                            const char *buf, size_t count)
 {
        return __store_smt_control(dev, attr, buf, count);
 }
-static DEVICE_ATTR(control, 0644, show_smt_control, store_smt_control);
+static DEVICE_ATTR_RW(control);
 
-static ssize_t
-show_smt_active(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t active_show(struct device *dev,
+                          struct device_attribute *attr, char *buf)
 {
        return snprintf(buf, PAGE_SIZE - 2, "%d\n", sched_smt_active());
 }
-static DEVICE_ATTR(active, 0444, show_smt_active, NULL);
+static DEVICE_ATTR_RO(active);
 
 static struct attribute *cpuhp_smt_attrs[] = {
        &dev_attr_control.attr,
index e6fd2b3..f784e08 100644 (file)
@@ -286,13 +286,13 @@ struct cred *prepare_creds(void)
        new->security = NULL;
 #endif
 
-       if (security_prepare_creds(new, old, GFP_KERNEL_ACCOUNT) < 0)
-               goto error;
-
        new->ucounts = get_ucounts(new->ucounts);
        if (!new->ucounts)
                goto error;
 
+       if (security_prepare_creds(new, old, GFP_KERNEL_ACCOUNT) < 0)
+               goto error;
+
        validate_creds(new);
        return new;
 
@@ -753,13 +753,13 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
 #ifdef CONFIG_SECURITY
        new->security = NULL;
 #endif
-       if (security_prepare_creds(new, old, GFP_KERNEL_ACCOUNT) < 0)
-               goto error;
-
        new->ucounts = get_ucounts(new->ucounts);
        if (!new->ucounts)
                goto error;
 
+       if (security_prepare_creds(new, old, GFP_KERNEL_ACCOUNT) < 0)
+               goto error;
+
        put_cred(old);
        validate_creds(new);
        return new;
index 4649170..1cb1f9b 100644 (file)
@@ -11917,6 +11917,37 @@ again:
        return gctx;
 }
 
+static bool
+perf_check_permission(struct perf_event_attr *attr, struct task_struct *task)
+{
+       unsigned int ptrace_mode = PTRACE_MODE_READ_REALCREDS;
+       bool is_capable = perfmon_capable();
+
+       if (attr->sigtrap) {
+               /*
+                * perf_event_attr::sigtrap sends signals to the other task.
+                * Require the current task to also have CAP_KILL.
+                */
+               rcu_read_lock();
+               is_capable &= ns_capable(__task_cred(task)->user_ns, CAP_KILL);
+               rcu_read_unlock();
+
+               /*
+                * If the required capabilities aren't available, checks for
+                * ptrace permissions: upgrade to ATTACH, since sending signals
+                * can effectively change the target task.
+                */
+               ptrace_mode = PTRACE_MODE_ATTACH_REALCREDS;
+       }
+
+       /*
+        * Preserve ptrace permission check for backwards compatibility. The
+        * ptrace check also includes checks that the current task and other
+        * task have matching uids, and is therefore not done here explicitly.
+        */
+       return is_capable || ptrace_may_access(task, ptrace_mode);
+}
+
 /**
  * sys_perf_event_open - open a performance event, associate it to a task/cpu
  *
@@ -12163,15 +12194,13 @@ SYSCALL_DEFINE5(perf_event_open,
                        goto err_file;
 
                /*
-                * Preserve ptrace permission check for backwards compatibility.
-                *
                 * We must hold exec_update_lock across this and any potential
                 * perf_install_in_context() call for this new event to
                 * serialize against exec() altering our credentials (and the
                 * perf_event_exit_task() that could imply).
                 */
                err = -EACCES;
-               if (!perfmon_capable() && !ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS))
+               if (!perf_check_permission(&attr, task))
                        goto err_cred;
        }
 
index 8359734..f32320a 100644 (file)
@@ -568,7 +568,7 @@ register_wide_hw_breakpoint(struct perf_event_attr *attr,
        if (!cpu_events)
                return (void __percpu __force *)ERR_PTR(-ENOMEM);
 
-       get_online_cpus();
+       cpus_read_lock();
        for_each_online_cpu(cpu) {
                bp = perf_event_create_kernel_counter(attr, cpu, NULL,
                                                      triggered, context);
@@ -579,7 +579,7 @@ register_wide_hw_breakpoint(struct perf_event_attr *attr,
 
                per_cpu(*cpu_events, cpu) = bp;
        }
-       put_online_cpus();
+       cpus_read_unlock();
 
        if (likely(!err))
                return cpu_events;
index bc94b2c..757301c 100644 (file)
@@ -446,6 +446,7 @@ void put_task_stack(struct task_struct *tsk)
 
 void free_task(struct task_struct *tsk)
 {
+       release_user_cpus_ptr(tsk);
        scs_release(tsk);
 
 #ifndef CONFIG_THREAD_INFO_IN_TASK
@@ -828,10 +829,10 @@ void __init fork_init(void)
        for (i = 0; i < MAX_PER_NAMESPACE_UCOUNTS; i++)
                init_user_ns.ucount_max[i] = max_threads/2;
 
-       set_rlimit_ucount_max(&init_user_ns, UCOUNT_RLIMIT_NPROC, task_rlimit(&init_task, RLIMIT_NPROC));
-       set_rlimit_ucount_max(&init_user_ns, UCOUNT_RLIMIT_MSGQUEUE, task_rlimit(&init_task, RLIMIT_MSGQUEUE));
-       set_rlimit_ucount_max(&init_user_ns, UCOUNT_RLIMIT_SIGPENDING, task_rlimit(&init_task, RLIMIT_SIGPENDING));
-       set_rlimit_ucount_max(&init_user_ns, UCOUNT_RLIMIT_MEMLOCK, task_rlimit(&init_task, RLIMIT_MEMLOCK));
+       set_rlimit_ucount_max(&init_user_ns, UCOUNT_RLIMIT_NPROC,      RLIM_INFINITY);
+       set_rlimit_ucount_max(&init_user_ns, UCOUNT_RLIMIT_MSGQUEUE,   RLIM_INFINITY);
+       set_rlimit_ucount_max(&init_user_ns, UCOUNT_RLIMIT_SIGPENDING, RLIM_INFINITY);
+       set_rlimit_ucount_max(&init_user_ns, UCOUNT_RLIMIT_MEMLOCK,    RLIM_INFINITY);
 
 #ifdef CONFIG_VMAP_STACK
        cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "fork:vm_stack_cache",
@@ -924,6 +925,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
 #endif
        if (orig->cpus_ptr == &orig->cpus_mask)
                tsk->cpus_ptr = &tsk->cpus_mask;
+       dup_user_cpus_ptr(tsk, orig, node);
 
        /*
         * One for the user space visible state that goes away when reaped.
index 2ecb075..e7b4c61 100644 (file)
@@ -179,7 +179,7 @@ struct futex_pi_state {
        /*
         * The PI object:
         */
-       struct rt_mutex pi_mutex;
+       struct rt_mutex_base pi_mutex;
 
        struct task_struct *owner;
        refcount_t refcount;
@@ -197,6 +197,8 @@ struct futex_pi_state {
  * @rt_waiter:         rt_waiter storage for use with requeue_pi
  * @requeue_pi_key:    the requeue_pi target futex key
  * @bitset:            bitset for the optional bitmasked wakeup
+ * @requeue_state:     State field for futex_requeue_pi()
+ * @requeue_wait:      RCU wait for futex_requeue_pi() (RT only)
  *
  * We use this hashed waitqueue, instead of a normal wait_queue_entry_t, so
  * we can wake only the relevant ones (hashed queues may be shared).
@@ -219,12 +221,68 @@ struct futex_q {
        struct rt_mutex_waiter *rt_waiter;
        union futex_key *requeue_pi_key;
        u32 bitset;
+       atomic_t requeue_state;
+#ifdef CONFIG_PREEMPT_RT
+       struct rcuwait requeue_wait;
+#endif
 } __randomize_layout;
 
+/*
+ * On PREEMPT_RT, the hash bucket lock is a 'sleeping' spinlock with an
+ * underlying rtmutex. The task which is about to be requeued could have
+ * just woken up (timeout, signal). After the wake up the task has to
+ * acquire hash bucket lock, which is held by the requeue code.  As a task
+ * can only be blocked on _ONE_ rtmutex at a time, the proxy lock blocking
+ * and the hash bucket lock blocking would collide and corrupt state.
+ *
+ * On !PREEMPT_RT this is not a problem and everything could be serialized
+ * on hash bucket lock, but aside of having the benefit of common code,
+ * this allows to avoid doing the requeue when the task is already on the
+ * way out and taking the hash bucket lock of the original uaddr1 when the
+ * requeue has been completed.
+ *
+ * The following state transitions are valid:
+ *
+ * On the waiter side:
+ *   Q_REQUEUE_PI_NONE         -> Q_REQUEUE_PI_IGNORE
+ *   Q_REQUEUE_PI_IN_PROGRESS  -> Q_REQUEUE_PI_WAIT
+ *
+ * On the requeue side:
+ *   Q_REQUEUE_PI_NONE         -> Q_REQUEUE_PI_IN_PROGRESS
+ *   Q_REQUEUE_PI_IN_PROGRESS  -> Q_REQUEUE_PI_DONE/LOCKED
+ *   Q_REQUEUE_PI_IN_PROGRESS  -> Q_REQUEUE_PI_NONE (requeue failed)
+ *   Q_REQUEUE_PI_WAIT         -> Q_REQUEUE_PI_DONE/LOCKED
+ *   Q_REQUEUE_PI_WAIT         -> Q_REQUEUE_PI_IGNORE (requeue failed)
+ *
+ * The requeue side ignores a waiter with state Q_REQUEUE_PI_IGNORE as this
+ * signals that the waiter is already on the way out. It also means that
+ * the waiter is still on the 'wait' futex, i.e. uaddr1.
+ *
+ * The waiter side signals early wakeup to the requeue side either through
+ * setting state to Q_REQUEUE_PI_IGNORE or to Q_REQUEUE_PI_WAIT depending
+ * on the current state. In case of Q_REQUEUE_PI_IGNORE it can immediately
+ * proceed to take the hash bucket lock of uaddr1. If it set state to WAIT,
+ * which means the wakeup is interleaving with a requeue in progress it has
+ * to wait for the requeue side to change the state. Either to DONE/LOCKED
+ * or to IGNORE. DONE/LOCKED means the waiter q is now on the uaddr2 futex
+ * and either blocked (DONE) or has acquired it (LOCKED). IGNORE is set by
+ * the requeue side when the requeue attempt failed via deadlock detection
+ * and therefore the waiter q is still on the uaddr1 futex.
+ */
+enum {
+       Q_REQUEUE_PI_NONE               =  0,
+       Q_REQUEUE_PI_IGNORE,
+       Q_REQUEUE_PI_IN_PROGRESS,
+       Q_REQUEUE_PI_WAIT,
+       Q_REQUEUE_PI_DONE,
+       Q_REQUEUE_PI_LOCKED,
+};
+
 static const struct futex_q futex_q_init = {
        /* list gets initialized in queue_me()*/
-       .key = FUTEX_KEY_INIT,
-       .bitset = FUTEX_BITSET_MATCH_ANY
+       .key            = FUTEX_KEY_INIT,
+       .bitset         = FUTEX_BITSET_MATCH_ANY,
+       .requeue_state  = ATOMIC_INIT(Q_REQUEUE_PI_NONE),
 };
 
 /*
@@ -1299,27 +1357,6 @@ static int attach_to_pi_owner(u32 __user *uaddr, u32 uval, union futex_key *key,
        return 0;
 }
 
-static int lookup_pi_state(u32 __user *uaddr, u32 uval,
-                          struct futex_hash_bucket *hb,
-                          union futex_key *key, struct futex_pi_state **ps,
-                          struct task_struct **exiting)
-{
-       struct futex_q *top_waiter = futex_top_waiter(hb, key);
-
-       /*
-        * If there is a waiter on that futex, validate it and
-        * attach to the pi_state when the validation succeeds.
-        */
-       if (top_waiter)
-               return attach_to_pi_state(uaddr, uval, top_waiter->pi_state, ps);
-
-       /*
-        * We are the first waiter - try to look up the owner based on
-        * @uval and attach to it.
-        */
-       return attach_to_pi_owner(uaddr, uval, key, ps, exiting);
-}
-
 static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval)
 {
        int err;
@@ -1354,7 +1391,7 @@ static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval)
  *  -  1 - acquired the lock;
  *  - <0 - error
  *
- * The hb->lock and futex_key refs shall be held by the caller.
+ * The hb->lock must be held by the caller.
  *
  * @exiting is only set when the return value is -EBUSY. If so, this holds
  * a refcount on the exiting task on return and the caller needs to drop it
@@ -1493,11 +1530,11 @@ static void mark_wake_futex(struct wake_q_head *wake_q, struct futex_q *q)
  */
 static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_state)
 {
-       u32 curval, newval;
        struct rt_mutex_waiter *top_waiter;
        struct task_struct *new_owner;
        bool postunlock = false;
-       DEFINE_WAKE_Q(wake_q);
+       DEFINE_RT_WAKE_Q(wqh);
+       u32 curval, newval;
        int ret = 0;
 
        top_waiter = rt_mutex_top_waiter(&pi_state->pi_mutex);
@@ -1549,14 +1586,14 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_
                 * not fail.
                 */
                pi_state_update_owner(pi_state, new_owner);
-               postunlock = __rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);
+               postunlock = __rt_mutex_futex_unlock(&pi_state->pi_mutex, &wqh);
        }
 
 out_unlock:
        raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
 
        if (postunlock)
-               rt_mutex_postunlock(&wake_q);
+               rt_mutex_postunlock(&wqh);
 
        return ret;
 }
@@ -1793,6 +1830,108 @@ void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
        q->key = *key2;
 }
 
+static inline bool futex_requeue_pi_prepare(struct futex_q *q,
+                                           struct futex_pi_state *pi_state)
+{
+       int old, new;
+
+       /*
+        * Set state to Q_REQUEUE_PI_IN_PROGRESS unless an early wakeup has
+        * already set Q_REQUEUE_PI_IGNORE to signal that requeue should
+        * ignore the waiter.
+        */
+       old = atomic_read_acquire(&q->requeue_state);
+       do {
+               if (old == Q_REQUEUE_PI_IGNORE)
+                       return false;
+
+               /*
+                * futex_proxy_trylock_atomic() might have set it to
+                * IN_PROGRESS and an interleaved early wake to WAIT.
+                *
+                * It was considered to have an extra state for that
+                * trylock, but that would just add more conditionals
+                * all over the place for a dubious value.
+                */
+               if (old != Q_REQUEUE_PI_NONE)
+                       break;
+
+               new = Q_REQUEUE_PI_IN_PROGRESS;
+       } while (!atomic_try_cmpxchg(&q->requeue_state, &old, new));
+
+       q->pi_state = pi_state;
+       return true;
+}
+
+static inline void futex_requeue_pi_complete(struct futex_q *q, int locked)
+{
+       int old, new;
+
+       old = atomic_read_acquire(&q->requeue_state);
+       do {
+               if (old == Q_REQUEUE_PI_IGNORE)
+                       return;
+
+               if (locked >= 0) {
+                       /* Requeue succeeded. Set DONE or LOCKED */
+                       WARN_ON_ONCE(old != Q_REQUEUE_PI_IN_PROGRESS &&
+                                    old != Q_REQUEUE_PI_WAIT);
+                       new = Q_REQUEUE_PI_DONE + locked;
+               } else if (old == Q_REQUEUE_PI_IN_PROGRESS) {
+                       /* Deadlock, no early wakeup interleave */
+                       new = Q_REQUEUE_PI_NONE;
+               } else {
+                       /* Deadlock, early wakeup interleave. */
+                       WARN_ON_ONCE(old != Q_REQUEUE_PI_WAIT);
+                       new = Q_REQUEUE_PI_IGNORE;
+               }
+       } while (!atomic_try_cmpxchg(&q->requeue_state, &old, new));
+
+#ifdef CONFIG_PREEMPT_RT
+       /* If the waiter interleaved with the requeue let it know */
+       if (unlikely(old == Q_REQUEUE_PI_WAIT))
+               rcuwait_wake_up(&q->requeue_wait);
+#endif
+}
+
+static inline int futex_requeue_pi_wakeup_sync(struct futex_q *q)
+{
+       int old, new;
+
+       old = atomic_read_acquire(&q->requeue_state);
+       do {
+               /* Is requeue done already? */
+               if (old >= Q_REQUEUE_PI_DONE)
+                       return old;
+
+               /*
+                * If not done, then tell the requeue code to either ignore
+                * the waiter or to wake it up once the requeue is done.
+                */
+               new = Q_REQUEUE_PI_WAIT;
+               if (old == Q_REQUEUE_PI_NONE)
+                       new = Q_REQUEUE_PI_IGNORE;
+       } while (!atomic_try_cmpxchg(&q->requeue_state, &old, new));
+
+       /* If the requeue was in progress, wait for it to complete */
+       if (old == Q_REQUEUE_PI_IN_PROGRESS) {
+#ifdef CONFIG_PREEMPT_RT
+               rcuwait_wait_event(&q->requeue_wait,
+                                  atomic_read(&q->requeue_state) != Q_REQUEUE_PI_WAIT,
+                                  TASK_UNINTERRUPTIBLE);
+#else
+               (void)atomic_cond_read_relaxed(&q->requeue_state, VAL != Q_REQUEUE_PI_WAIT);
+#endif
+       }
+
+       /*
+        * Requeue is now either prohibited or complete. Reread state
+        * because during the wait above it might have changed. Nothing
+        * will modify q->requeue_state after this point.
+        */
+       return atomic_read(&q->requeue_state);
+}
+
 /**
  * requeue_pi_wake_futex() - Wake a task that acquired the lock during requeue
  * @q:         the futex_q
@@ -1820,6 +1959,8 @@ void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
 
        q->lock_ptr = &hb->lock;
 
+       /* Signal locked state to the waiter */
+       futex_requeue_pi_complete(q, 1);
        wake_up_state(q->task, TASK_NORMAL);
 }
 
@@ -1879,10 +2020,21 @@ futex_proxy_trylock_atomic(u32 __user *pifutex, struct futex_hash_bucket *hb1,
        if (!top_waiter)
                return 0;
 
+       /*
+        * Ensure that this is a waiter sitting in futex_wait_requeue_pi()
+        * and waiting on the 'waitqueue' futex which is always !PI.
+        */
+       if (!top_waiter->rt_waiter || top_waiter->pi_state)
+               ret = -EINVAL;
+
        /* Ensure we requeue to the expected futex. */
        if (!match_futex(top_waiter->requeue_pi_key, key2))
                return -EINVAL;
 
+       /* Ensure that this does not race against an early wakeup */
+       if (!futex_requeue_pi_prepare(top_waiter, NULL))
+               return -EAGAIN;
+
        /*
         * Try to take the lock for top_waiter.  Set the FUTEX_WAITERS bit in
         * the contended case or if set_waiters is 1.  The pi_state is returned
@@ -1892,8 +2044,22 @@ futex_proxy_trylock_atomic(u32 __user *pifutex, struct futex_hash_bucket *hb1,
        ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task,
                                   exiting, set_waiters);
        if (ret == 1) {
+               /* Dequeue, wake up and update top_waiter::requeue_state */
                requeue_pi_wake_futex(top_waiter, key2, hb2);
                return vpid;
+       } else if (ret < 0) {
+               /* Rewind top_waiter::requeue_state */
+               futex_requeue_pi_complete(top_waiter, ret);
+       } else {
+               /*
+                * futex_lock_pi_atomic() did not acquire the user space
+                * futex, but managed to establish the proxy lock and pi
+                * state. top_waiter::requeue_state cannot be fixed up here
+                * because the waiter is not enqueued on the rtmutex
+                * yet. This is handled at the callsite depending on the
+                * result of rt_mutex_start_proxy_lock() which is
+                * guaranteed to be reached with this function returning 0.
+                */
        }
        return ret;
 }
@@ -1947,24 +2113,36 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
                if (uaddr1 == uaddr2)
                        return -EINVAL;
 
+               /*
+                * futex_requeue() allows the caller to define the number
+                * of waiters to wake up via the @nr_wake argument. With
+                * REQUEUE_PI, waking up more than one waiter is creating
+                * more problems than it solves. Waking up a waiter makes
+                * only sense if the PI futex @uaddr2 is uncontended as
+                * this allows the requeue code to acquire the futex
+                * @uaddr2 before waking the waiter. The waiter can then
+                * return to user space without further action. A secondary
+                * wakeup would just make the futex_wait_requeue_pi()
+                * handling more complex, because that code would have to
+                * look up pi_state and do more or less all the handling
+                * which the requeue code has to do for the to be requeued
+                * waiters. So restrict the number of waiters to wake to
+                * one, and only wake it up when the PI futex is
+                * uncontended. Otherwise requeue it and let the unlock of
+                * the PI futex handle the wakeup.
+                *
+                * All REQUEUE_PI users, e.g. pthread_cond_signal() and
+                * pthread_cond_broadcast() must use nr_wake=1.
+                */
+               if (nr_wake != 1)
+                       return -EINVAL;
+
                /*
                 * requeue_pi requires a pi_state, try to allocate it now
                 * without any locks in case it fails.
                 */
                if (refill_pi_state_cache())
                        return -ENOMEM;
-               /*
-                * requeue_pi must wake as many tasks as it can, up to nr_wake
-                * + nr_requeue, since it acquires the rt_mutex prior to
-                * returning to userspace, so as to not leave the rt_mutex with
-                * waiters and no owner.  However, second and third wake-ups
-                * cannot be predicted as they involve race conditions with the
-                * first wake and a fault while looking up the pi_state.  Both
-                * pthread_cond_signal() and pthread_cond_broadcast() should
-                * use nr_wake=1.
-                */
-               if (nr_wake != 1)
-                       return -EINVAL;
        }
 
 retry:
@@ -2014,7 +2192,7 @@ retry_private:
                }
        }
 
-       if (requeue_pi && (task_count - nr_wake < nr_requeue)) {
+       if (requeue_pi) {
                struct task_struct *exiting = NULL;
 
                /*
@@ -2022,6 +2200,8 @@ retry_private:
                 * intend to requeue waiters, force setting the FUTEX_WAITERS
                 * bit.  We force this here where we are able to easily handle
                 * faults rather in the requeue loop below.
+                *
+                * Updates topwaiter::requeue_state if a top waiter exists.
                 */
                ret = futex_proxy_trylock_atomic(uaddr2, hb1, hb2, &key1,
                                                 &key2, &pi_state,
@@ -2031,28 +2211,52 @@ retry_private:
                 * At this point the top_waiter has either taken uaddr2 or is
                 * waiting on it.  If the former, then the pi_state will not
                 * exist yet, look it up one more time to ensure we have a
-                * reference to it. If the lock was taken, ret contains the
-                * vpid of the top waiter task.
+                * reference to it. If the lock was taken, @ret contains the
+                * VPID of the top waiter task.
                 * If the lock was not taken, we have pi_state and an initial
                 * refcount on it. In case of an error we have nothing.
+                *
+                * The top waiter's requeue_state is up to date:
+                *
+                *  - If the lock was acquired atomically (ret > 0), then
+                *    the state is Q_REQUEUE_PI_LOCKED.
+                *
+                *  - If the trylock failed with an error (ret < 0) then
+                *    the state is either Q_REQUEUE_PI_NONE, i.e. "nothing
+                *    happened", or Q_REQUEUE_PI_IGNORE when there was an
+                *    interleaved early wakeup.
+                *
+                *  - If the trylock did not succeed (ret == 0) then the
+                *    state is either Q_REQUEUE_PI_IN_PROGRESS or
+                *    Q_REQUEUE_PI_WAIT if an early wakeup interleaved.
+                *    This will be cleaned up in the loop below, which
+                *    cannot fail because futex_proxy_trylock_atomic() did
+                *    the same sanity checks for requeue_pi as the loop
+                *    below does.
                 */
                if (ret > 0) {
                        WARN_ON(pi_state);
                        task_count++;
                        /*
-                        * If we acquired the lock, then the user space value
-                        * of uaddr2 should be vpid. It cannot be changed by
-                        * the top waiter as it is blocked on hb2 lock if it
-                        * tries to do so. If something fiddled with it behind
-                        * our back the pi state lookup might unearth it. So
-                        * we rather use the known value than rereading and
-                        * handing potential crap to lookup_pi_state.
+                        * If futex_proxy_trylock_atomic() acquired the
+                        * user space futex, then the user space value
+                        * @uaddr2 has been set to the @hb1's top waiter
+                        * task VPID. This task is guaranteed to be alive
+                        * and cannot be exiting because it is either
+                        * sleeping or blocked on @hb2 lock.
+                        *
+                        * The @uaddr2 futex cannot have waiters either as
+                        * otherwise futex_proxy_trylock_atomic() would not
+                        * have succeeded.
                         *
-                        * If that call succeeds then we have pi_state and an
-                        * initial refcount on it.
+                        * In order to requeue waiters to @hb2, pi state is
+                        * required. Hand in the VPID value (@ret) and
+                        * allocate PI state with an initial refcount on
+                        * it.
                         */
-                       ret = lookup_pi_state(uaddr2, ret, hb2, &key2,
-                                             &pi_state, &exiting);
+                       ret = attach_to_pi_owner(uaddr2, ret, &key2, &pi_state,
+                                                &exiting);
+                       WARN_ON(ret);
                }
 
                switch (ret) {
@@ -2060,7 +2264,10 @@ retry_private:
                        /* We hold a reference on the pi state. */
                        break;
 
-                       /* If the above failed, then pi_state is NULL */
+               /*
+                * If the above failed, then pi_state is NULL and
+                * waiter::requeue_state is correct.
+                */
                case -EFAULT:
                        double_unlock_hb(hb1, hb2);
                        hb_waiters_dec(hb2);
@@ -2112,18 +2319,17 @@ retry_private:
                        break;
                }
 
-               /*
-                * Wake nr_wake waiters.  For requeue_pi, if we acquired the
-                * lock, we already woke the top_waiter.  If not, it will be
-                * woken by futex_unlock_pi().
-                */
-               if (++task_count <= nr_wake && !requeue_pi) {
-                       mark_wake_futex(&wake_q, this);
+               /* Plain futexes just wake or requeue and are done */
+               if (!requeue_pi) {
+                       if (++task_count <= nr_wake)
+                               mark_wake_futex(&wake_q, this);
+                       else
+                               requeue_futex(this, hb1, hb2, &key2);
                        continue;
                }
 
                /* Ensure we requeue to the expected futex for requeue_pi. */
-               if (requeue_pi && !match_futex(this->requeue_pi_key, &key2)) {
+               if (!match_futex(this->requeue_pi_key, &key2)) {
                        ret = -EINVAL;
                        break;
                }
@@ -2131,54 +2337,67 @@ retry_private:
                /*
                 * Requeue nr_requeue waiters and possibly one more in the case
                 * of requeue_pi if we couldn't acquire the lock atomically.
+                *
+                * Prepare the waiter to take the rt_mutex. Take a refcount
+                * on the pi_state and store the pointer in the futex_q
+                * object of the waiter.
                 */
-               if (requeue_pi) {
+               get_pi_state(pi_state);
+
+               /* Don't requeue when the waiter is already on the way out. */
+               if (!futex_requeue_pi_prepare(this, pi_state)) {
                        /*
-                        * Prepare the waiter to take the rt_mutex. Take a
-                        * refcount on the pi_state and store the pointer in
-                        * the futex_q object of the waiter.
+                        * Early woken waiter signaled that it is on the
+                        * way out. Drop the pi_state reference and try the
+                        * next waiter. @this->pi_state is still NULL.
                         */
-                       get_pi_state(pi_state);
-                       this->pi_state = pi_state;
-                       ret = rt_mutex_start_proxy_lock(&pi_state->pi_mutex,
-                                                       this->rt_waiter,
-                                                       this->task);
-                       if (ret == 1) {
-                               /*
-                                * We got the lock. We do neither drop the
-                                * refcount on pi_state nor clear
-                                * this->pi_state because the waiter needs the
-                                * pi_state for cleaning up the user space
-                                * value. It will drop the refcount after
-                                * doing so.
-                                */
-                               requeue_pi_wake_futex(this, &key2, hb2);
-                               continue;
-                       } else if (ret) {
-                               /*
-                                * rt_mutex_start_proxy_lock() detected a
-                                * potential deadlock when we tried to queue
-                                * that waiter. Drop the pi_state reference
-                                * which we took above and remove the pointer
-                                * to the state from the waiters futex_q
-                                * object.
-                                */
-                               this->pi_state = NULL;
-                               put_pi_state(pi_state);
-                               /*
-                                * We stop queueing more waiters and let user
-                                * space deal with the mess.
-                                */
-                               break;
-                       }
+                       put_pi_state(pi_state);
+                       continue;
+               }
+
+               ret = rt_mutex_start_proxy_lock(&pi_state->pi_mutex,
+                                               this->rt_waiter,
+                                               this->task);
+
+               if (ret == 1) {
+                       /*
+                        * We got the lock. We do neither drop the refcount
+                        * on pi_state nor clear this->pi_state because the
+                        * waiter needs the pi_state for cleaning up the
+                        * user space value. It will drop the refcount
+                        * after doing so. this::requeue_state is updated
+                        * in the wakeup as well.
+                        */
+                       requeue_pi_wake_futex(this, &key2, hb2);
+                       task_count++;
+               } else if (!ret) {
+                       /* Waiter is queued, move it to hb2 */
+                       requeue_futex(this, hb1, hb2, &key2);
+                       futex_requeue_pi_complete(this, 0);
+                       task_count++;
+               } else {
+                       /*
+                        * rt_mutex_start_proxy_lock() detected a potential
+                        * deadlock when we tried to queue that waiter.
+                        * Drop the pi_state reference which we took above
+                        * and remove the pointer to the state from the
+                        * waiters futex_q object.
+                        */
+                       this->pi_state = NULL;
+                       put_pi_state(pi_state);
+                       futex_requeue_pi_complete(this, ret);
+                       /*
+                        * We stop queueing more waiters and let user space
+                        * deal with the mess.
+                        */
+                       break;
                }
-               requeue_futex(this, hb1, hb2, &key2);
        }
 
        /*
-        * We took an extra initial reference to the pi_state either
-        * in futex_proxy_trylock_atomic() or in lookup_pi_state(). We
-        * need to drop it here again.
+        * We took an extra initial reference to the pi_state either in
+        * futex_proxy_trylock_atomic() or in attach_to_pi_owner(). We need
+        * to drop it here again.
         */
        put_pi_state(pi_state);
 
@@ -2357,7 +2576,7 @@ static int __fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
         * Modifying pi_state _before_ the user space value would leave the
         * pi_state in an inconsistent state when we fault here, because we
         * need to drop the locks to handle the fault. This might be observed
-        * in the PID check in lookup_pi_state.
+        * in the PID checks when attaching to PI state.
         */
 retry:
        if (!argowner) {
@@ -2614,8 +2833,7 @@ static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q,
  *
  * Setup the futex_q and locate the hash_bucket.  Get the futex value and
  * compare it with the expected value.  Handle atomic faults internally.
- * Return with the hb lock held and a q.key reference on success, and unlocked
- * with no q.key reference on failure.
+ * Return with the hb lock held on success, and unlocked on failure.
  *
  * Return:
  *  -  0 - uaddr contains val and hb has been locked;
@@ -2693,8 +2911,8 @@ static int futex_wait(u32 __user *uaddr, unsigned int flags, u32 val,
                               current->timer_slack_ns);
 retry:
        /*
-        * Prepare to wait on uaddr. On success, holds hb lock and increments
-        * q.key refs.
+        * Prepare to wait on uaddr. On success, it holds hb->lock and q
+        * is initialized.
         */
        ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
        if (ret)
@@ -2705,7 +2923,6 @@ retry:
 
        /* If we were woken (and unqueued), we succeeded, whatever. */
        ret = 0;
-       /* unqueue_me() drops q.key ref */
        if (!unqueue_me(&q))
                goto out;
        ret = -ETIMEDOUT;
@@ -3072,27 +3289,22 @@ pi_faulted:
 }
 
 /**
- * handle_early_requeue_pi_wakeup() - Detect early wakeup on the initial futex
+ * handle_early_requeue_pi_wakeup() - Handle early wakeup on the initial futex
  * @hb:                the hash_bucket futex_q was original enqueued on
  * @q:         the futex_q woken while waiting to be requeued
- * @key2:      the futex_key of the requeue target futex
  * @timeout:   the timeout associated with the wait (NULL if none)
  *
- * Detect if the task was woken on the initial futex as opposed to the requeue
- * target futex.  If so, determine if it was a timeout or a signal that caused
- * the wakeup and return the appropriate error code to the caller.  Must be
- * called with the hb lock held.
+ * Determine the cause for the early wakeup.
  *
  * Return:
- *  -  0 = no early wakeup detected;
- *  - <0 = -ETIMEDOUT or -ERESTARTNOINTR
+ *  -EWOULDBLOCK or -ETIMEDOUT or -ERESTARTNOINTR
  */
 static inline
 int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
-                                  struct futex_q *q, union futex_key *key2,
+                                  struct futex_q *q,
                                   struct hrtimer_sleeper *timeout)
 {
-       int ret = 0;
+       int ret;
 
        /*
         * With the hb lock held, we avoid races while we process the wakeup.
@@ -3101,22 +3313,21 @@ int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
         * It can't be requeued from uaddr2 to something else since we don't
         * support a PI aware source futex for requeue.
         */
-       if (!match_futex(&q->key, key2)) {
-               WARN_ON(q->lock_ptr && (&hb->lock != q->lock_ptr));
-               /*
-                * We were woken prior to requeue by a timeout or a signal.
-                * Unqueue the futex_q and determine which it was.
-                */
-               plist_del(&q->list, &hb->chain);
-               hb_waiters_dec(hb);
+       WARN_ON_ONCE(&hb->lock != q->lock_ptr);
 
-               /* Handle spurious wakeups gracefully */
-               ret = -EWOULDBLOCK;
-               if (timeout && !timeout->task)
-                       ret = -ETIMEDOUT;
-               else if (signal_pending(current))
-                       ret = -ERESTARTNOINTR;
-       }
+       /*
+        * We were woken prior to requeue by a timeout or a signal.
+        * Unqueue the futex_q and determine which it was.
+        */
+       plist_del(&q->list, &hb->chain);
+       hb_waiters_dec(hb);
+
+       /* Handle spurious wakeups gracefully */
+       ret = -EWOULDBLOCK;
+       if (timeout && !timeout->task)
+               ret = -ETIMEDOUT;
+       else if (signal_pending(current))
+               ret = -ERESTARTNOINTR;
        return ret;
 }
 
@@ -3169,6 +3380,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
        struct futex_hash_bucket *hb;
        union futex_key key2 = FUTEX_KEY_INIT;
        struct futex_q q = futex_q_init;
+       struct rt_mutex_base *pi_mutex;
        int res, ret;
 
        if (!IS_ENABLED(CONFIG_FUTEX_PI))
@@ -3198,8 +3410,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
        q.requeue_pi_key = &key2;
 
        /*
-        * Prepare to wait on uaddr. On success, increments q.key (key1) ref
-        * count.
+        * Prepare to wait on uaddr. On success, it holds hb->lock and q
+        * is initialized.
         */
        ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
        if (ret)
@@ -3218,32 +3430,22 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
        /* Queue the futex_q, drop the hb lock, wait for wakeup. */
        futex_wait_queue_me(hb, &q, to);
 
-       spin_lock(&hb->lock);
-       ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
-       spin_unlock(&hb->lock);
-       if (ret)
-               goto out;
-
-       /*
-        * In order for us to be here, we know our q.key == key2, and since
-        * we took the hb->lock above, we also know that futex_requeue() has
-        * completed and we no longer have to concern ourselves with a wakeup
-        * race with the atomic proxy lock acquisition by the requeue code. The
-        * futex_requeue dropped our key1 reference and incremented our key2
-        * reference count.
-        */
+       switch (futex_requeue_pi_wakeup_sync(&q)) {
+       case Q_REQUEUE_PI_IGNORE:
+               /* The waiter is still on uaddr1 */
+               spin_lock(&hb->lock);
+               ret = handle_early_requeue_pi_wakeup(hb, &q, to);
+               spin_unlock(&hb->lock);
+               break;
 
-       /*
-        * Check if the requeue code acquired the second futex for us and do
-        * any pertinent fixup.
-        */
-       if (!q.rt_waiter) {
+       case Q_REQUEUE_PI_LOCKED:
+               /* The requeue acquired the lock */
                if (q.pi_state && (q.pi_state->owner != current)) {
                        spin_lock(q.lock_ptr);
                        ret = fixup_owner(uaddr2, &q, true);
                        /*
-                        * Drop the reference to the pi state which
-                        * the requeue_pi() code acquired for us.
+                        * Drop the reference to the pi state which the
+                        * requeue_pi() code acquired for us.
                         */
                        put_pi_state(q.pi_state);
                        spin_unlock(q.lock_ptr);
@@ -3253,18 +3455,14 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
                         */
                        ret = ret < 0 ? ret : 0;
                }
-       } else {
-               struct rt_mutex *pi_mutex;
+               break;
 
-               /*
-                * We have been woken up by futex_unlock_pi(), a timeout, or a
-                * signal.  futex_unlock_pi() will not destroy the lock_ptr nor
-                * the pi_state.
-                */
-               WARN_ON(!q.pi_state);
+       case Q_REQUEUE_PI_DONE:
+               /* Requeue completed. Current is 'pi_blocked_on' the rtmutex */
                pi_mutex = &q.pi_state->pi_mutex;
                ret = rt_mutex_wait_proxy_lock(pi_mutex, to, &rt_waiter);
 
+               /* Current is no longer pi_blocked_on */
                spin_lock(q.lock_ptr);
                if (ret && !rt_mutex_cleanup_proxy_lock(pi_mutex, &rt_waiter))
                        ret = 0;
@@ -3284,17 +3482,21 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
 
                unqueue_me_pi(&q);
                spin_unlock(q.lock_ptr);
-       }
 
-       if (ret == -EINTR) {
-               /*
-                * We've already been requeued, but cannot restart by calling
-                * futex_lock_pi() directly. We could restart this syscall, but
-                * it would detect that the user space "val" changed and return
-                * -EWOULDBLOCK.  Save the overhead of the restart and return
-                * -EWOULDBLOCK directly.
-                */
-               ret = -EWOULDBLOCK;
+               if (ret == -EINTR) {
+                       /*
+                        * We've already been requeued, but cannot restart
+                        * by calling futex_lock_pi() directly. We could
+                        * restart this syscall, but it would detect that
+                        * the user space "val" changed and return
+                        * -EWOULDBLOCK.  Save the overhead of the restart
+                        * and return -EWOULDBLOCK directly.
+                        */
+                       ret = -EWOULDBLOCK;
+               }
+               break;
+       default:
+               BUG();
        }
 
 out:
index 4d89ad4..f7ff891 100644 (file)
@@ -355,7 +355,7 @@ static int irq_build_affinity_masks(unsigned int startvec, unsigned int numvecs,
                goto fail_npresmsk;
 
        /* Stabilize the cpumasks */
-       get_online_cpus();
+       cpus_read_lock();
        build_node_to_cpumask(node_to_cpumask);
 
        /* Spread on present CPUs starting from affd->pre_vectors */
@@ -384,7 +384,7 @@ static int irq_build_affinity_masks(unsigned int startvec, unsigned int numvecs,
                nr_others = ret;
 
  fail_build_affinity:
-       put_online_cpus();
+       cpus_read_unlock();
 
        if (ret >= 0)
                WARN_ON(nr_present + nr_others < numvecs);
@@ -505,9 +505,9 @@ unsigned int irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec,
        if (affd->calc_sets) {
                set_vecs = maxvec - resv;
        } else {
-               get_online_cpus();
+               cpus_read_lock();
                set_vecs = cpumask_weight(cpu_possible_mask);
-               put_online_cpus();
+               cpus_read_unlock();
        }
 
        return resv + min(set_vecs, maxvec - resv);
index 7f04c7d..a98bcfc 100644 (file)
@@ -265,8 +265,11 @@ int irq_startup(struct irq_desc *desc, bool resend, bool force)
        } else {
                switch (__irq_startup_managed(desc, aff, force)) {
                case IRQ_STARTUP_NORMAL:
+                       if (d->chip->flags & IRQCHIP_AFFINITY_PRE_STARTUP)
+                               irq_setup_affinity(desc);
                        ret = __irq_startup(desc);
-                       irq_setup_affinity(desc);
+                       if (!(d->chip->flags & IRQCHIP_AFFINITY_PRE_STARTUP))
+                               irq_setup_affinity(desc);
                        break;
                case IRQ_STARTUP_MANAGED:
                        irq_do_set_affinity(d, aff, false);
index 02236b1..39a41c5 100644 (file)
@@ -166,7 +166,7 @@ void irq_migrate_all_off_this_cpu(void)
                raw_spin_unlock(&desc->lock);
 
                if (affinity_broken) {
-                       pr_warn_ratelimited("IRQ %u: no longer affine to CPU%u\n",
+                       pr_debug_ratelimited("IRQ %u: no longer affine to CPU%u\n",
                                            irq, smp_processor_id());
                }
        }
index f8f23af..cc7cdd2 100644 (file)
@@ -240,9 +240,8 @@ irq_alloc_generic_chip(const char *name, int num_ct, unsigned int irq_base,
                       void __iomem *reg_base, irq_flow_handler_t handler)
 {
        struct irq_chip_generic *gc;
-       unsigned long sz = sizeof(*gc) + num_ct * sizeof(struct irq_chip_type);
 
-       gc = kzalloc(sz, GFP_KERNEL);
+       gc = kzalloc(struct_size(gc, chip_types, num_ct), GFP_KERNEL);
        if (gc) {
                irq_init_generic_chip(gc, name, num_ct, irq_base, reg_base,
                                      handler);
@@ -288,8 +287,11 @@ int __irq_alloc_domain_generic_chips(struct irq_domain *d, int irqs_per_chip,
 {
        struct irq_domain_chip_generic *dgc;
        struct irq_chip_generic *gc;
-       int numchips, sz, i;
        unsigned long flags;
+       int numchips, i;
+       size_t dgc_sz;
+       size_t gc_sz;
+       size_t sz;
        void *tmp;
 
        if (d->gc)
@@ -300,8 +302,9 @@ int __irq_alloc_domain_generic_chips(struct irq_domain *d, int irqs_per_chip,
                return -EINVAL;
 
        /* Allocate a pointer, generic chip and chiptypes for each chip */
-       sz = sizeof(*dgc) + numchips * sizeof(gc);
-       sz += numchips * (sizeof(*gc) + num_ct * sizeof(struct irq_chip_type));
+       gc_sz = struct_size(gc, chip_types, num_ct);
+       dgc_sz = struct_size(dgc, gc, numchips);
+       sz = dgc_sz + numchips * gc_sz;
 
        tmp = dgc = kzalloc(sz, GFP_KERNEL);
        if (!dgc)
@@ -314,7 +317,7 @@ int __irq_alloc_domain_generic_chips(struct irq_domain *d, int irqs_per_chip,
        d->gc = dgc;
 
        /* Calc pointer to the first generic chip */
-       tmp += sizeof(*dgc) + numchips * sizeof(gc);
+       tmp += dgc_sz;
        for (i = 0; i < numchips; i++) {
                /* Store the pointer to the generic chip */
                dgc->gc[i] = gc = tmp;
@@ -331,7 +334,7 @@ int __irq_alloc_domain_generic_chips(struct irq_domain *d, int irqs_per_chip,
                list_add_tail(&gc->list, &gc_list);
                raw_spin_unlock_irqrestore(&gc_lock, flags);
                /* Calc pointer to the next generic chip */
-               tmp += sizeof(*gc) + num_ct * sizeof(struct irq_chip_type);
+               tmp += gc_sz;
        }
        return 0;
 }
index 52f11c7..08ce7da 100644 (file)
 /**
  * irq_reserve_ipi() - Setup an IPI to destination cpumask
  * @domain:    IPI domain
- * @dest:      cpumask of cpus which can receive the IPI
+ * @dest:      cpumask of CPUs which can receive the IPI
  *
  * Allocate a virq that can be used to send IPI to any CPU in dest mask.
  *
- * On success it'll return linux irq number and error code on failure
+ * Return: Linux IRQ number on success or error code on failure
  */
 int irq_reserve_ipi(struct irq_domain *domain,
                             const struct cpumask *dest)
@@ -104,13 +104,13 @@ free_descs:
 
 /**
  * irq_destroy_ipi() - unreserve an IPI that was previously allocated
- * @irq:       linux irq number to be destroyed
- * @dest:      cpumask of cpus which should have the IPI removed
+ * @irq:       Linux IRQ number to be destroyed
+ * @dest:      cpumask of CPUs which should have the IPI removed
  *
  * The IPIs allocated with irq_reserve_ipi() are returned to the system
  * destroying all virqs associated with them.
  *
- * Return 0 on success or error code on failure.
+ * Return: %0 on success or error code on failure.
  */
 int irq_destroy_ipi(unsigned int irq, const struct cpumask *dest)
 {
@@ -150,14 +150,14 @@ int irq_destroy_ipi(unsigned int irq, const struct cpumask *dest)
 }
 
 /**
- * ipi_get_hwirq - Get the hwirq associated with an IPI to a cpu
- * @irq:       linux irq number
- * @cpu:       the target cpu
+ * ipi_get_hwirq - Get the hwirq associated with an IPI to a CPU
+ * @irq:       Linux IRQ number
+ * @cpu:       the target CPU
  *
  * When dealing with coprocessors IPI, we need to inform the coprocessor of
  * the hwirq it needs to use to receive and send IPIs.
  *
- * Returns hwirq value on success and INVALID_HWIRQ on failure.
+ * Return: hwirq value on success or INVALID_HWIRQ on failure.
  */
 irq_hw_number_t ipi_get_hwirq(unsigned int irq, unsigned int cpu)
 {
@@ -216,7 +216,7 @@ static int ipi_send_verify(struct irq_chip *chip, struct irq_data *data,
  * This function is for architecture or core code to speed up IPI sending. Not
  * usable from driver code.
  *
- * Returns zero on success and negative error number on failure.
+ * Return: %0 on success or negative error number on failure.
  */
 int __ipi_send_single(struct irq_desc *desc, unsigned int cpu)
 {
@@ -250,7 +250,7 @@ int __ipi_send_single(struct irq_desc *desc, unsigned int cpu)
 }
 
 /**
- * ipi_send_mask - send an IPI to target Linux SMP CPU(s)
+ * __ipi_send_mask - send an IPI to target Linux SMP CPU(s)
  * @desc:      pointer to irq_desc of the IRQ
  * @dest:      dest CPU(s), must be a subset of the mask passed to
  *             irq_reserve_ipi()
@@ -258,7 +258,7 @@ int __ipi_send_single(struct irq_desc *desc, unsigned int cpu)
  * This function is for architecture or core code to speed up IPI sending. Not
  * usable from driver code.
  *
- * Returns zero on success and negative error number on failure.
+ * Return: %0 on success or negative error number on failure.
  */
 int __ipi_send_mask(struct irq_desc *desc, const struct cpumask *dest)
 {
@@ -298,11 +298,11 @@ int __ipi_send_mask(struct irq_desc *desc, const struct cpumask *dest)
 
 /**
  * ipi_send_single - Send an IPI to a single CPU
- * @virq:      linux irq number from irq_reserve_ipi()
+ * @virq:      Linux IRQ number from irq_reserve_ipi()
  * @cpu:       destination CPU, must in the destination mask passed to
  *             irq_reserve_ipi()
  *
- * Returns zero on success and negative error number on failure.
+ * Return: %0 on success or negative error number on failure.
  */
 int ipi_send_single(unsigned int virq, unsigned int cpu)
 {
@@ -319,11 +319,11 @@ EXPORT_SYMBOL_GPL(ipi_send_single);
 
 /**
  * ipi_send_mask - Send an IPI to target CPU(s)
- * @virq:      linux irq number from irq_reserve_ipi()
+ * @virq:      Linux IRQ number from irq_reserve_ipi()
  * @dest:      dest CPU(s), must be a subset of the mask passed to
  *             irq_reserve_ipi()
  *
- * Returns zero on success and negative error number on failure.
+ * Return: %0 on success or negative error number on failure.
  */
 int ipi_send_mask(unsigned int virq, const struct cpumask *dest)
 {
index fadb937..4e3c29b 100644 (file)
@@ -188,7 +188,7 @@ static ssize_t hwirq_show(struct kobject *kobj,
 
        raw_spin_lock_irq(&desc->lock);
        if (desc->irq_data.domain)
-               ret = sprintf(buf, "%d\n", (int)desc->irq_data.hwirq);
+               ret = sprintf(buf, "%lu\n", desc->irq_data.hwirq);
        raw_spin_unlock_irq(&desc->lock);
 
        return ret;
index 51c483c..62be161 100644 (file)
@@ -1215,6 +1215,7 @@ int irq_domain_disconnect_hierarchy(struct irq_domain *domain,
        irqd->chip = ERR_PTR(-ENOTCONN);
        return 0;
 }
+EXPORT_SYMBOL_GPL(irq_domain_disconnect_hierarchy);
 
 static int irq_domain_trim_hierarchy(unsigned int virq)
 {
index ef30b47..27667e8 100644 (file)
 #include "internals.h"
 
 #if defined(CONFIG_IRQ_FORCED_THREADING) && !defined(CONFIG_PREEMPT_RT)
-__read_mostly bool force_irqthreads;
-EXPORT_SYMBOL_GPL(force_irqthreads);
+DEFINE_STATIC_KEY_FALSE(force_irqthreads_key);
 
 static int __init setup_forced_irqthreads(char *arg)
 {
-       force_irqthreads = true;
+       static_branch_enable(&force_irqthreads_key);
        return 0;
 }
 early_param("threadirqs", setup_forced_irqthreads);
@@ -1260,8 +1259,8 @@ static int irq_thread(void *data)
        irqreturn_t (*handler_fn)(struct irq_desc *desc,
                        struct irqaction *action);
 
-       if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
-                                       &action->thread_flags))
+       if (force_irqthreads() && test_bit(IRQTF_FORCED_THREAD,
+                                          &action->thread_flags))
                handler_fn = irq_forced_thread_fn;
        else
                handler_fn = irq_thread_fn;
@@ -1322,7 +1321,7 @@ EXPORT_SYMBOL_GPL(irq_wake_thread);
 
 static int irq_setup_forced_threading(struct irqaction *new)
 {
-       if (!force_irqthreads)
+       if (!force_irqthreads())
                return 0;
        if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
                return 0;
@@ -2072,9 +2071,9 @@ const void *free_nmi(unsigned int irq, void *dev_id)
  *     request_threaded_irq - allocate an interrupt line
  *     @irq: Interrupt line to allocate
  *     @handler: Function to be called when the IRQ occurs.
- *               Primary handler for threaded interrupts
- *               If NULL and thread_fn != NULL the default
- *               primary handler is installed
+ *               Primary handler for threaded interrupts.
+ *               If handler is NULL and thread_fn != NULL
+ *               the default primary handler is installed.
  *     @thread_fn: Function called from the irq handler thread
  *                 If NULL, no irq thread is created
  *     @irqflags: Interrupt type flags
@@ -2108,7 +2107,7 @@ const void *free_nmi(unsigned int irq, void *dev_id)
  *
  *     IRQF_SHARED             Interrupt is shared
  *     IRQF_TRIGGER_*          Specify active edge(s) or level
- *
+ *     IRQF_ONESHOT            Run thread_fn with interrupt line masked
  */
 int request_threaded_irq(unsigned int irq, irq_handler_t handler,
                         irq_handler_t thread_fn, unsigned long irqflags,
index 578596e..bbfb264 100644 (file)
@@ -280,7 +280,8 @@ void irq_matrix_remove_managed(struct irq_matrix *m, const struct cpumask *msk)
 /**
  * irq_matrix_alloc_managed - Allocate a managed interrupt in a CPU map
  * @m:         Matrix pointer
- * @cpu:       On which CPU the interrupt should be allocated
+ * @msk:       Which CPUs to search in
+ * @mapped_cpu:        Pointer to store the CPU for which the irq was allocated
  */
 int irq_matrix_alloc_managed(struct irq_matrix *m, const struct cpumask *msk,
                             unsigned int *mapped_cpu)
index c41965e..6a5ecee 100644 (file)
 #include <linux/irqdomain.h>
 #include <linux/msi.h>
 #include <linux/slab.h>
+#include <linux/pci.h>
 
 #include "internals.h"
 
 /**
- * alloc_msi_entry - Allocate an initialize msi_entry
+ * alloc_msi_entry - Allocate an initialized msi_desc
  * @dev:       Pointer to the device for which this is allocated
  * @nvec:      The number of vectors used in this entry
  * @affinity:  Optional pointer to an affinity mask array size of @nvec
  *
- * If @affinity is not NULL then an affinity array[@nvec] is allocated
+ * If @affinity is not %NULL then an affinity array[@nvec] is allocated
  * and the affinity masks and flags from @affinity are copied.
+ *
+ * Return: pointer to allocated &msi_desc on success or %NULL on failure
  */
 struct msi_desc *alloc_msi_entry(struct device *dev, int nvec,
                                 const struct irq_affinity_desc *affinity)
@@ -69,6 +72,139 @@ void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg)
 }
 EXPORT_SYMBOL_GPL(get_cached_msi_msg);
 
+static ssize_t msi_mode_show(struct device *dev, struct device_attribute *attr,
+                            char *buf)
+{
+       struct msi_desc *entry;
+       bool is_msix = false;
+       unsigned long irq;
+       int retval;
+
+       retval = kstrtoul(attr->attr.name, 10, &irq);
+       if (retval)
+               return retval;
+
+       entry = irq_get_msi_desc(irq);
+       if (!entry)
+               return -ENODEV;
+
+       if (dev_is_pci(dev))
+               is_msix = entry->msi_attrib.is_msix;
+
+       return sysfs_emit(buf, "%s\n", is_msix ? "msix" : "msi");
+}
+
+/**
+ * msi_populate_sysfs - Populate msi_irqs sysfs entries for devices
+ * @dev:       The device (PCI, platform, etc.) which will get sysfs entries
+ *
+ * Return: attribute_group ** so that the specific bus MSI code can save it
+ * somewhere while initializing MSI IRQs. If the device has no MSI IRQs,
+ * return NULL; if it fails to populate sysfs, return ERR_PTR.
+ */
+const struct attribute_group **msi_populate_sysfs(struct device *dev)
+{
+       const struct attribute_group **msi_irq_groups;
+       struct attribute **msi_attrs, *msi_attr;
+       struct device_attribute *msi_dev_attr;
+       struct attribute_group *msi_irq_group;
+       struct msi_desc *entry;
+       int ret = -ENOMEM;
+       int num_msi = 0;
+       int count = 0;
+       int i;
+
+       /* Determine how many msi entries we have */
+       for_each_msi_entry(entry, dev)
+               num_msi += entry->nvec_used;
+       if (!num_msi)
+               return NULL;
+
+       /* Dynamically create the MSI attributes for the device */
+       msi_attrs = kcalloc(num_msi + 1, sizeof(void *), GFP_KERNEL);
+       if (!msi_attrs)
+               return ERR_PTR(-ENOMEM);
+
+       for_each_msi_entry(entry, dev) {
+               for (i = 0; i < entry->nvec_used; i++) {
+                       msi_dev_attr = kzalloc(sizeof(*msi_dev_attr), GFP_KERNEL);
+                       if (!msi_dev_attr)
+                               goto error_attrs;
+                       msi_attrs[count] = &msi_dev_attr->attr;
+
+                       sysfs_attr_init(&msi_dev_attr->attr);
+                       msi_dev_attr->attr.name = kasprintf(GFP_KERNEL, "%d",
+                                                           entry->irq + i);
+                       if (!msi_dev_attr->attr.name)
+                               goto error_attrs;
+                       msi_dev_attr->attr.mode = 0444;
+                       msi_dev_attr->show = msi_mode_show;
+                       ++count;
+               }
+       }
+
+       msi_irq_group = kzalloc(sizeof(*msi_irq_group), GFP_KERNEL);
+       if (!msi_irq_group)
+               goto error_attrs;
+       msi_irq_group->name = "msi_irqs";
+       msi_irq_group->attrs = msi_attrs;
+
+       msi_irq_groups = kcalloc(2, sizeof(void *), GFP_KERNEL);
+       if (!msi_irq_groups)
+               goto error_irq_group;
+       msi_irq_groups[0] = msi_irq_group;
+
+       ret = sysfs_create_groups(&dev->kobj, msi_irq_groups);
+       if (ret)
+               goto error_irq_groups;
+
+       return msi_irq_groups;
+
+error_irq_groups:
+       kfree(msi_irq_groups);
+error_irq_group:
+       kfree(msi_irq_group);
+error_attrs:
+       count = 0;
+       msi_attr = msi_attrs[count];
+       while (msi_attr) {
+               msi_dev_attr = container_of(msi_attr, struct device_attribute, attr);
+               kfree(msi_attr->name);
+               kfree(msi_dev_attr);
+               ++count;
+               msi_attr = msi_attrs[count];
+       }
+       kfree(msi_attrs);
+       return ERR_PTR(ret);
+}
+
+/**
+ * msi_destroy_sysfs - Destroy msi_irqs sysfs entries for devices
+ * @dev:               The device (PCI, platform, etc.) whose sysfs entries will be removed
+ * @msi_irq_groups:    attribute_group for device msi_irqs entries
+ */
+void msi_destroy_sysfs(struct device *dev, const struct attribute_group **msi_irq_groups)
+{
+       struct device_attribute *dev_attr;
+       struct attribute **msi_attrs;
+       int count = 0;
+
+       if (msi_irq_groups) {
+               sysfs_remove_groups(&dev->kobj, msi_irq_groups);
+               msi_attrs = msi_irq_groups[0]->attrs;
+               while (msi_attrs[count]) {
+                       dev_attr = container_of(msi_attrs[count],
+                                       struct device_attribute, attr);
+                       kfree(dev_attr->attr.name);
+                       kfree(dev_attr);
+                       ++count;
+               }
+               kfree(msi_attrs);
+               kfree(msi_irq_groups[0]);
+               kfree(msi_irq_groups);
+       }
+}
+
 #ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
 static inline void irq_chip_write_msi_msg(struct irq_data *data,
                                          struct msi_msg *msg)
@@ -97,6 +233,8 @@ static void msi_check_level(struct irq_domain *domain, struct msi_msg *msg)
  *
  * Intended to be used by MSI interrupt controllers which are
  * implemented with hierarchical domains.
+ *
+ * Return: IRQ_SET_MASK_* result code
  */
 int msi_domain_set_affinity(struct irq_data *irq_data,
                            const struct cpumask *mask, bool force)
@@ -277,10 +415,12 @@ static void msi_domain_update_chip_ops(struct msi_domain_info *info)
 }
 
 /**
- * msi_create_irq_domain - Create a MSI interrupt domain
+ * msi_create_irq_domain - Create an MSI interrupt domain
  * @fwnode:    Optional fwnode of the interrupt controller
  * @info:      MSI domain info
  * @parent:    Parent irq domain
+ *
+ * Return: pointer to the created &struct irq_domain or %NULL on failure
  */
 struct irq_domain *msi_create_irq_domain(struct fwnode_handle *fwnode,
                                         struct msi_domain_info *info,
@@ -476,11 +616,6 @@ skip_activate:
        return 0;
 
 cleanup:
-       for_each_msi_vector(desc, i, dev) {
-               irq_data = irq_domain_get_irq_data(domain, i);
-               if (irqd_is_activated(irq_data))
-                       irq_domain_deactivate_irq(irq_data);
-       }
        msi_domain_free_irqs(domain, dev);
        return ret;
 }
@@ -492,7 +627,7 @@ cleanup:
  *             are allocated
  * @nvec:      The number of interrupts to allocate
  *
- * Return0 on success or an error code.
+ * Return: %0 on success or an error code.
  */
 int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
                          int nvec)
@@ -505,7 +640,15 @@ int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
 
 void __msi_domain_free_irqs(struct irq_domain *domain, struct device *dev)
 {
+       struct irq_data *irq_data;
        struct msi_desc *desc;
+       int i;
+
+       for_each_msi_vector(desc, i, dev) {
+               irq_data = irq_domain_get_irq_data(domain, i);
+               if (irqd_is_activated(irq_data))
+                       irq_domain_deactivate_irq(irq_data);
+       }
 
        for_each_msi_entry(desc, dev) {
                /*
@@ -521,7 +664,7 @@ void __msi_domain_free_irqs(struct irq_domain *domain, struct device *dev)
 }
 
 /**
- * __msi_domain_free_irqs - Free interrupts from a MSI interrupt @domain associated tp @dev
+ * msi_domain_free_irqs - Free interrupts from a MSI interrupt @domain associated to @dev
  * @domain:    The domain to managing the interrupts
  * @dev:       Pointer to device struct of the device for which the interrupts
  *             are free
@@ -538,8 +681,7 @@ void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev)
  * msi_get_domain_info - Get the MSI interrupt domain info for @domain
  * @domain:    The interrupt domain to retrieve data from
  *
- * Returns the pointer to the msi_domain_info stored in
- * @domain->host_data.
+ * Return: the pointer to the msi_domain_info stored in @domain->host_data.
  */
 struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain)
 {
index ce0adb2..ca71123 100644 (file)
@@ -227,7 +227,7 @@ unlock:
 }
 
 /**
- * irq_pm_syscore_ops - enable interrupt lines early
+ * irq_pm_syscore_resume - enable interrupt lines early
  *
  * Enable all interrupt lines with %IRQF_EARLY_RESUME set.
  */
index 7c5cd42..ee595ec 100644 (file)
@@ -513,7 +513,7 @@ int show_interrupts(struct seq_file *p, void *v)
                seq_printf(p, " %8s", "None");
        }
        if (desc->irq_data.domain)
-               seq_printf(p, " %*d", prec, (int) desc->irq_data.hwirq);
+               seq_printf(p, " %*lu", prec, desc->irq_data.hwirq);
        else
                seq_printf(p, " %*s", prec, "");
 #ifdef CONFIG_GENERIC_IRQ_SHOW_LEVEL
index d309d6f..c43e2ac 100644 (file)
@@ -453,6 +453,11 @@ static __always_inline void __irq_timings_store(int irq, struct irqt_stat *irqs,
         */
        index = irq_timings_interval_index(interval);
 
+       if (index > PREDICTION_BUFFER_SIZE - 1) {
+               irqs->count = 0;
+               return;
+       }
+
        /*
         * Store the index as an element of the pattern in another
         * circular array.
@@ -794,12 +799,14 @@ static int __init irq_timings_test_irqs(struct timings_intervals *ti)
 
                __irq_timings_store(irq, irqs, ti->intervals[i]);
                if (irqs->circ_timings[i & IRQ_TIMINGS_MASK] != index) {
+                       ret = -EBADSLT;
                        pr_err("Failed to store in the circular buffer\n");
                        goto out;
                }
        }
 
        if (irqs->count != ti->count) {
+               ret = -ERANGE;
                pr_err("Count differs\n");
                goto out;
        }
index e65de17..1d1d1b0 100644 (file)
@@ -64,7 +64,7 @@ static noinline void microbenchmark(unsigned long iters)
 {
        const struct kcsan_ctx ctx_save = current->kcsan_ctx;
        const bool was_enabled = READ_ONCE(kcsan_enabled);
-       cycles_t cycles;
+       u64 cycles;
 
        /* We may have been called from an atomic region; reset context. */
        memset(&current->kcsan_ctx, 0, sizeof(current->kcsan_ctx));
index 3572808..d51cabf 100644 (file)
@@ -24,7 +24,8 @@ obj-$(CONFIG_SMP) += spinlock.o
 obj-$(CONFIG_LOCK_SPIN_ON_OWNER) += osq_lock.o
 obj-$(CONFIG_PROVE_LOCKING) += spinlock.o
 obj-$(CONFIG_QUEUED_SPINLOCKS) += qspinlock.o
-obj-$(CONFIG_RT_MUTEXES) += rtmutex.o
+obj-$(CONFIG_RT_MUTEXES) += rtmutex_api.o
+obj-$(CONFIG_PREEMPT_RT) += spinlock_rt.o ww_rt_mutex.o
 obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o
 obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
 obj-$(CONFIG_QUEUED_RWLOCKS) += qrwlock.o
index b3adb40..7c5a4a0 100644 (file)
@@ -59,7 +59,7 @@ static struct task_struct **writer_tasks;
 static struct task_struct **reader_tasks;
 
 static bool lock_is_write_held;
-static bool lock_is_read_held;
+static atomic_t lock_is_read_held;
 static unsigned long last_lock_release;
 
 struct lock_stress_stats {
@@ -682,7 +682,7 @@ static int lock_torture_writer(void *arg)
                if (WARN_ON_ONCE(lock_is_write_held))
                        lwsp->n_lock_fail++;
                lock_is_write_held = true;
-               if (WARN_ON_ONCE(lock_is_read_held))
+               if (WARN_ON_ONCE(atomic_read(&lock_is_read_held)))
                        lwsp->n_lock_fail++; /* rare, but... */
 
                lwsp->n_lock_acquired++;
@@ -717,13 +717,13 @@ static int lock_torture_reader(void *arg)
                        schedule_timeout_uninterruptible(1);
 
                cxt.cur_ops->readlock(tid);
-               lock_is_read_held = true;
+               atomic_inc(&lock_is_read_held);
                if (WARN_ON_ONCE(lock_is_write_held))
                        lrsp->n_lock_fail++; /* rare, but... */
 
                lrsp->n_lock_acquired++;
                cxt.cur_ops->read_delay(&rand);
-               lock_is_read_held = false;
+               atomic_dec(&lock_is_read_held);
                cxt.cur_ops->readunlock(tid);
 
                stutter_wait("lock_torture_reader");
@@ -738,20 +738,22 @@ static int lock_torture_reader(void *arg)
 static void __torture_print_stats(char *page,
                                  struct lock_stress_stats *statp, bool write)
 {
+       long cur;
        bool fail = false;
        int i, n_stress;
-       long max = 0, min = statp ? statp[0].n_lock_acquired : 0;
+       long max = 0, min = statp ? data_race(statp[0].n_lock_acquired) : 0;
        long long sum = 0;
 
        n_stress = write ? cxt.nrealwriters_stress : cxt.nrealreaders_stress;
        for (i = 0; i < n_stress; i++) {
-               if (statp[i].n_lock_fail)
+               if (data_race(statp[i].n_lock_fail))
                        fail = true;
-               sum += statp[i].n_lock_acquired;
-               if (max < statp[i].n_lock_acquired)
-                       max = statp[i].n_lock_acquired;
-               if (min > statp[i].n_lock_acquired)
-                       min = statp[i].n_lock_acquired;
+               cur = data_race(statp[i].n_lock_acquired);
+               sum += cur;
+               if (max < cur)
+                       max = cur;
+               if (min > cur)
+                       min = cur;
        }
        page += sprintf(page,
                        "%s:  Total: %lld  Max/Min: %ld/%ld %s  Fail: %d %s\n",
@@ -996,7 +998,6 @@ static int __init lock_torture_init(void)
                }
 
                if (nreaders_stress) {
-                       lock_is_read_held = false;
                        cxt.lrsa = kmalloc_array(cxt.nrealreaders_stress,
                                                 sizeof(*cxt.lrsa),
                                                 GFP_KERNEL);
index db93015..bc8abb8 100644 (file)
@@ -1,6 +1,4 @@
 /*
- * kernel/mutex-debug.c
- *
  * Debugging code for mutexes
  *
  * Started by Ingo Molnar:
@@ -22,7 +20,7 @@
 #include <linux/interrupt.h>
 #include <linux/debug_locks.h>
 
-#include "mutex-debug.h"
+#include "mutex.h"
 
 /*
  * Must be called with lock->wait_lock held.
@@ -32,6 +30,7 @@ void debug_mutex_lock_common(struct mutex *lock, struct mutex_waiter *waiter)
        memset(waiter, MUTEX_DEBUG_INIT, sizeof(*waiter));
        waiter->magic = waiter;
        INIT_LIST_HEAD(&waiter->list);
+       waiter->ww_ctx = MUTEX_POISON_WW_CTX;
 }
 
 void debug_mutex_wake_waiter(struct mutex *lock, struct mutex_waiter *waiter)
diff --git a/kernel/locking/mutex-debug.h b/kernel/locking/mutex-debug.h
deleted file mode 100644 (file)
index 53e631e..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Mutexes: blocking mutual exclusion locks
- *
- * started by Ingo Molnar:
- *
- *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
- *
- * This file contains mutex debugging related internal declarations,
- * prototypes and inline functions, for the CONFIG_DEBUG_MUTEXES case.
- * More details are in kernel/mutex-debug.c.
- */
-
-/*
- * This must be called with lock->wait_lock held.
- */
-extern void debug_mutex_lock_common(struct mutex *lock,
-                                   struct mutex_waiter *waiter);
-extern void debug_mutex_wake_waiter(struct mutex *lock,
-                                   struct mutex_waiter *waiter);
-extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
-extern void debug_mutex_add_waiter(struct mutex *lock,
-                                  struct mutex_waiter *waiter,
-                                  struct task_struct *task);
-extern void debug_mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
-                               struct task_struct *task);
-extern void debug_mutex_unlock(struct mutex *lock);
-extern void debug_mutex_init(struct mutex *lock, const char *name,
-                            struct lock_class_key *key);
index d2df5e6..d456579 100644 (file)
 #include <linux/debug_locks.h>
 #include <linux/osq_lock.h>
 
+#ifndef CONFIG_PREEMPT_RT
+#include "mutex.h"
+
 #ifdef CONFIG_DEBUG_MUTEXES
-# include "mutex-debug.h"
+# define MUTEX_WARN_ON(cond) DEBUG_LOCKS_WARN_ON(cond)
 #else
-# include "mutex.h"
+# define MUTEX_WARN_ON(cond)
 #endif
 
 void
 __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
 {
        atomic_long_set(&lock->owner, 0);
-       spin_lock_init(&lock->wait_lock);
+       raw_spin_lock_init(&lock->wait_lock);
        INIT_LIST_HEAD(&lock->wait_list);
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
        osq_lock_init(&lock->osq);
@@ -91,55 +94,56 @@ static inline unsigned long __owner_flags(unsigned long owner)
        return owner & MUTEX_FLAGS;
 }
 
-/*
- * Trylock variant that returns the owning task on failure.
- */
-static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
+static inline struct task_struct *__mutex_trylock_common(struct mutex *lock, bool handoff)
 {
        unsigned long owner, curr = (unsigned long)current;
 
        owner = atomic_long_read(&lock->owner);
        for (;;) { /* must loop, can race against a flag */
-               unsigned long old, flags = __owner_flags(owner);
+               unsigned long flags = __owner_flags(owner);
                unsigned long task = owner & ~MUTEX_FLAGS;
 
                if (task) {
-                       if (likely(task != curr))
-                               break;
-
-                       if (likely(!(flags & MUTEX_FLAG_PICKUP)))
+                       if (flags & MUTEX_FLAG_PICKUP) {
+                               if (task != curr)
+                                       break;
+                               flags &= ~MUTEX_FLAG_PICKUP;
+                       } else if (handoff) {
+                               if (flags & MUTEX_FLAG_HANDOFF)
+                                       break;
+                               flags |= MUTEX_FLAG_HANDOFF;
+                       } else {
                                break;
-
-                       flags &= ~MUTEX_FLAG_PICKUP;
+                       }
                } else {
-#ifdef CONFIG_DEBUG_MUTEXES
-                       DEBUG_LOCKS_WARN_ON(flags & MUTEX_FLAG_PICKUP);
-#endif
+                       MUTEX_WARN_ON(flags & (MUTEX_FLAG_HANDOFF | MUTEX_FLAG_PICKUP));
+                       task = curr;
                }
 
-               /*
-                * We set the HANDOFF bit, we must make sure it doesn't live
-                * past the point where we acquire it. This would be possible
-                * if we (accidentally) set the bit on an unlocked mutex.
-                */
-               flags &= ~MUTEX_FLAG_HANDOFF;
-
-               old = atomic_long_cmpxchg_acquire(&lock->owner, owner, curr | flags);
-               if (old == owner)
-                       return NULL;
-
-               owner = old;
+               if (atomic_long_try_cmpxchg_acquire(&lock->owner, &owner, task | flags)) {
+                       if (task == curr)
+                               return NULL;
+                       break;
+               }
        }
 
        return __owner_task(owner);
 }
 
+/*
+ * Trylock or set HANDOFF
+ */
+static inline bool __mutex_trylock_or_handoff(struct mutex *lock, bool handoff)
+{
+       return !__mutex_trylock_common(lock, handoff);
+}
+
 /*
  * Actual trylock that will work on any unlocked state.
  */
 static inline bool __mutex_trylock(struct mutex *lock)
 {
-       return !__mutex_trylock_or_owner(lock);
+       return !__mutex_trylock_common(lock, false);
 }
 
 #ifndef CONFIG_DEBUG_LOCK_ALLOC
@@ -168,10 +172,7 @@ static __always_inline bool __mutex_unlock_fast(struct mutex *lock)
 {
        unsigned long curr = (unsigned long)current;
 
-       if (atomic_long_cmpxchg_release(&lock->owner, curr, 0UL) == curr)
-               return true;
-
-       return false;
+       return atomic_long_try_cmpxchg_release(&lock->owner, &curr, 0UL);
 }
 #endif
 
@@ -226,23 +227,18 @@ static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
        unsigned long owner = atomic_long_read(&lock->owner);
 
        for (;;) {
-               unsigned long old, new;
+               unsigned long new;
 
-#ifdef CONFIG_DEBUG_MUTEXES
-               DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
-               DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
-#endif
+               MUTEX_WARN_ON(__owner_task(owner) != current);
+               MUTEX_WARN_ON(owner & MUTEX_FLAG_PICKUP);
 
                new = (owner & MUTEX_FLAG_WAITERS);
                new |= (unsigned long)task;
                if (task)
                        new |= MUTEX_FLAG_PICKUP;
 
-               old = atomic_long_cmpxchg_release(&lock->owner, owner, new);
-               if (old == owner)
+               if (atomic_long_try_cmpxchg_release(&lock->owner, &owner, new))
                        break;
-
-               owner = old;
        }
 }
 
@@ -286,218 +282,18 @@ void __sched mutex_lock(struct mutex *lock)
 EXPORT_SYMBOL(mutex_lock);
 #endif
 
-/*
- * Wait-Die:
- *   The newer transactions are killed when:
- *     It (the new transaction) makes a request for a lock being held
- *     by an older transaction.
- *
- * Wound-Wait:
- *   The newer transactions are wounded when:
- *     An older transaction makes a request for a lock being held by
- *     the newer transaction.
- */
-
-/*
- * Associate the ww_mutex @ww with the context @ww_ctx under which we acquired
- * it.
- */
-static __always_inline void
-ww_mutex_lock_acquired(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
-{
-#ifdef CONFIG_DEBUG_MUTEXES
-       /*
-        * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
-        * but released with a normal mutex_unlock in this call.
-        *
-        * This should never happen, always use ww_mutex_unlock.
-        */
-       DEBUG_LOCKS_WARN_ON(ww->ctx);
-
-       /*
-        * Not quite done after calling ww_acquire_done() ?
-        */
-       DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);
+#include "ww_mutex.h"
 
-       if (ww_ctx->contending_lock) {
-               /*
-                * After -EDEADLK you tried to
-                * acquire a different ww_mutex? Bad!
-                */
-               DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);
-
-               /*
-                * You called ww_mutex_lock after receiving -EDEADLK,
-                * but 'forgot' to unlock everything else first?
-                */
-               DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
-               ww_ctx->contending_lock = NULL;
-       }
-
-       /*
-        * Naughty, using a different class will lead to undefined behavior!
-        */
-       DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
-#endif
-       ww_ctx->acquired++;
-       ww->ctx = ww_ctx;
-}
-
-/*
- * Determine if context @a is 'after' context @b. IOW, @a is a younger
- * transaction than @b and depending on algorithm either needs to wait for
- * @b or die.
- */
-static inline bool __sched
-__ww_ctx_stamp_after(struct ww_acquire_ctx *a, struct ww_acquire_ctx *b)
-{
-
-       return (signed long)(a->stamp - b->stamp) > 0;
-}
-
-/*
- * Wait-Die; wake a younger waiter context (when locks held) such that it can
- * die.
- *
- * Among waiters with context, only the first one can have other locks acquired
- * already (ctx->acquired > 0), because __ww_mutex_add_waiter() and
- * __ww_mutex_check_kill() wake any but the earliest context.
- */
-static bool __sched
-__ww_mutex_die(struct mutex *lock, struct mutex_waiter *waiter,
-              struct ww_acquire_ctx *ww_ctx)
-{
-       if (!ww_ctx->is_wait_die)
-               return false;
-
-       if (waiter->ww_ctx->acquired > 0 &&
-                       __ww_ctx_stamp_after(waiter->ww_ctx, ww_ctx)) {
-               debug_mutex_wake_waiter(lock, waiter);
-               wake_up_process(waiter->task);
-       }
-
-       return true;
-}
-
-/*
- * Wound-Wait; wound a younger @hold_ctx if it holds the lock.
- *
- * Wound the lock holder if there are waiters with older transactions than
- * the lock holders. Even if multiple waiters may wound the lock holder,
- * it's sufficient that only one does.
- */
-static bool __ww_mutex_wound(struct mutex *lock,
-                            struct ww_acquire_ctx *ww_ctx,
-                            struct ww_acquire_ctx *hold_ctx)
-{
-       struct task_struct *owner = __mutex_owner(lock);
-
-       lockdep_assert_held(&lock->wait_lock);
-
-       /*
-        * Possible through __ww_mutex_add_waiter() when we race with
-        * ww_mutex_set_context_fastpath(). In that case we'll get here again
-        * through __ww_mutex_check_waiters().
-        */
-       if (!hold_ctx)
-               return false;
-
-       /*
-        * Can have !owner because of __mutex_unlock_slowpath(), but if owner,
-        * it cannot go away because we'll have FLAG_WAITERS set and hold
-        * wait_lock.
-        */
-       if (!owner)
-               return false;
-
-       if (ww_ctx->acquired > 0 && __ww_ctx_stamp_after(hold_ctx, ww_ctx)) {
-               hold_ctx->wounded = 1;
-
-               /*
-                * wake_up_process() paired with set_current_state()
-                * inserts sufficient barriers to make sure @owner either sees
-                * it's wounded in __ww_mutex_check_kill() or has a
-                * wakeup pending to re-read the wounded state.
-                */
-               if (owner != current)
-                       wake_up_process(owner);
-
-               return true;
-       }
-
-       return false;
-}
-
-/*
- * We just acquired @lock under @ww_ctx, if there are later contexts waiting
- * behind us on the wait-list, check if they need to die, or wound us.
- *
- * See __ww_mutex_add_waiter() for the list-order construction; basically the
- * list is ordered by stamp, smallest (oldest) first.
- *
- * This relies on never mixing wait-die/wound-wait on the same wait-list;
- * which is currently ensured by that being a ww_class property.
- *
- * The current task must not be on the wait list.
- */
-static void __sched
-__ww_mutex_check_waiters(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
-{
-       struct mutex_waiter *cur;
-
-       lockdep_assert_held(&lock->wait_lock);
-
-       list_for_each_entry(cur, &lock->wait_list, list) {
-               if (!cur->ww_ctx)
-                       continue;
-
-               if (__ww_mutex_die(lock, cur, ww_ctx) ||
-                   __ww_mutex_wound(lock, cur->ww_ctx, ww_ctx))
-                       break;
-       }
-}
+#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
 
 /*
- * After acquiring lock with fastpath, where we do not hold wait_lock, set ctx
- * and wake up any waiters so they can recheck.
+ * Trylock variant that returns the owning task on failure.
  */
-static __always_inline void
-ww_mutex_set_context_fastpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
+static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
 {
-       ww_mutex_lock_acquired(lock, ctx);
-
-       /*
-        * The lock->ctx update should be visible on all cores before
-        * the WAITERS check is done, otherwise contended waiters might be
-        * missed. The contended waiters will either see ww_ctx == NULL
-        * and keep spinning, or it will acquire wait_lock, add itself
-        * to waiter list and sleep.
-        */
-       smp_mb(); /* See comments above and below. */
-
-       /*
-        * [W] ww->ctx = ctx        [W] MUTEX_FLAG_WAITERS
-        *     MB                       MB
-        * [R] MUTEX_FLAG_WAITERS   [R] ww->ctx
-        *
-        * The memory barrier above pairs with the memory barrier in
-        * __ww_mutex_add_waiter() and makes sure we either observe ww->ctx
-        * and/or !empty list.
-        */
-       if (likely(!(atomic_long_read(&lock->base.owner) & MUTEX_FLAG_WAITERS)))
-               return;
-
-       /*
-        * Uh oh, we raced in fastpath, check if any of the waiters need to
-        * die or wound us.
-        */
-       spin_lock(&lock->base.wait_lock);
-       __ww_mutex_check_waiters(&lock->base, ctx);
-       spin_unlock(&lock->base.wait_lock);
+       return __mutex_trylock_common(lock, false);
 }
 
-#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
-
 static inline
 bool ww_mutex_spin_on_owner(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
                            struct mutex_waiter *waiter)
@@ -754,171 +550,11 @@ EXPORT_SYMBOL(mutex_unlock);
  */
 void __sched ww_mutex_unlock(struct ww_mutex *lock)
 {
-       /*
-        * The unlocking fastpath is the 0->1 transition from 'locked'
-        * into 'unlocked' state:
-        */
-       if (lock->ctx) {
-#ifdef CONFIG_DEBUG_MUTEXES
-               DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
-#endif
-               if (lock->ctx->acquired > 0)
-                       lock->ctx->acquired--;
-               lock->ctx = NULL;
-       }
-
+       __ww_mutex_unlock(lock);
        mutex_unlock(&lock->base);
 }
 EXPORT_SYMBOL(ww_mutex_unlock);
 
-
-static __always_inline int __sched
-__ww_mutex_kill(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
-{
-       if (ww_ctx->acquired > 0) {
-#ifdef CONFIG_DEBUG_MUTEXES
-               struct ww_mutex *ww;
-
-               ww = container_of(lock, struct ww_mutex, base);
-               DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock);
-               ww_ctx->contending_lock = ww;
-#endif
-               return -EDEADLK;
-       }
-
-       return 0;
-}
-
-
-/*
- * Check the wound condition for the current lock acquire.
- *
- * Wound-Wait: If we're wounded, kill ourself.
- *
- * Wait-Die: If we're trying to acquire a lock already held by an older
- *           context, kill ourselves.
- *
- * Since __ww_mutex_add_waiter() orders the wait-list on stamp, we only have to
- * look at waiters before us in the wait-list.
- */
-static inline int __sched
-__ww_mutex_check_kill(struct mutex *lock, struct mutex_waiter *waiter,
-                     struct ww_acquire_ctx *ctx)
-{
-       struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
-       struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);
-       struct mutex_waiter *cur;
-
-       if (ctx->acquired == 0)
-               return 0;
-
-       if (!ctx->is_wait_die) {
-               if (ctx->wounded)
-                       return __ww_mutex_kill(lock, ctx);
-
-               return 0;
-       }
-
-       if (hold_ctx && __ww_ctx_stamp_after(ctx, hold_ctx))
-               return __ww_mutex_kill(lock, ctx);
-
-       /*
-        * If there is a waiter in front of us that has a context, then its
-        * stamp is earlier than ours and we must kill ourself.
-        */
-       cur = waiter;
-       list_for_each_entry_continue_reverse(cur, &lock->wait_list, list) {
-               if (!cur->ww_ctx)
-                       continue;
-
-               return __ww_mutex_kill(lock, ctx);
-       }
-
-       return 0;
-}
-
-/*
- * Add @waiter to the wait-list, keep the wait-list ordered by stamp, smallest
- * first. Such that older contexts are preferred to acquire the lock over
- * younger contexts.
- *
- * Waiters without context are interspersed in FIFO order.
- *
- * Furthermore, for Wait-Die kill ourself immediately when possible (there are
- * older contexts already waiting) to avoid unnecessary waiting and for
- * Wound-Wait ensure we wound the owning context when it is younger.
- */
-static inline int __sched
-__ww_mutex_add_waiter(struct mutex_waiter *waiter,
-                     struct mutex *lock,
-                     struct ww_acquire_ctx *ww_ctx)
-{
-       struct mutex_waiter *cur;
-       struct list_head *pos;
-       bool is_wait_die;
-
-       if (!ww_ctx) {
-               __mutex_add_waiter(lock, waiter, &lock->wait_list);
-               return 0;
-       }
-
-       is_wait_die = ww_ctx->is_wait_die;
-
-       /*
-        * Add the waiter before the first waiter with a higher stamp.
-        * Waiters without a context are skipped to avoid starving
-        * them. Wait-Die waiters may die here. Wound-Wait waiters
-        * never die here, but they are sorted in stamp order and
-        * may wound the lock holder.
-        */
-       pos = &lock->wait_list;
-       list_for_each_entry_reverse(cur, &lock->wait_list, list) {
-               if (!cur->ww_ctx)
-                       continue;
-
-               if (__ww_ctx_stamp_after(ww_ctx, cur->ww_ctx)) {
-                       /*
-                        * Wait-Die: if we find an older context waiting, there
-                        * is no point in queueing behind it, as we'd have to
-                        * die the moment it would acquire the lock.
-                        */
-                       if (is_wait_die) {
-                               int ret = __ww_mutex_kill(lock, ww_ctx);
-
-                               if (ret)
-                                       return ret;
-                       }
-
-                       break;
-               }
-
-               pos = &cur->list;
-
-               /* Wait-Die: ensure younger waiters die. */
-               __ww_mutex_die(lock, cur, ww_ctx);
-       }
-
-       __mutex_add_waiter(lock, waiter, pos);
-
-       /*
-        * Wound-Wait: if we're blocking on a mutex owned by a younger context,
-        * wound that such that we might proceed.
-        */
-       if (!is_wait_die) {
-               struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
-
-               /*
-                * See ww_mutex_set_context_fastpath(). Orders setting
-                * MUTEX_FLAG_WAITERS vs the ww->ctx load,
-                * such that either we or the fastpath will wound @ww->ctx.
-                */
-               smp_mb();
-               __ww_mutex_wound(lock, ww_ctx, ww->ctx);
-       }
-
-       return 0;
-}
-
 /*
  * Lock a mutex (possibly interruptible), slowpath:
  */
@@ -928,7 +564,6 @@ __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclas
                    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
 {
        struct mutex_waiter waiter;
-       bool first = false;
        struct ww_mutex *ww;
        int ret;
 
@@ -937,9 +572,7 @@ __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclas
 
        might_sleep();
 
-#ifdef CONFIG_DEBUG_MUTEXES
-       DEBUG_LOCKS_WARN_ON(lock->magic != lock);
-#endif
+       MUTEX_WARN_ON(lock->magic != lock);
 
        ww = container_of(lock, struct ww_mutex, base);
        if (ww_ctx) {
@@ -953,6 +586,10 @@ __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclas
                 */
                if (ww_ctx->acquired == 0)
                        ww_ctx->wounded = 0;
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+               nest_lock = &ww_ctx->dep_map;
+#endif
        }
 
        preempt_disable();
@@ -968,7 +605,7 @@ __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclas
                return 0;
        }
 
-       spin_lock(&lock->wait_lock);
+       raw_spin_lock(&lock->wait_lock);
        /*
         * After waiting to acquire the wait_lock, try again.
         */
@@ -980,17 +617,15 @@ __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclas
        }
 
        debug_mutex_lock_common(lock, &waiter);
+       waiter.task = current;
+       if (use_ww_ctx)
+               waiter.ww_ctx = ww_ctx;
 
        lock_contended(&lock->dep_map, ip);
 
        if (!use_ww_ctx) {
                /* add waiting tasks to the end of the waitqueue (FIFO): */
                __mutex_add_waiter(lock, &waiter, &lock->wait_list);
-
-
-#ifdef CONFIG_DEBUG_MUTEXES
-               waiter.ww_ctx = MUTEX_POISON_WW_CTX;
-#endif
        } else {
                /*
                 * Add in stamp order, waking up waiters that must kill
@@ -999,14 +634,12 @@ __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclas
                ret = __ww_mutex_add_waiter(&waiter, lock, ww_ctx);
                if (ret)
                        goto err_early_kill;
-
-               waiter.ww_ctx = ww_ctx;
        }
 
-       waiter.task = current;
-
        set_current_state(state);
        for (;;) {
+               bool first;
+
                /*
                 * Once we hold wait_lock, we're serialized against
                 * mutex_unlock() handing the lock off to us, do a trylock
@@ -1032,18 +665,10 @@ __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclas
                                goto err;
                }
 
-               spin_unlock(&lock->wait_lock);
+               raw_spin_unlock(&lock->wait_lock);
                schedule_preempt_disabled();
 
-               /*
-                * ww_mutex needs to always recheck its position since its waiter
-                * list is not FIFO ordered.
-                */
-               if (ww_ctx || !first) {
-                       first = __mutex_waiter_is_first(lock, &waiter);
-                       if (first)
-                               __mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
-               }
+               first = __mutex_waiter_is_first(lock, &waiter);
 
                set_current_state(state);
                /*
@@ -1051,13 +676,13 @@ __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclas
                 * state back to RUNNING and fall through the next schedule(),
                 * or we must see its unlock and acquire.
                 */
-               if (__mutex_trylock(lock) ||
+               if (__mutex_trylock_or_handoff(lock, first) ||
                    (first && mutex_optimistic_spin(lock, ww_ctx, &waiter)))
                        break;
 
-               spin_lock(&lock->wait_lock);
+               raw_spin_lock(&lock->wait_lock);
        }
-       spin_lock(&lock->wait_lock);
+       raw_spin_lock(&lock->wait_lock);
 acquired:
        __set_current_state(TASK_RUNNING);
 
@@ -1082,7 +707,7 @@ skip_wait:
        if (ww_ctx)
                ww_mutex_lock_acquired(ww, ww_ctx);
 
-       spin_unlock(&lock->wait_lock);
+       raw_spin_unlock(&lock->wait_lock);
        preempt_enable();
        return 0;
 
@@ -1090,7 +715,7 @@ err:
        __set_current_state(TASK_RUNNING);
        __mutex_remove_waiter(lock, &waiter);
 err_early_kill:
-       spin_unlock(&lock->wait_lock);
+       raw_spin_unlock(&lock->wait_lock);
        debug_mutex_free_waiter(&waiter);
        mutex_release(&lock->dep_map, ip);
        preempt_enable();
@@ -1106,10 +731,9 @@ __mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass,
 
 static int __sched
 __ww_mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass,
-               struct lockdep_map *nest_lock, unsigned long ip,
-               struct ww_acquire_ctx *ww_ctx)
+               unsigned long ip, struct ww_acquire_ctx *ww_ctx)
 {
-       return __mutex_lock_common(lock, state, subclass, nest_lock, ip, ww_ctx, true);
+       return __mutex_lock_common(lock, state, subclass, NULL, ip, ww_ctx, true);
 }
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -1189,8 +813,7 @@ ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 
        might_sleep();
        ret =  __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE,
-                              0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
-                              ctx);
+                              0, _RET_IP_, ctx);
        if (!ret && ctx && ctx->acquired > 1)
                return ww_mutex_deadlock_injection(lock, ctx);
 
@@ -1205,8 +828,7 @@ ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 
        might_sleep();
        ret = __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE,
-                             0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
-                             ctx);
+                             0, _RET_IP_, ctx);
 
        if (!ret && ctx && ctx->acquired > 1)
                return ww_mutex_deadlock_injection(lock, ctx);
@@ -1237,29 +859,21 @@ static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigne
         */
        owner = atomic_long_read(&lock->owner);
        for (;;) {
-               unsigned long old;
-
-#ifdef CONFIG_DEBUG_MUTEXES
-               DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
-               DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
-#endif
+               MUTEX_WARN_ON(__owner_task(owner) != current);
+               MUTEX_WARN_ON(owner & MUTEX_FLAG_PICKUP);
 
                if (owner & MUTEX_FLAG_HANDOFF)
                        break;
 
-               old = atomic_long_cmpxchg_release(&lock->owner, owner,
-                                                 __owner_flags(owner));
-               if (old == owner) {
+               if (atomic_long_try_cmpxchg_release(&lock->owner, &owner, __owner_flags(owner))) {
                        if (owner & MUTEX_FLAG_WAITERS)
                                break;
 
                        return;
                }
-
-               owner = old;
        }
 
-       spin_lock(&lock->wait_lock);
+       raw_spin_lock(&lock->wait_lock);
        debug_mutex_unlock(lock);
        if (!list_empty(&lock->wait_list)) {
                /* get the first entry from the wait-list: */
@@ -1276,7 +890,7 @@ static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigne
        if (owner & MUTEX_FLAG_HANDOFF)
                __mutex_handoff(lock, next);
 
-       spin_unlock(&lock->wait_lock);
+       raw_spin_unlock(&lock->wait_lock);
 
        wake_up_q(&wake_q);
 }
@@ -1380,7 +994,7 @@ __mutex_lock_interruptible_slowpath(struct mutex *lock)
 static noinline int __sched
 __ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 {
-       return __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE, 0, NULL,
+       return __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE, 0,
                               _RET_IP_, ctx);
 }
 
@@ -1388,7 +1002,7 @@ static noinline int __sched
 __ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
                                            struct ww_acquire_ctx *ctx)
 {
-       return __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE, 0, NULL,
+       return __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE, 0,
                               _RET_IP_, ctx);
 }
 
@@ -1412,9 +1026,7 @@ int __sched mutex_trylock(struct mutex *lock)
 {
        bool locked;
 
-#ifdef CONFIG_DEBUG_MUTEXES
-       DEBUG_LOCKS_WARN_ON(lock->magic != lock);
-#endif
+       MUTEX_WARN_ON(lock->magic != lock);
 
        locked = __mutex_trylock(lock);
        if (locked)
@@ -1455,7 +1067,8 @@ ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 }
 EXPORT_SYMBOL(ww_mutex_lock_interruptible);
 
-#endif
+#endif /* !CONFIG_DEBUG_LOCK_ALLOC */
+#endif /* !CONFIG_PREEMPT_RT */
 
 /**
  * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
index f0c710b..0b2a79c 100644 (file)
@@ -5,19 +5,41 @@
  * started by Ingo Molnar:
  *
  *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
- *
- * This file contains mutex debugging related internal prototypes, for the
- * !CONFIG_DEBUG_MUTEXES case. Most of them are NOPs:
  */
 
-#define debug_mutex_wake_waiter(lock, waiter)          do { } while (0)
-#define debug_mutex_free_waiter(waiter)                        do { } while (0)
-#define debug_mutex_add_waiter(lock, waiter, ti)       do { } while (0)
-#define debug_mutex_remove_waiter(lock, waiter, ti)     do { } while (0)
-#define debug_mutex_unlock(lock)                       do { } while (0)
-#define debug_mutex_init(lock, name, key)              do { } while (0)
+/*
+ * This is the control structure for tasks blocked on mutex, which resides
+ * on the blocked task's kernel stack:
+ */
+struct mutex_waiter {
+       struct list_head        list;
+       struct task_struct      *task;
+       struct ww_acquire_ctx   *ww_ctx;
+#ifdef CONFIG_DEBUG_MUTEXES
+       void                    *magic;
+#endif
+};
 
-static inline void
-debug_mutex_lock_common(struct mutex *lock, struct mutex_waiter *waiter)
-{
-}
+#ifdef CONFIG_DEBUG_MUTEXES
+extern void debug_mutex_lock_common(struct mutex *lock,
+                                   struct mutex_waiter *waiter);
+extern void debug_mutex_wake_waiter(struct mutex *lock,
+                                   struct mutex_waiter *waiter);
+extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
+extern void debug_mutex_add_waiter(struct mutex *lock,
+                                  struct mutex_waiter *waiter,
+                                  struct task_struct *task);
+extern void debug_mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
+                                     struct task_struct *task);
+extern void debug_mutex_unlock(struct mutex *lock);
+extern void debug_mutex_init(struct mutex *lock, const char *name,
+                            struct lock_class_key *key);
+#else /* CONFIG_DEBUG_MUTEXES */
+# define debug_mutex_lock_common(lock, waiter)         do { } while (0)
+# define debug_mutex_wake_waiter(lock, waiter)         do { } while (0)
+# define debug_mutex_free_waiter(waiter)               do { } while (0)
+# define debug_mutex_add_waiter(lock, waiter, ti)      do { } while (0)
+# define debug_mutex_remove_waiter(lock, waiter, ti)   do { } while (0)
+# define debug_mutex_unlock(lock)                      do { } while (0)
+# define debug_mutex_init(lock, name, key)             do { } while (0)
+#endif /* !CONFIG_DEBUG_MUTEXES */
index b5d9bb5..8eabdc7 100644 (file)
@@ -8,20 +8,58 @@
  *  Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
  *  Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt
  *  Copyright (C) 2006 Esben Nielsen
+ * Adaptive Spinlocks:
+ *  Copyright (C) 2008 Novell, Inc., Gregory Haskins, Sven Dietrich,
+ *                                  and Peter Morreale,
+ * Adaptive Spinlocks simplification:
+ *  Copyright (C) 2008 Red Hat, Inc., Steven Rostedt <srostedt@redhat.com>
  *
  *  See Documentation/locking/rt-mutex-design.rst for details.
  */
-#include <linux/spinlock.h>
-#include <linux/export.h>
+#include <linux/sched.h>
+#include <linux/sched/debug.h>
+#include <linux/sched/deadline.h>
 #include <linux/sched/signal.h>
 #include <linux/sched/rt.h>
-#include <linux/sched/deadline.h>
 #include <linux/sched/wake_q.h>
-#include <linux/sched/debug.h>
-#include <linux/timer.h>
+#include <linux/ww_mutex.h>
 
 #include "rtmutex_common.h"
 
+#ifndef WW_RT
+# define build_ww_mutex()      (false)
+# define ww_container_of(rtm)  NULL
+
+static inline int __ww_mutex_add_waiter(struct rt_mutex_waiter *waiter,
+                                       struct rt_mutex *lock,
+                                       struct ww_acquire_ctx *ww_ctx)
+{
+       return 0;
+}
+
+static inline void __ww_mutex_check_waiters(struct rt_mutex *lock,
+                                           struct ww_acquire_ctx *ww_ctx)
+{
+}
+
+static inline void ww_mutex_lock_acquired(struct ww_mutex *lock,
+                                         struct ww_acquire_ctx *ww_ctx)
+{
+}
+
+static inline int __ww_mutex_check_kill(struct rt_mutex *lock,
+                                       struct rt_mutex_waiter *waiter,
+                                       struct ww_acquire_ctx *ww_ctx)
+{
+       return 0;
+}
+
+#else
+# define build_ww_mutex()      (true)
+# define ww_container_of(rtm)  container_of(rtm, struct ww_mutex, base)
+# include "ww_mutex.h"
+#endif
+
 /*
  * lock->owner state tracking:
  *
@@ -50,7 +88,7 @@
  */
 
 static __always_inline void
-rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner)
+rt_mutex_set_owner(struct rt_mutex_base *lock, struct task_struct *owner)
 {
        unsigned long val = (unsigned long)owner;
 
@@ -60,13 +98,13 @@ rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner)
        WRITE_ONCE(lock->owner, (struct task_struct *)val);
 }
 
-static __always_inline void clear_rt_mutex_waiters(struct rt_mutex *lock)
+static __always_inline void clear_rt_mutex_waiters(struct rt_mutex_base *lock)
 {
        lock->owner = (struct task_struct *)
                        ((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
 }
 
-static __always_inline void fixup_rt_mutex_waiters(struct rt_mutex *lock)
+static __always_inline void fixup_rt_mutex_waiters(struct rt_mutex_base *lock)
 {
        unsigned long owner, *p = (unsigned long *) &lock->owner;
 
@@ -141,15 +179,26 @@ static __always_inline void fixup_rt_mutex_waiters(struct rt_mutex *lock)
  * set up.
  */
 #ifndef CONFIG_DEBUG_RT_MUTEXES
-# define rt_mutex_cmpxchg_acquire(l,c,n) (cmpxchg_acquire(&l->owner, c, n) == c)
-# define rt_mutex_cmpxchg_release(l,c,n) (cmpxchg_release(&l->owner, c, n) == c)
+static __always_inline bool rt_mutex_cmpxchg_acquire(struct rt_mutex_base *lock,
+                                                    struct task_struct *old,
+                                                    struct task_struct *new)
+{
+       return try_cmpxchg_acquire(&lock->owner, &old, new);
+}
+
+static __always_inline bool rt_mutex_cmpxchg_release(struct rt_mutex_base *lock,
+                                                    struct task_struct *old,
+                                                    struct task_struct *new)
+{
+       return try_cmpxchg_release(&lock->owner, &old, new);
+}
 
 /*
  * Callers must hold the ->wait_lock -- which is the whole purpose as we force
  * all future threads that attempt to [Rmw] the lock to the slowpath. As such
  * relaxed semantics suffice.
  */
-static __always_inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
+static __always_inline void mark_rt_mutex_waiters(struct rt_mutex_base *lock)
 {
        unsigned long owner, *p = (unsigned long *) &lock->owner;
 
@@ -165,7 +214,7 @@ static __always_inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
  * 2) Drop lock->wait_lock
  * 3) Try to unlock the lock with cmpxchg
  */
-static __always_inline bool unlock_rt_mutex_safe(struct rt_mutex *lock,
+static __always_inline bool unlock_rt_mutex_safe(struct rt_mutex_base *lock,
                                                 unsigned long flags)
        __releases(lock->wait_lock)
 {
@@ -201,10 +250,22 @@ static __always_inline bool unlock_rt_mutex_safe(struct rt_mutex *lock,
 }
 
 #else
-# define rt_mutex_cmpxchg_acquire(l,c,n)       (0)
-# define rt_mutex_cmpxchg_release(l,c,n)       (0)
+static __always_inline bool rt_mutex_cmpxchg_acquire(struct rt_mutex_base *lock,
+                                                    struct task_struct *old,
+                                                    struct task_struct *new)
+{
+       return false;
+
+}
+
+static __always_inline bool rt_mutex_cmpxchg_release(struct rt_mutex_base *lock,
+                                                    struct task_struct *old,
+                                                    struct task_struct *new)
+{
+       return false;
+}
 
-static __always_inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
+static __always_inline void mark_rt_mutex_waiters(struct rt_mutex_base *lock)
 {
        lock->owner = (struct task_struct *)
                        ((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS);
@@ -213,7 +274,7 @@ static __always_inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
 /*
  * Simple slow path only version: lock->owner is protected by lock->wait_lock.
  */
-static __always_inline bool unlock_rt_mutex_safe(struct rt_mutex *lock,
+static __always_inline bool unlock_rt_mutex_safe(struct rt_mutex_base *lock,
                                                 unsigned long flags)
        __releases(lock->wait_lock)
 {
@@ -223,11 +284,28 @@ static __always_inline bool unlock_rt_mutex_safe(struct rt_mutex *lock,
 }
 #endif
 
+static __always_inline int __waiter_prio(struct task_struct *task)
+{
+       int prio = task->prio;
+
+       if (!rt_prio(prio))
+               return DEFAULT_PRIO;
+
+       return prio;
+}
+
+static __always_inline void
+waiter_update_prio(struct rt_mutex_waiter *waiter, struct task_struct *task)
+{
+       waiter->prio = __waiter_prio(task);
+       waiter->deadline = task->dl.deadline;
+}
+
 /*
  * Only use with rt_mutex_waiter_{less,equal}()
  */
 #define task_to_waiter(p)      \
-       &(struct rt_mutex_waiter){ .prio = (p)->prio, .deadline = (p)->dl.deadline }
+       &(struct rt_mutex_waiter){ .prio = __waiter_prio(p), .deadline = (p)->dl.deadline }
 
 static __always_inline int rt_mutex_waiter_less(struct rt_mutex_waiter *left,
                                                struct rt_mutex_waiter *right)
@@ -265,22 +343,63 @@ static __always_inline int rt_mutex_waiter_equal(struct rt_mutex_waiter *left,
        return 1;
 }
 
+static inline bool rt_mutex_steal(struct rt_mutex_waiter *waiter,
+                                 struct rt_mutex_waiter *top_waiter)
+{
+       if (rt_mutex_waiter_less(waiter, top_waiter))
+               return true;
+
+#ifdef RT_MUTEX_BUILD_SPINLOCKS
+       /*
+        * Note that RT tasks are excluded from same priority (lateral)
+        * steals to prevent the introduction of an unbounded latency.
+        */
+       if (rt_prio(waiter->prio) || dl_prio(waiter->prio))
+               return false;
+
+       return rt_mutex_waiter_equal(waiter, top_waiter);
+#else
+       return false;
+#endif
+}
+
 #define __node_2_waiter(node) \
        rb_entry((node), struct rt_mutex_waiter, tree_entry)
 
 static __always_inline bool __waiter_less(struct rb_node *a, const struct rb_node *b)
 {
-       return rt_mutex_waiter_less(__node_2_waiter(a), __node_2_waiter(b));
+       struct rt_mutex_waiter *aw = __node_2_waiter(a);
+       struct rt_mutex_waiter *bw = __node_2_waiter(b);
+
+       if (rt_mutex_waiter_less(aw, bw))
+               return 1;
+
+       if (!build_ww_mutex())
+               return 0;
+
+       if (rt_mutex_waiter_less(bw, aw))
+               return 0;
+
+       /* NOTE: relies on waiter->ww_ctx being set before insertion */
+       if (aw->ww_ctx) {
+               if (!bw->ww_ctx)
+                       return 1;
+
+               return (signed long)(aw->ww_ctx->stamp -
+                                    bw->ww_ctx->stamp) < 0;
+       }
+
+       return 0;
 }
 
 static __always_inline void
-rt_mutex_enqueue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
+rt_mutex_enqueue(struct rt_mutex_base *lock, struct rt_mutex_waiter *waiter)
 {
        rb_add_cached(&waiter->tree_entry, &lock->waiters, __waiter_less);
 }
 
 static __always_inline void
-rt_mutex_dequeue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
+rt_mutex_dequeue(struct rt_mutex_base *lock, struct rt_mutex_waiter *waiter)
 {
        if (RB_EMPTY_NODE(&waiter->tree_entry))
                return;
@@ -326,6 +445,35 @@ static __always_inline void rt_mutex_adjust_prio(struct task_struct *p)
        rt_mutex_setprio(p, pi_task);
 }
 
+/* RT mutex specific wake_q wrappers */
+static __always_inline void rt_mutex_wake_q_add(struct rt_wake_q_head *wqh,
+                                               struct rt_mutex_waiter *w)
+{
+       if (IS_ENABLED(CONFIG_PREEMPT_RT) && w->wake_state != TASK_NORMAL) {
+               if (IS_ENABLED(CONFIG_PROVE_LOCKING))
+                       WARN_ON_ONCE(wqh->rtlock_task);
+               get_task_struct(w->task);
+               wqh->rtlock_task = w->task;
+       } else {
+               wake_q_add(&wqh->head, w->task);
+       }
+}
+
+static __always_inline void rt_mutex_wake_up_q(struct rt_wake_q_head *wqh)
+{
+       if (IS_ENABLED(CONFIG_PREEMPT_RT) && wqh->rtlock_task) {
+               wake_up_state(wqh->rtlock_task, TASK_RTLOCK_WAIT);
+               put_task_struct(wqh->rtlock_task);
+               wqh->rtlock_task = NULL;
+       }
+
+       if (!wake_q_empty(&wqh->head))
+               wake_up_q(&wqh->head);
+
+       /* Pairs with preempt_disable() in mark_wakeup_next_waiter() */
+       preempt_enable();
+}
+
 /*
  * Deadlock detection is conditional:
  *
@@ -343,17 +491,12 @@ static __always_inline bool
 rt_mutex_cond_detect_deadlock(struct rt_mutex_waiter *waiter,
                              enum rtmutex_chainwalk chwalk)
 {
-       if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEX))
+       if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES))
                return waiter != NULL;
        return chwalk == RT_MUTEX_FULL_CHAINWALK;
 }
 
-/*
- * Max number of times we'll walk the boosting chain:
- */
-int max_lock_depth = 1024;
-
-static __always_inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p)
+static __always_inline struct rt_mutex_base *task_blocked_on_lock(struct task_struct *p)
 {
        return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL;
 }
@@ -423,15 +566,15 @@ static __always_inline struct rt_mutex *task_blocked_on_lock(struct task_struct
  */
 static int __sched rt_mutex_adjust_prio_chain(struct task_struct *task,
                                              enum rtmutex_chainwalk chwalk,
-                                             struct rt_mutex *orig_lock,
-                                             struct rt_mutex *next_lock,
+                                             struct rt_mutex_base *orig_lock,
+                                             struct rt_mutex_base *next_lock,
                                              struct rt_mutex_waiter *orig_waiter,
                                              struct task_struct *top_task)
 {
        struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter;
        struct rt_mutex_waiter *prerequeue_top_waiter;
        int ret = 0, depth = 0;
-       struct rt_mutex *lock;
+       struct rt_mutex_base *lock;
        bool detect_deadlock;
        bool requeue = true;
 
@@ -513,6 +656,31 @@ static int __sched rt_mutex_adjust_prio_chain(struct task_struct *task,
        if (next_lock != waiter->lock)
                goto out_unlock_pi;
 
+       /*
+        * There could be 'spurious' loops in the lock graph due to ww_mutex,
+        * consider:
+        *
+        *   P1: A, ww_A, ww_B
+        *   P2: ww_B, ww_A
+        *   P3: A
+        *
+        * P3 should not return -EDEADLK because it gets trapped in the cycle
+        * created by P1 and P2 (which will resolve -- and runs into
+        * max_lock_depth above). Therefore disable detect_deadlock such that
+        * the below termination condition can trigger once all relevant tasks
+        * are boosted.
+        *
+        * Even when we start with ww_mutex we can disable deadlock detection,
+        * since we would supress a ww_mutex induced deadlock at [6] anyway.
+        * Supressing it here however is not sufficient since we might still
+        * hit [6] due to adjustment driven iteration.
+        *
+        * NOTE: if someone were to create a deadlock between 2 ww_classes we'd
+        * utterly fail to report it; lockdep should.
+        */
+       if (IS_ENABLED(CONFIG_PREEMPT_RT) && waiter->ww_ctx && detect_deadlock)
+               detect_deadlock = false;
+
        /*
         * Drop out, when the task has no waiters. Note,
         * top_waiter can be NULL, when we are in the deboosting
@@ -574,8 +742,21 @@ static int __sched rt_mutex_adjust_prio_chain(struct task_struct *task,
         * walk, we detected a deadlock.
         */
        if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
-               raw_spin_unlock(&lock->wait_lock);
                ret = -EDEADLK;
+
+               /*
+                * When the deadlock is due to ww_mutex; also see above. Don't
+                * report the deadlock and instead let the ww_mutex wound/die
+                * logic pick which of the contending threads gets -EDEADLK.
+                *
+                * NOTE: assumes the cycle only contains a single ww_class; any
+                * other configuration and we fail to report; also, see
+                * lockdep.
+                */
+               if (IS_ENABLED(CONFIG_PREEMPT_RT) && orig_waiter->ww_ctx)
+                       ret = 0;
+
+               raw_spin_unlock(&lock->wait_lock);
                goto out_unlock_pi;
        }
 
@@ -653,8 +834,7 @@ static int __sched rt_mutex_adjust_prio_chain(struct task_struct *task,
         * serializes all pi_waiters access and rb_erase() does not care about
         * the values of the node being removed.
         */
-       waiter->prio = task->prio;
-       waiter->deadline = task->dl.deadline;
+       waiter_update_prio(waiter, task);
 
        rt_mutex_enqueue(lock, waiter);
 
@@ -676,7 +856,7 @@ static int __sched rt_mutex_adjust_prio_chain(struct task_struct *task,
                 * to get the lock.
                 */
                if (prerequeue_top_waiter != rt_mutex_top_waiter(lock))
-                       wake_up_process(rt_mutex_top_waiter(lock)->task);
+                       wake_up_state(waiter->task, waiter->wake_state);
                raw_spin_unlock_irq(&lock->wait_lock);
                return 0;
        }
@@ -779,7 +959,7 @@ static int __sched rt_mutex_adjust_prio_chain(struct task_struct *task,
  *         callsite called task_blocked_on_lock(), otherwise NULL
  */
 static int __sched
-try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
+try_to_take_rt_mutex(struct rt_mutex_base *lock, struct task_struct *task,
                     struct rt_mutex_waiter *waiter)
 {
        lockdep_assert_held(&lock->wait_lock);
@@ -815,19 +995,21 @@ try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
         * trylock attempt.
         */
        if (waiter) {
-               /*
-                * If waiter is not the highest priority waiter of
-                * @lock, give up.
-                */
-               if (waiter != rt_mutex_top_waiter(lock))
-                       return 0;
+               struct rt_mutex_waiter *top_waiter = rt_mutex_top_waiter(lock);
 
                /*
-                * We can acquire the lock. Remove the waiter from the
-                * lock waiters tree.
+                * If waiter is the highest priority waiter of @lock,
+                * or allowed to steal it, take it over.
                 */
-               rt_mutex_dequeue(lock, waiter);
-
+               if (waiter == top_waiter || rt_mutex_steal(waiter, top_waiter)) {
+                       /*
+                        * We can acquire the lock. Remove the waiter from the
+                        * lock waiters tree.
+                        */
+                       rt_mutex_dequeue(lock, waiter);
+               } else {
+                       return 0;
+               }
        } else {
                /*
                 * If the lock has waiters already we check whether @task is
@@ -838,13 +1020,9 @@ try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
                 * not need to be dequeued.
                 */
                if (rt_mutex_has_waiters(lock)) {
-                       /*
-                        * If @task->prio is greater than or equal to
-                        * the top waiter priority (kernel view),
-                        * @task lost.
-                        */
-                       if (!rt_mutex_waiter_less(task_to_waiter(task),
-                                                 rt_mutex_top_waiter(lock)))
+                       /* Check whether the trylock can steal it. */
+                       if (!rt_mutex_steal(task_to_waiter(task),
+                                           rt_mutex_top_waiter(lock)))
                                return 0;
 
                        /*
@@ -897,14 +1075,15 @@ takeit:
  *
  * This must be called with lock->wait_lock held and interrupts disabled
  */
-static int __sched task_blocks_on_rt_mutex(struct rt_mutex *lock,
+static int __sched task_blocks_on_rt_mutex(struct rt_mutex_base *lock,
                                           struct rt_mutex_waiter *waiter,
                                           struct task_struct *task,
+                                          struct ww_acquire_ctx *ww_ctx,
                                           enum rtmutex_chainwalk chwalk)
 {
        struct task_struct *owner = rt_mutex_owner(lock);
        struct rt_mutex_waiter *top_waiter = waiter;
-       struct rt_mutex *next_lock;
+       struct rt_mutex_base *next_lock;
        int chain_walk = 0, res;
 
        lockdep_assert_held(&lock->wait_lock);
@@ -924,8 +1103,7 @@ static int __sched task_blocks_on_rt_mutex(struct rt_mutex *lock,
        raw_spin_lock(&task->pi_lock);
        waiter->task = task;
        waiter->lock = lock;
-       waiter->prio = task->prio;
-       waiter->deadline = task->dl.deadline;
+       waiter_update_prio(waiter, task);
 
        /* Get the top priority waiter on the lock */
        if (rt_mutex_has_waiters(lock))
@@ -936,6 +1114,21 @@ static int __sched task_blocks_on_rt_mutex(struct rt_mutex *lock,
 
        raw_spin_unlock(&task->pi_lock);
 
+       if (build_ww_mutex() && ww_ctx) {
+               struct rt_mutex *rtm;
+
+               /* Check whether the waiter should back out immediately */
+               rtm = container_of(lock, struct rt_mutex, rtmutex);
+               res = __ww_mutex_add_waiter(waiter, rtm, ww_ctx);
+               if (res) {
+                       raw_spin_lock(&task->pi_lock);
+                       rt_mutex_dequeue(lock, waiter);
+                       task->pi_blocked_on = NULL;
+                       raw_spin_unlock(&task->pi_lock);
+                       return res;
+               }
+       }
+
        if (!owner)
                return 0;
 
@@ -986,8 +1179,8 @@ static int __sched task_blocks_on_rt_mutex(struct rt_mutex *lock,
  *
  * Called with lock->wait_lock held and interrupts disabled.
  */
-static void __sched mark_wakeup_next_waiter(struct wake_q_head *wake_q,
-                                           struct rt_mutex *lock)
+static void __sched mark_wakeup_next_waiter(struct rt_wake_q_head *wqh,
+                                           struct rt_mutex_base *lock)
 {
        struct rt_mutex_waiter *waiter;
 
@@ -1023,25 +1216,201 @@ static void __sched mark_wakeup_next_waiter(struct wake_q_head *wake_q,
         * deboost but before waking our donor task, hence the preempt_disable()
         * before unlock.
         *
-        * Pairs with preempt_enable() in rt_mutex_postunlock();
+        * Pairs with preempt_enable() in rt_mutex_wake_up_q();
         */
        preempt_disable();
-       wake_q_add(wake_q, waiter->task);
+       rt_mutex_wake_q_add(wqh, waiter);
        raw_spin_unlock(&current->pi_lock);
 }
 
+static int __sched __rt_mutex_slowtrylock(struct rt_mutex_base *lock)
+{
+       int ret = try_to_take_rt_mutex(lock, current, NULL);
+
+       /*
+        * try_to_take_rt_mutex() sets the lock waiters bit
+        * unconditionally. Clean this up.
+        */
+       fixup_rt_mutex_waiters(lock);
+
+       return ret;
+}
+
+/*
+ * Slow path try-lock function:
+ */
+static int __sched rt_mutex_slowtrylock(struct rt_mutex_base *lock)
+{
+       unsigned long flags;
+       int ret;
+
+       /*
+        * If the lock already has an owner we fail to get the lock.
+        * This can be done without taking the @lock->wait_lock as
+        * it is only being read, and this is a trylock anyway.
+        */
+       if (rt_mutex_owner(lock))
+               return 0;
+
+       /*
+        * The mutex has currently no owner. Lock the wait lock and try to
+        * acquire the lock. We use irqsave here to support early boot calls.
+        */
+       raw_spin_lock_irqsave(&lock->wait_lock, flags);
+
+       ret = __rt_mutex_slowtrylock(lock);
+
+       raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
+
+       return ret;
+}
+
+static __always_inline int __rt_mutex_trylock(struct rt_mutex_base *lock)
+{
+       if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
+               return 1;
+
+       return rt_mutex_slowtrylock(lock);
+}
+
+/*
+ * Slow path to release a rt-mutex.
+ */
+static void __sched rt_mutex_slowunlock(struct rt_mutex_base *lock)
+{
+       DEFINE_RT_WAKE_Q(wqh);
+       unsigned long flags;
+
+       /* irqsave required to support early boot calls */
+       raw_spin_lock_irqsave(&lock->wait_lock, flags);
+
+       debug_rt_mutex_unlock(lock);
+
+       /*
+        * We must be careful here if the fast path is enabled. If we
+        * have no waiters queued we cannot set owner to NULL here
+        * because of:
+        *
+        * foo->lock->owner = NULL;
+        *                      rtmutex_lock(foo->lock);   <- fast path
+        *                      free = atomic_dec_and_test(foo->refcnt);
+        *                      rtmutex_unlock(foo->lock); <- fast path
+        *                      if (free)
+        *                              kfree(foo);
+        * raw_spin_unlock(foo->lock->wait_lock);
+        *
+        * So for the fastpath enabled kernel:
+        *
+        * Nothing can set the waiters bit as long as we hold
+        * lock->wait_lock. So we do the following sequence:
+        *
+        *      owner = rt_mutex_owner(lock);
+        *      clear_rt_mutex_waiters(lock);
+        *      raw_spin_unlock(&lock->wait_lock);
+        *      if (cmpxchg(&lock->owner, owner, 0) == owner)
+        *              return;
+        *      goto retry;
+        *
+        * The fastpath disabled variant is simple as all access to
+        * lock->owner is serialized by lock->wait_lock:
+        *
+        *      lock->owner = NULL;
+        *      raw_spin_unlock(&lock->wait_lock);
+        */
+       while (!rt_mutex_has_waiters(lock)) {
+               /* Drops lock->wait_lock ! */
+               if (unlock_rt_mutex_safe(lock, flags) == true)
+                       return;
+               /* Relock the rtmutex and try again */
+               raw_spin_lock_irqsave(&lock->wait_lock, flags);
+       }
+
+       /*
+        * The wakeup next waiter path does not suffer from the above
+        * race. See the comments there.
+        *
+        * Queue the next waiter for wakeup once we release the wait_lock.
+        */
+       mark_wakeup_next_waiter(&wqh, lock);
+       raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
+
+       rt_mutex_wake_up_q(&wqh);
+}
+
+static __always_inline void __rt_mutex_unlock(struct rt_mutex_base *lock)
+{
+       if (likely(rt_mutex_cmpxchg_release(lock, current, NULL)))
+               return;
+
+       rt_mutex_slowunlock(lock);
+}
+
+#ifdef CONFIG_SMP
+static bool rtmutex_spin_on_owner(struct rt_mutex_base *lock,
+                                 struct rt_mutex_waiter *waiter,
+                                 struct task_struct *owner)
+{
+       bool res = true;
+
+       rcu_read_lock();
+       for (;;) {
+               /* If owner changed, trylock again. */
+               if (owner != rt_mutex_owner(lock))
+                       break;
+               /*
+                * Ensure that @owner is dereferenced after checking that
+                * the lock owner still matches @owner. If that fails,
+                * @owner might point to freed memory. If it still matches,
+                * the rcu_read_lock() ensures the memory stays valid.
+                */
+               barrier();
+               /*
+                * Stop spinning when:
+                *  - the lock owner has been scheduled out
+                *  - current is not longer the top waiter
+                *  - current is requested to reschedule (redundant
+                *    for CONFIG_PREEMPT_RCU=y)
+                *  - the VCPU on which owner runs is preempted
+                */
+               if (!owner->on_cpu || need_resched() ||
+                   rt_mutex_waiter_is_top_waiter(lock, waiter) ||
+                   vcpu_is_preempted(task_cpu(owner))) {
+                       res = false;
+                       break;
+               }
+               cpu_relax();
+       }
+       rcu_read_unlock();
+       return res;
+}
+#else
+static bool rtmutex_spin_on_owner(struct rt_mutex_base *lock,
+                                 struct rt_mutex_waiter *waiter,
+                                 struct task_struct *owner)
+{
+       return false;
+}
+#endif
+
+#ifdef RT_MUTEX_BUILD_MUTEX
+/*
+ * Functions required for:
+ *     - rtmutex, futex on all kernels
+ *     - mutex and rwsem substitutions on RT kernels
+ */
+
 /*
  * Remove a waiter from a lock and give up
  *
- * Must be called with lock->wait_lock held and interrupts disabled. I must
+ * Must be called with lock->wait_lock held and interrupts disabled. It must
  * have just failed to try_to_take_rt_mutex().
  */
-static void __sched remove_waiter(struct rt_mutex *lock,
+static void __sched remove_waiter(struct rt_mutex_base *lock,
                                  struct rt_mutex_waiter *waiter)
 {
        bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock));
        struct task_struct *owner = rt_mutex_owner(lock);
-       struct rt_mutex *next_lock;
+       struct rt_mutex_base *next_lock;
 
        lockdep_assert_held(&lock->wait_lock);
 
@@ -1089,56 +1458,25 @@ static void __sched remove_waiter(struct rt_mutex *lock,
        raw_spin_lock_irq(&lock->wait_lock);
 }
 
-/*
- * Recheck the pi chain, in case we got a priority setting
+/**
+ * rt_mutex_slowlock_block() - Perform the wait-wake-try-to-take loop
+ * @lock:               the rt_mutex to take
+ * @ww_ctx:             WW mutex context pointer
+ * @state:              the state the task should block in (TASK_INTERRUPTIBLE
+ *                      or TASK_UNINTERRUPTIBLE)
+ * @timeout:            the pre-initialized and started timer, or NULL for none
+ * @waiter:             the pre-initialized rt_mutex_waiter
  *
- * Called from sched_setscheduler
+ * Must be called with lock->wait_lock held and interrupts disabled
  */
-void __sched rt_mutex_adjust_pi(struct task_struct *task)
-{
-       struct rt_mutex_waiter *waiter;
-       struct rt_mutex *next_lock;
-       unsigned long flags;
-
-       raw_spin_lock_irqsave(&task->pi_lock, flags);
-
-       waiter = task->pi_blocked_on;
-       if (!waiter || rt_mutex_waiter_equal(waiter, task_to_waiter(task))) {
-               raw_spin_unlock_irqrestore(&task->pi_lock, flags);
-               return;
-       }
-       next_lock = waiter->lock;
-       raw_spin_unlock_irqrestore(&task->pi_lock, flags);
-
-       /* gets dropped in rt_mutex_adjust_prio_chain()! */
-       get_task_struct(task);
-
-       rt_mutex_adjust_prio_chain(task, RT_MUTEX_MIN_CHAINWALK, NULL,
-                                  next_lock, NULL, task);
-}
-
-void __sched rt_mutex_init_waiter(struct rt_mutex_waiter *waiter)
-{
-       debug_rt_mutex_init_waiter(waiter);
-       RB_CLEAR_NODE(&waiter->pi_tree_entry);
-       RB_CLEAR_NODE(&waiter->tree_entry);
-       waiter->task = NULL;
-}
-
-/**
- * __rt_mutex_slowlock() - Perform the wait-wake-try-to-take loop
- * @lock:               the rt_mutex to take
- * @state:              the state the task should block in (TASK_INTERRUPTIBLE
- *                      or TASK_UNINTERRUPTIBLE)
- * @timeout:            the pre-initialized and started timer, or NULL for none
- * @waiter:             the pre-initialized rt_mutex_waiter
- *
- * Must be called with lock->wait_lock held and interrupts disabled
- */
-static int __sched __rt_mutex_slowlock(struct rt_mutex *lock, unsigned int state,
-                                      struct hrtimer_sleeper *timeout,
-                                      struct rt_mutex_waiter *waiter)
+static int __sched rt_mutex_slowlock_block(struct rt_mutex_base *lock,
+                                          struct ww_acquire_ctx *ww_ctx,
+                                          unsigned int state,
+                                          struct hrtimer_sleeper *timeout,
+                                          struct rt_mutex_waiter *waiter)
 {
+       struct rt_mutex *rtm = container_of(lock, struct rt_mutex, rtmutex);
+       struct task_struct *owner;
        int ret = 0;
 
        for (;;) {
@@ -1155,9 +1493,20 @@ static int __sched __rt_mutex_slowlock(struct rt_mutex *lock, unsigned int state
                        break;
                }
 
+               if (build_ww_mutex() && ww_ctx) {
+                       ret = __ww_mutex_check_kill(rtm, waiter, ww_ctx);
+                       if (ret)
+                               break;
+               }
+
+               if (waiter == rt_mutex_top_waiter(lock))
+                       owner = rt_mutex_owner(lock);
+               else
+                       owner = NULL;
                raw_spin_unlock_irq(&lock->wait_lock);
 
-               schedule();
+               if (!owner || !rtmutex_spin_on_owner(lock, waiter, owner))
+                       schedule();
 
                raw_spin_lock_irq(&lock->wait_lock);
                set_current_state(state);
@@ -1177,6 +1526,9 @@ static void __sched rt_mutex_handle_deadlock(int res, int detect_deadlock,
        if (res != -EDEADLOCK || detect_deadlock)
                return;
 
+       if (build_ww_mutex() && w->ww_ctx)
+               return;
+
        /*
         * Yell loudly and stop the task right here.
         */
@@ -1187,51 +1539,52 @@ static void __sched rt_mutex_handle_deadlock(int res, int detect_deadlock,
        }
 }
 
-/*
- * Slow path lock function:
+/**
+ * __rt_mutex_slowlock - Locking slowpath invoked with lock::wait_lock held
+ * @lock:      The rtmutex to block lock
+ * @ww_ctx:    WW mutex context pointer
+ * @state:     The task state for sleeping
+ * @chwalk:    Indicator whether full or partial chainwalk is requested
+ * @waiter:    Initialized waiter for blocking
  */
-static int __sched rt_mutex_slowlock(struct rt_mutex *lock, unsigned int state,
-                                    struct hrtimer_sleeper *timeout,
-                                    enum rtmutex_chainwalk chwalk)
+static int __sched __rt_mutex_slowlock(struct rt_mutex_base *lock,
+                                      struct ww_acquire_ctx *ww_ctx,
+                                      unsigned int state,
+                                      enum rtmutex_chainwalk chwalk,
+                                      struct rt_mutex_waiter *waiter)
 {
-       struct rt_mutex_waiter waiter;
-       unsigned long flags;
-       int ret = 0;
-
-       rt_mutex_init_waiter(&waiter);
+       struct rt_mutex *rtm = container_of(lock, struct rt_mutex, rtmutex);
+       struct ww_mutex *ww = ww_container_of(rtm);
+       int ret;
 
-       /*
-        * Technically we could use raw_spin_[un]lock_irq() here, but this can
-        * be called in early boot if the cmpxchg() fast path is disabled
-        * (debug, no architecture support). In this case we will acquire the
-        * rtmutex with lock->wait_lock held. But we cannot unconditionally
-        * enable interrupts in that early boot case. So we need to use the
-        * irqsave/restore variants.
-        */
-       raw_spin_lock_irqsave(&lock->wait_lock, flags);
+       lockdep_assert_held(&lock->wait_lock);
 
        /* Try to acquire the lock again: */
        if (try_to_take_rt_mutex(lock, current, NULL)) {
-               raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
+               if (build_ww_mutex() && ww_ctx) {
+                       __ww_mutex_check_waiters(rtm, ww_ctx);
+                       ww_mutex_lock_acquired(ww, ww_ctx);
+               }
                return 0;
        }
 
        set_current_state(state);
 
-       /* Setup the timer, when timeout != NULL */
-       if (unlikely(timeout))
-               hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
-
-       ret = task_blocks_on_rt_mutex(lock, &waiter, current, chwalk);
-
+       ret = task_blocks_on_rt_mutex(lock, waiter, current, ww_ctx, chwalk);
        if (likely(!ret))
-               /* sleep on the mutex */
-               ret = __rt_mutex_slowlock(lock, state, timeout, &waiter);
-
-       if (unlikely(ret)) {
+               ret = rt_mutex_slowlock_block(lock, ww_ctx, state, NULL, waiter);
+
+       if (likely(!ret)) {
+               /* acquired the lock */
+               if (build_ww_mutex() && ww_ctx) {
+                       if (!ww_ctx->is_wait_die)
+                               __ww_mutex_check_waiters(rtm, ww_ctx);
+                       ww_mutex_lock_acquired(ww, ww_ctx);
+               }
+       } else {
                __set_current_state(TASK_RUNNING);
-               remove_waiter(lock, &waiter);
-               rt_mutex_handle_deadlock(ret, chwalk, &waiter);
+               remove_waiter(lock, waiter);
+               rt_mutex_handle_deadlock(ret, chwalk, waiter);
        }
 
        /*
@@ -1239,547 +1592,126 @@ static int __sched rt_mutex_slowlock(struct rt_mutex *lock, unsigned int state,
         * unconditionally. We might have to fix that up.
         */
        fixup_rt_mutex_waiters(lock);
-
-       raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
-
-       /* Remove pending timer: */
-       if (unlikely(timeout))
-               hrtimer_cancel(&timeout->timer);
-
-       debug_rt_mutex_free_waiter(&waiter);
-
        return ret;
 }
 
-static int __sched __rt_mutex_slowtrylock(struct rt_mutex *lock)
+static inline int __rt_mutex_slowlock_locked(struct rt_mutex_base *lock,
+                                            struct ww_acquire_ctx *ww_ctx,
+                                            unsigned int state)
 {
-       int ret = try_to_take_rt_mutex(lock, current, NULL);
+       struct rt_mutex_waiter waiter;
+       int ret;
 
-       /*
-        * try_to_take_rt_mutex() sets the lock waiters bit
-        * unconditionally. Clean this up.
-        */
-       fixup_rt_mutex_waiters(lock);
+       rt_mutex_init_waiter(&waiter);
+       waiter.ww_ctx = ww_ctx;
+
+       ret = __rt_mutex_slowlock(lock, ww_ctx, state, RT_MUTEX_MIN_CHAINWALK,
+                                 &waiter);
 
+       debug_rt_mutex_free_waiter(&waiter);
        return ret;
 }
 
 /*
- * Slow path try-lock function:
+ * rt_mutex_slowlock - Locking slowpath invoked when fast path fails
+ * @lock:      The rtmutex to block lock
+ * @ww_ctx:    WW mutex context pointer
+ * @state:     The task state for sleeping
  */
-static int __sched rt_mutex_slowtrylock(struct rt_mutex *lock)
+static int __sched rt_mutex_slowlock(struct rt_mutex_base *lock,
+                                    struct ww_acquire_ctx *ww_ctx,
+                                    unsigned int state)
 {
        unsigned long flags;
        int ret;
 
        /*
-        * If the lock already has an owner we fail to get the lock.
-        * This can be done without taking the @lock->wait_lock as
-        * it is only being read, and this is a trylock anyway.
-        */
-       if (rt_mutex_owner(lock))
-               return 0;
-
-       /*
-        * The mutex has currently no owner. Lock the wait lock and try to
-        * acquire the lock. We use irqsave here to support early boot calls.
+        * Technically we could use raw_spin_[un]lock_irq() here, but this can
+        * be called in early boot if the cmpxchg() fast path is disabled
+        * (debug, no architecture support). In this case we will acquire the
+        * rtmutex with lock->wait_lock held. But we cannot unconditionally
+        * enable interrupts in that early boot case. So we need to use the
+        * irqsave/restore variants.
         */
        raw_spin_lock_irqsave(&lock->wait_lock, flags);
-
-       ret = __rt_mutex_slowtrylock(lock);
-
+       ret = __rt_mutex_slowlock_locked(lock, ww_ctx, state);
        raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
 
        return ret;
 }
 
-/*
- * Performs the wakeup of the top-waiter and re-enables preemption.
- */
-void __sched rt_mutex_postunlock(struct wake_q_head *wake_q)
-{
-       wake_up_q(wake_q);
-
-       /* Pairs with preempt_disable() in mark_wakeup_next_waiter() */
-       preempt_enable();
-}
-
-/*
- * Slow path to release a rt-mutex.
- *
- * Return whether the current task needs to call rt_mutex_postunlock().
- */
-static void __sched rt_mutex_slowunlock(struct rt_mutex *lock)
-{
-       DEFINE_WAKE_Q(wake_q);
-       unsigned long flags;
-
-       /* irqsave required to support early boot calls */
-       raw_spin_lock_irqsave(&lock->wait_lock, flags);
-
-       debug_rt_mutex_unlock(lock);
-
-       /*
-        * We must be careful here if the fast path is enabled. If we
-        * have no waiters queued we cannot set owner to NULL here
-        * because of:
-        *
-        * foo->lock->owner = NULL;
-        *                      rtmutex_lock(foo->lock);   <- fast path
-        *                      free = atomic_dec_and_test(foo->refcnt);
-        *                      rtmutex_unlock(foo->lock); <- fast path
-        *                      if (free)
-        *                              kfree(foo);
-        * raw_spin_unlock(foo->lock->wait_lock);
-        *
-        * So for the fastpath enabled kernel:
-        *
-        * Nothing can set the waiters bit as long as we hold
-        * lock->wait_lock. So we do the following sequence:
-        *
-        *      owner = rt_mutex_owner(lock);
-        *      clear_rt_mutex_waiters(lock);
-        *      raw_spin_unlock(&lock->wait_lock);
-        *      if (cmpxchg(&lock->owner, owner, 0) == owner)
-        *              return;
-        *      goto retry;
-        *
-        * The fastpath disabled variant is simple as all access to
-        * lock->owner is serialized by lock->wait_lock:
-        *
-        *      lock->owner = NULL;
-        *      raw_spin_unlock(&lock->wait_lock);
-        */
-       while (!rt_mutex_has_waiters(lock)) {
-               /* Drops lock->wait_lock ! */
-               if (unlock_rt_mutex_safe(lock, flags) == true)
-                       return;
-               /* Relock the rtmutex and try again */
-               raw_spin_lock_irqsave(&lock->wait_lock, flags);
-       }
-
-       /*
-        * The wakeup next waiter path does not suffer from the above
-        * race. See the comments there.
-        *
-        * Queue the next waiter for wakeup once we release the wait_lock.
-        */
-       mark_wakeup_next_waiter(&wake_q, lock);
-       raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
-
-       rt_mutex_postunlock(&wake_q);
-}
-
-/*
- * debug aware fast / slowpath lock,trylock,unlock
- *
- * The atomic acquire/release ops are compiled away, when either the
- * architecture does not support cmpxchg or when debugging is enabled.
- */
-static __always_inline int __rt_mutex_lock(struct rt_mutex *lock, long state,
-                                          unsigned int subclass)
+static __always_inline int __rt_mutex_lock(struct rt_mutex_base *lock,
+                                          unsigned int state)
 {
-       int ret;
-
-       might_sleep();
-       mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
-
        if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
                return 0;
 
-       ret = rt_mutex_slowlock(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK);
-       if (ret)
-               mutex_release(&lock->dep_map, _RET_IP_);
-       return ret;
+       return rt_mutex_slowlock(lock, NULL, state);
 }
+#endif /* RT_MUTEX_BUILD_MUTEX */
 
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-/**
- * rt_mutex_lock_nested - lock a rt_mutex
- *
- * @lock: the rt_mutex to be locked
- * @subclass: the lockdep subclass
- */
-void __sched rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass)
-{
-       __rt_mutex_lock(lock, TASK_UNINTERRUPTIBLE, subclass);
-}
-EXPORT_SYMBOL_GPL(rt_mutex_lock_nested);
-
-#else /* !CONFIG_DEBUG_LOCK_ALLOC */
-
-/**
- * rt_mutex_lock - lock a rt_mutex
- *
- * @lock: the rt_mutex to be locked
- */
-void __sched rt_mutex_lock(struct rt_mutex *lock)
-{
-       __rt_mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0);
-}
-EXPORT_SYMBOL_GPL(rt_mutex_lock);
-#endif
-
-/**
- * rt_mutex_lock_interruptible - lock a rt_mutex interruptible
- *
- * @lock:              the rt_mutex to be locked
- *
- * Returns:
- *  0          on success
- * -EINTR      when interrupted by a signal
- */
-int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock)
-{
-       return __rt_mutex_lock(lock, TASK_INTERRUPTIBLE, 0);
-}
-EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
-
-/**
- * rt_mutex_trylock - try to lock a rt_mutex
- *
- * @lock:      the rt_mutex to be locked
- *
- * This function can only be called in thread context. It's safe to call it
- * from atomic regions, but not from hard or soft interrupt context.
- *
- * Returns:
- *  1 on success
- *  0 on contention
- */
-int __sched rt_mutex_trylock(struct rt_mutex *lock)
-{
-       int ret;
-
-       if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES) && WARN_ON_ONCE(!in_task()))
-               return 0;
-
-       /*
-        * No lockdep annotation required because lockdep disables the fast
-        * path.
-        */
-       if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
-               return 1;
-
-       ret = rt_mutex_slowtrylock(lock);
-       if (ret)
-               mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
-
-       return ret;
-}
-EXPORT_SYMBOL_GPL(rt_mutex_trylock);
-
-/**
- * rt_mutex_unlock - unlock a rt_mutex
- *
- * @lock: the rt_mutex to be unlocked
- */
-void __sched rt_mutex_unlock(struct rt_mutex *lock)
-{
-       mutex_release(&lock->dep_map, _RET_IP_);
-       if (likely(rt_mutex_cmpxchg_release(lock, current, NULL)))
-               return;
-
-       rt_mutex_slowunlock(lock);
-}
-EXPORT_SYMBOL_GPL(rt_mutex_unlock);
-
+#ifdef RT_MUTEX_BUILD_SPINLOCKS
 /*
- * Futex variants, must not use fastpath.
+ * Functions required for spin/rw_lock substitution on RT kernels
  */
-int __sched rt_mutex_futex_trylock(struct rt_mutex *lock)
-{
-       return rt_mutex_slowtrylock(lock);
-}
-
-int __sched __rt_mutex_futex_trylock(struct rt_mutex *lock)
-{
-       return __rt_mutex_slowtrylock(lock);
-}
 
 /**
- * __rt_mutex_futex_unlock - Futex variant, that since futex variants
- * do not use the fast-path, can be simple and will not need to retry.
- *
- * @lock:      The rt_mutex to be unlocked
- * @wake_q:    The wake queue head from which to get the next lock waiter
+ * rtlock_slowlock_locked - Slow path lock acquisition for RT locks
+ * @lock:      The underlying RT mutex
  */
-bool __sched __rt_mutex_futex_unlock(struct rt_mutex *lock,
-                                    struct wake_q_head *wake_q)
+static void __sched rtlock_slowlock_locked(struct rt_mutex_base *lock)
 {
-       lockdep_assert_held(&lock->wait_lock);
-
-       debug_rt_mutex_unlock(lock);
-
-       if (!rt_mutex_has_waiters(lock)) {
-               lock->owner = NULL;
-               return false; /* done */
-       }
-
-       /*
-        * We've already deboosted, mark_wakeup_next_waiter() will
-        * retain preempt_disabled when we drop the wait_lock, to
-        * avoid inversion prior to the wakeup.  preempt_disable()
-        * therein pairs with rt_mutex_postunlock().
-        */
-       mark_wakeup_next_waiter(wake_q, lock);
-
-       return true; /* call postunlock() */
-}
-
-void __sched rt_mutex_futex_unlock(struct rt_mutex *lock)
-{
-       DEFINE_WAKE_Q(wake_q);
-       unsigned long flags;
-       bool postunlock;
-
-       raw_spin_lock_irqsave(&lock->wait_lock, flags);
-       postunlock = __rt_mutex_futex_unlock(lock, &wake_q);
-       raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
-
-       if (postunlock)
-               rt_mutex_postunlock(&wake_q);
-}
+       struct rt_mutex_waiter waiter;
+       struct task_struct *owner;
 
-/**
- * __rt_mutex_init - initialize the rt_mutex
- *
- * @lock:      The rt_mutex to be initialized
- * @name:      The lock name used for debugging
- * @key:       The lock class key used for debugging
- *
- * Initialize the rt_mutex to unlocked state.
- *
- * Initializing of a locked rt_mutex is not allowed
- */
-void __sched __rt_mutex_init(struct rt_mutex *lock, const char *name,
-                    struct lock_class_key *key)
-{
-       debug_check_no_locks_freed((void *)lock, sizeof(*lock));
-       lockdep_init_map(&lock->dep_map, name, key, 0);
+       lockdep_assert_held(&lock->wait_lock);
 
-       __rt_mutex_basic_init(lock);
-}
-EXPORT_SYMBOL_GPL(__rt_mutex_init);
+       if (try_to_take_rt_mutex(lock, current, NULL))
+               return;
 
-/**
- * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
- *                             proxy owner
- *
- * @lock:      the rt_mutex to be locked
- * @proxy_owner:the task to set as owner
- *
- * No locking. Caller has to do serializing itself
- *
- * Special API call for PI-futex support. This initializes the rtmutex and
- * assigns it to @proxy_owner. Concurrent operations on the rtmutex are not
- * possible at this point because the pi_state which contains the rtmutex
- * is not yet visible to other tasks.
- */
-void __sched rt_mutex_init_proxy_locked(struct rt_mutex *lock,
-                                       struct task_struct *proxy_owner)
-{
-       __rt_mutex_basic_init(lock);
-       rt_mutex_set_owner(lock, proxy_owner);
-}
+       rt_mutex_init_rtlock_waiter(&waiter);
 
-/**
- * rt_mutex_proxy_unlock - release a lock on behalf of owner
- *
- * @lock:      the rt_mutex to be locked
- *
- * No locking. Caller has to do serializing itself
- *
- * Special API call for PI-futex support. This merrily cleans up the rtmutex
- * (debugging) state. Concurrent operations on this rt_mutex are not
- * possible because it belongs to the pi_state which is about to be freed
- * and it is not longer visible to other tasks.
- */
-void __sched rt_mutex_proxy_unlock(struct rt_mutex *lock)
-{
-       debug_rt_mutex_proxy_unlock(lock);
-       rt_mutex_set_owner(lock, NULL);
-}
+       /* Save current state and set state to TASK_RTLOCK_WAIT */
+       current_save_and_set_rtlock_wait_state();
 
-/**
- * __rt_mutex_start_proxy_lock() - Start lock acquisition for another task
- * @lock:              the rt_mutex to take
- * @waiter:            the pre-initialized rt_mutex_waiter
- * @task:              the task to prepare
- *
- * Starts the rt_mutex acquire; it enqueues the @waiter and does deadlock
- * detection. It does not wait, see rt_mutex_wait_proxy_lock() for that.
- *
- * NOTE: does _NOT_ remove the @waiter on failure; must either call
- * rt_mutex_wait_proxy_lock() or rt_mutex_cleanup_proxy_lock() after this.
- *
- * Returns:
- *  0 - task blocked on lock
- *  1 - acquired the lock for task, caller should wake it up
- * <0 - error
- *
- * Special API call for PI-futex support.
- */
-int __sched __rt_mutex_start_proxy_lock(struct rt_mutex *lock,
-                                       struct rt_mutex_waiter *waiter,
-                                       struct task_struct *task)
-{
-       int ret;
+       task_blocks_on_rt_mutex(lock, &waiter, current, NULL, RT_MUTEX_MIN_CHAINWALK);
 
-       lockdep_assert_held(&lock->wait_lock);
+       for (;;) {
+               /* Try to acquire the lock again */
+               if (try_to_take_rt_mutex(lock, current, &waiter))
+                       break;
 
-       if (try_to_take_rt_mutex(lock, task, NULL))
-               return 1;
+               if (&waiter == rt_mutex_top_waiter(lock))
+                       owner = rt_mutex_owner(lock);
+               else
+                       owner = NULL;
+               raw_spin_unlock_irq(&lock->wait_lock);
 
-       /* We enforce deadlock detection for futexes */
-       ret = task_blocks_on_rt_mutex(lock, waiter, task,
-                                     RT_MUTEX_FULL_CHAINWALK);
+               if (!owner || !rtmutex_spin_on_owner(lock, &waiter, owner))
+                       schedule_rtlock();
 
-       if (ret && !rt_mutex_owner(lock)) {
-               /*
-                * Reset the return value. We might have
-                * returned with -EDEADLK and the owner
-                * released the lock while we were walking the
-                * pi chain.  Let the waiter sort it out.
-                */
-               ret = 0;
+               raw_spin_lock_irq(&lock->wait_lock);
+               set_current_state(TASK_RTLOCK_WAIT);
        }
 
-       return ret;
-}
-
-/**
- * rt_mutex_start_proxy_lock() - Start lock acquisition for another task
- * @lock:              the rt_mutex to take
- * @waiter:            the pre-initialized rt_mutex_waiter
- * @task:              the task to prepare
- *
- * Starts the rt_mutex acquire; it enqueues the @waiter and does deadlock
- * detection. It does not wait, see rt_mutex_wait_proxy_lock() for that.
- *
- * NOTE: unlike __rt_mutex_start_proxy_lock this _DOES_ remove the @waiter
- * on failure.
- *
- * Returns:
- *  0 - task blocked on lock
- *  1 - acquired the lock for task, caller should wake it up
- * <0 - error
- *
- * Special API call for PI-futex support.
- */
-int __sched rt_mutex_start_proxy_lock(struct rt_mutex *lock,
-                                     struct rt_mutex_waiter *waiter,
-                                     struct task_struct *task)
-{
-       int ret;
-
-       raw_spin_lock_irq(&lock->wait_lock);
-       ret = __rt_mutex_start_proxy_lock(lock, waiter, task);
-       if (unlikely(ret))
-               remove_waiter(lock, waiter);
-       raw_spin_unlock_irq(&lock->wait_lock);
-
-       return ret;
-}
-
-/**
- * rt_mutex_wait_proxy_lock() - Wait for lock acquisition
- * @lock:              the rt_mutex we were woken on
- * @to:                        the timeout, null if none. hrtimer should already have
- *                     been started.
- * @waiter:            the pre-initialized rt_mutex_waiter
- *
- * Wait for the lock acquisition started on our behalf by
- * rt_mutex_start_proxy_lock(). Upon failure, the caller must call
- * rt_mutex_cleanup_proxy_lock().
- *
- * Returns:
- *  0 - success
- * <0 - error, one of -EINTR, -ETIMEDOUT
- *
- * Special API call for PI-futex support
- */
-int __sched rt_mutex_wait_proxy_lock(struct rt_mutex *lock,
-                                    struct hrtimer_sleeper *to,
-                                    struct rt_mutex_waiter *waiter)
-{
-       int ret;
+       /* Restore the task state */
+       current_restore_rtlock_saved_state();
 
-       raw_spin_lock_irq(&lock->wait_lock);
-       /* sleep on the mutex */
-       set_current_state(TASK_INTERRUPTIBLE);
-       ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter);
        /*
-        * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
-        * have to fix that up.
+        * try_to_take_rt_mutex() sets the waiter bit unconditionally.
+        * We might have to fix that up:
         */
        fixup_rt_mutex_waiters(lock);
-       raw_spin_unlock_irq(&lock->wait_lock);
-
-       return ret;
+       debug_rt_mutex_free_waiter(&waiter);
 }
 
-/**
- * rt_mutex_cleanup_proxy_lock() - Cleanup failed lock acquisition
- * @lock:              the rt_mutex we were woken on
- * @waiter:            the pre-initialized rt_mutex_waiter
- *
- * Attempt to clean up after a failed __rt_mutex_start_proxy_lock() or
- * rt_mutex_wait_proxy_lock().
- *
- * Unless we acquired the lock; we're still enqueued on the wait-list and can
- * in fact still be granted ownership until we're removed. Therefore we can
- * find we are in fact the owner and must disregard the
- * rt_mutex_wait_proxy_lock() failure.
- *
- * Returns:
- *  true  - did the cleanup, we done.
- *  false - we acquired the lock after rt_mutex_wait_proxy_lock() returned,
- *          caller should disregards its return value.
- *
- * Special API call for PI-futex support
- */
-bool __sched rt_mutex_cleanup_proxy_lock(struct rt_mutex *lock,
-                                        struct rt_mutex_waiter *waiter)
+static __always_inline void __sched rtlock_slowlock(struct rt_mutex_base *lock)
 {
-       bool cleanup = false;
-
-       raw_spin_lock_irq(&lock->wait_lock);
-       /*
-        * Do an unconditional try-lock, this deals with the lock stealing
-        * state where __rt_mutex_futex_unlock() -> mark_wakeup_next_waiter()
-        * sets a NULL owner.
-        *
-        * We're not interested in the return value, because the subsequent
-        * test on rt_mutex_owner() will infer that. If the trylock succeeded,
-        * we will own the lock and it will have removed the waiter. If we
-        * failed the trylock, we're still not owner and we need to remove
-        * ourselves.
-        */
-       try_to_take_rt_mutex(lock, current, waiter);
-       /*
-        * Unless we're the owner; we're still enqueued on the wait_list.
-        * So check if we became owner, if not, take us off the wait_list.
-        */
-       if (rt_mutex_owner(lock) != current) {
-               remove_waiter(lock, waiter);
-               cleanup = true;
-       }
-       /*
-        * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
-        * have to fix that up.
-        */
-       fixup_rt_mutex_waiters(lock);
-
-       raw_spin_unlock_irq(&lock->wait_lock);
+       unsigned long flags;
 
-       return cleanup;
+       raw_spin_lock_irqsave(&lock->wait_lock, flags);
+       rtlock_slowlock_locked(lock);
+       raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
 }
 
-#ifdef CONFIG_DEBUG_RT_MUTEXES
-void rt_mutex_debug_task_free(struct task_struct *task)
-{
-       DEBUG_LOCKS_WARN_ON(!RB_EMPTY_ROOT(&task->pi_waiters.rb_root));
-       DEBUG_LOCKS_WARN_ON(task->pi_blocked_on);
-}
-#endif
+#endif /* RT_MUTEX_BUILD_SPINLOCKS */
diff --git a/kernel/locking/rtmutex_api.c b/kernel/locking/rtmutex_api.c
new file mode 100644 (file)
index 0000000..5c9299a
--- /dev/null
@@ -0,0 +1,590 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * rtmutex API
+ */
+#include <linux/spinlock.h>
+#include <linux/export.h>
+
+#define RT_MUTEX_BUILD_MUTEX
+#include "rtmutex.c"
+
+/*
+ * Max number of times we'll walk the boosting chain:
+ */
+int max_lock_depth = 1024;
+
+/*
+ * Debug aware fast / slowpath lock,trylock,unlock
+ *
+ * The atomic acquire/release ops are compiled away, when either the
+ * architecture does not support cmpxchg or when debugging is enabled.
+ */
+static __always_inline int __rt_mutex_lock_common(struct rt_mutex *lock,
+                                                 unsigned int state,
+                                                 unsigned int subclass)
+{
+       int ret;
+
+       might_sleep();
+       mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
+       ret = __rt_mutex_lock(&lock->rtmutex, state);
+       if (ret)
+               mutex_release(&lock->dep_map, _RET_IP_);
+       return ret;
+}
+
+void rt_mutex_base_init(struct rt_mutex_base *rtb)
+{
+       __rt_mutex_base_init(rtb);
+}
+EXPORT_SYMBOL(rt_mutex_base_init);
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+/**
+ * rt_mutex_lock_nested - lock a rt_mutex
+ *
+ * @lock: the rt_mutex to be locked
+ * @subclass: the lockdep subclass
+ */
+void __sched rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass)
+{
+       __rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass);
+}
+EXPORT_SYMBOL_GPL(rt_mutex_lock_nested);
+
+#else /* !CONFIG_DEBUG_LOCK_ALLOC */
+
+/**
+ * rt_mutex_lock - lock a rt_mutex
+ *
+ * @lock: the rt_mutex to be locked
+ */
+void __sched rt_mutex_lock(struct rt_mutex *lock)
+{
+       __rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0);
+}
+EXPORT_SYMBOL_GPL(rt_mutex_lock);
+#endif
+
+/**
+ * rt_mutex_lock_interruptible - lock a rt_mutex interruptible
+ *
+ * @lock:              the rt_mutex to be locked
+ *
+ * Returns:
+ *  0          on success
+ * -EINTR      when interrupted by a signal
+ */
+int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock)
+{
+       return __rt_mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0);
+}
+EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
+
+/**
+ * rt_mutex_trylock - try to lock a rt_mutex
+ *
+ * @lock:      the rt_mutex to be locked
+ *
+ * This function can only be called in thread context. It's safe to call it
+ * from atomic regions, but not from hard or soft interrupt context.
+ *
+ * Returns:
+ *  1 on success
+ *  0 on contention
+ */
+int __sched rt_mutex_trylock(struct rt_mutex *lock)
+{
+       int ret;
+
+       if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES) && WARN_ON_ONCE(!in_task()))
+               return 0;
+
+       ret = __rt_mutex_trylock(&lock->rtmutex);
+       if (ret)
+               mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(rt_mutex_trylock);
+
+/**
+ * rt_mutex_unlock - unlock a rt_mutex
+ *
+ * @lock: the rt_mutex to be unlocked
+ */
+void __sched rt_mutex_unlock(struct rt_mutex *lock)
+{
+       mutex_release(&lock->dep_map, _RET_IP_);
+       __rt_mutex_unlock(&lock->rtmutex);
+}
+EXPORT_SYMBOL_GPL(rt_mutex_unlock);
+
+/*
+ * Futex variants, must not use fastpath.
+ */
+int __sched rt_mutex_futex_trylock(struct rt_mutex_base *lock)
+{
+       return rt_mutex_slowtrylock(lock);
+}
+
+int __sched __rt_mutex_futex_trylock(struct rt_mutex_base *lock)
+{
+       return __rt_mutex_slowtrylock(lock);
+}
+
+/**
+ * __rt_mutex_futex_unlock - Futex variant, that since futex variants
+ * do not use the fast-path, can be simple and will not need to retry.
+ *
+ * @lock:      The rt_mutex to be unlocked
+ * @wqh:       The wake queue head from which to get the next lock waiter
+ */
+bool __sched __rt_mutex_futex_unlock(struct rt_mutex_base *lock,
+                                    struct rt_wake_q_head *wqh)
+{
+       lockdep_assert_held(&lock->wait_lock);
+
+       debug_rt_mutex_unlock(lock);
+
+       if (!rt_mutex_has_waiters(lock)) {
+               lock->owner = NULL;
+               return false; /* done */
+       }
+
+       /*
+        * We've already deboosted, mark_wakeup_next_waiter() will
+        * retain preempt_disabled when we drop the wait_lock, to
+        * avoid inversion prior to the wakeup.  preempt_disable()
+        * therein pairs with rt_mutex_postunlock().
+        */
+       mark_wakeup_next_waiter(wqh, lock);
+
+       return true; /* call postunlock() */
+}
+
+void __sched rt_mutex_futex_unlock(struct rt_mutex_base *lock)
+{
+       DEFINE_RT_WAKE_Q(wqh);
+       unsigned long flags;
+       bool postunlock;
+
+       raw_spin_lock_irqsave(&lock->wait_lock, flags);
+       postunlock = __rt_mutex_futex_unlock(lock, &wqh);
+       raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
+
+       if (postunlock)
+               rt_mutex_postunlock(&wqh);
+}
+
+/**
+ * __rt_mutex_init - initialize the rt_mutex
+ *
+ * @lock:      The rt_mutex to be initialized
+ * @name:      The lock name used for debugging
+ * @key:       The lock class key used for debugging
+ *
+ * Initialize the rt_mutex to unlocked state.
+ *
+ * Initializing of a locked rt_mutex is not allowed
+ */
+void __sched __rt_mutex_init(struct rt_mutex *lock, const char *name,
+                            struct lock_class_key *key)
+{
+       debug_check_no_locks_freed((void *)lock, sizeof(*lock));
+       __rt_mutex_base_init(&lock->rtmutex);
+       lockdep_init_map_wait(&lock->dep_map, name, key, 0, LD_WAIT_SLEEP);
+}
+EXPORT_SYMBOL_GPL(__rt_mutex_init);
+
+/**
+ * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
+ *                             proxy owner
+ *
+ * @lock:      the rt_mutex to be locked
+ * @proxy_owner: the task to set as owner
+ *
+ * No locking. Caller has to do serializing itself
+ *
+ * Special API call for PI-futex support. This initializes the rtmutex and
+ * assigns it to @proxy_owner. Concurrent operations on the rtmutex are not
+ * possible at this point because the pi_state which contains the rtmutex
+ * is not yet visible to other tasks.
+ */
+void __sched rt_mutex_init_proxy_locked(struct rt_mutex_base *lock,
+                                       struct task_struct *proxy_owner)
+{
+       static struct lock_class_key pi_futex_key;
+
+       __rt_mutex_base_init(lock);
+       /*
+        * On PREEMPT_RT the futex hashbucket spinlock becomes 'sleeping'
+        * and rtmutex based. That causes a lockdep false positive, because
+        * some of the futex functions invoke spin_unlock(&hb->lock) with
+        * the wait_lock of the rtmutex associated to the pi_futex held.
+        * spin_unlock() in turn takes wait_lock of the rtmutex on which
+        * the spinlock is based, which makes lockdep notice a lock
+        * recursion. Give the futex/rtmutex wait_lock a separate key.
+        */
+       lockdep_set_class(&lock->wait_lock, &pi_futex_key);
+       rt_mutex_set_owner(lock, proxy_owner);
+}
+
+/**
+ * rt_mutex_proxy_unlock - release a lock on behalf of owner
+ *
+ * @lock:      the rt_mutex to be locked
+ *
+ * No locking. Caller has to do serializing itself
+ *
+ * Special API call for PI-futex support. This just cleans up the rtmutex
+ * (debugging) state. Concurrent operations on this rt_mutex are not
+ * possible because it belongs to the pi_state which is about to be freed
+ * and it is not longer visible to other tasks.
+ */
+void __sched rt_mutex_proxy_unlock(struct rt_mutex_base *lock)
+{
+       debug_rt_mutex_proxy_unlock(lock);
+       rt_mutex_set_owner(lock, NULL);
+}
+
+/**
+ * __rt_mutex_start_proxy_lock() - Start lock acquisition for another task
+ * @lock:              the rt_mutex to take
+ * @waiter:            the pre-initialized rt_mutex_waiter
+ * @task:              the task to prepare
+ *
+ * Starts the rt_mutex acquire; it enqueues the @waiter and does deadlock
+ * detection. It does not wait, see rt_mutex_wait_proxy_lock() for that.
+ *
+ * NOTE: does _NOT_ remove the @waiter on failure; must either call
+ * rt_mutex_wait_proxy_lock() or rt_mutex_cleanup_proxy_lock() after this.
+ *
+ * Returns:
+ *  0 - task blocked on lock
+ *  1 - acquired the lock for task, caller should wake it up
+ * <0 - error
+ *
+ * Special API call for PI-futex support.
+ */
+int __sched __rt_mutex_start_proxy_lock(struct rt_mutex_base *lock,
+                                       struct rt_mutex_waiter *waiter,
+                                       struct task_struct *task)
+{
+       int ret;
+
+       lockdep_assert_held(&lock->wait_lock);
+
+       if (try_to_take_rt_mutex(lock, task, NULL))
+               return 1;
+
+       /* We enforce deadlock detection for futexes */
+       ret = task_blocks_on_rt_mutex(lock, waiter, task, NULL,
+                                     RT_MUTEX_FULL_CHAINWALK);
+
+       if (ret && !rt_mutex_owner(lock)) {
+               /*
+                * Reset the return value. We might have
+                * returned with -EDEADLK and the owner
+                * released the lock while we were walking the
+                * pi chain.  Let the waiter sort it out.
+                */
+               ret = 0;
+       }
+
+       return ret;
+}
+
+/**
+ * rt_mutex_start_proxy_lock() - Start lock acquisition for another task
+ * @lock:              the rt_mutex to take
+ * @waiter:            the pre-initialized rt_mutex_waiter
+ * @task:              the task to prepare
+ *
+ * Starts the rt_mutex acquire; it enqueues the @waiter and does deadlock
+ * detection. It does not wait, see rt_mutex_wait_proxy_lock() for that.
+ *
+ * NOTE: unlike __rt_mutex_start_proxy_lock this _DOES_ remove the @waiter
+ * on failure.
+ *
+ * Returns:
+ *  0 - task blocked on lock
+ *  1 - acquired the lock for task, caller should wake it up
+ * <0 - error
+ *
+ * Special API call for PI-futex support.
+ */
+int __sched rt_mutex_start_proxy_lock(struct rt_mutex_base *lock,
+                                     struct rt_mutex_waiter *waiter,
+                                     struct task_struct *task)
+{
+       int ret;
+
+       raw_spin_lock_irq(&lock->wait_lock);
+       ret = __rt_mutex_start_proxy_lock(lock, waiter, task);
+       if (unlikely(ret))
+               remove_waiter(lock, waiter);
+       raw_spin_unlock_irq(&lock->wait_lock);
+
+       return ret;
+}
+
+/**
+ * rt_mutex_wait_proxy_lock() - Wait for lock acquisition
+ * @lock:              the rt_mutex we were woken on
+ * @to:                        the timeout, null if none. hrtimer should already have
+ *                     been started.
+ * @waiter:            the pre-initialized rt_mutex_waiter
+ *
+ * Wait for the lock acquisition started on our behalf by
+ * rt_mutex_start_proxy_lock(). Upon failure, the caller must call
+ * rt_mutex_cleanup_proxy_lock().
+ *
+ * Returns:
+ *  0 - success
+ * <0 - error, one of -EINTR, -ETIMEDOUT
+ *
+ * Special API call for PI-futex support
+ */
+int __sched rt_mutex_wait_proxy_lock(struct rt_mutex_base *lock,
+                                    struct hrtimer_sleeper *to,
+                                    struct rt_mutex_waiter *waiter)
+{
+       int ret;
+
+       raw_spin_lock_irq(&lock->wait_lock);
+       /* sleep on the mutex */
+       set_current_state(TASK_INTERRUPTIBLE);
+       ret = rt_mutex_slowlock_block(lock, NULL, TASK_INTERRUPTIBLE, to, waiter);
+       /*
+        * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
+        * have to fix that up.
+        */
+       fixup_rt_mutex_waiters(lock);
+       raw_spin_unlock_irq(&lock->wait_lock);
+
+       return ret;
+}
+
+/**
+ * rt_mutex_cleanup_proxy_lock() - Cleanup failed lock acquisition
+ * @lock:              the rt_mutex we were woken on
+ * @waiter:            the pre-initialized rt_mutex_waiter
+ *
+ * Attempt to clean up after a failed __rt_mutex_start_proxy_lock() or
+ * rt_mutex_wait_proxy_lock().
+ *
+ * Unless we acquired the lock; we're still enqueued on the wait-list and can
+ * in fact still be granted ownership until we're removed. Therefore we can
+ * find we are in fact the owner and must disregard the
+ * rt_mutex_wait_proxy_lock() failure.
+ *
+ * Returns:
+ *  true  - did the cleanup, we are done.
+ *  false - we acquired the lock after rt_mutex_wait_proxy_lock() returned,
+ *          caller should disregard its return value.
+ *
+ * Special API call for PI-futex support
+ */
+bool __sched rt_mutex_cleanup_proxy_lock(struct rt_mutex_base *lock,
+                                        struct rt_mutex_waiter *waiter)
+{
+       bool cleanup = false;
+
+       raw_spin_lock_irq(&lock->wait_lock);
+       /*
+        * Do an unconditional try-lock, this deals with the lock stealing
+        * state where __rt_mutex_futex_unlock() -> mark_wakeup_next_waiter()
+        * sets a NULL owner.
+        *
+        * We're not interested in the return value, because the subsequent
+        * test on rt_mutex_owner() will infer that. If the trylock succeeded,
+        * we will own the lock and it will have removed the waiter. If we
+        * failed the trylock, we're still not owner and we need to remove
+        * ourselves.
+        */
+       try_to_take_rt_mutex(lock, current, waiter);
+       /*
+        * Unless we're the owner; we're still enqueued on the wait_list.
+        * So check if we became owner, if not, take us off the wait_list.
+        */
+       if (rt_mutex_owner(lock) != current) {
+               remove_waiter(lock, waiter);
+               cleanup = true;
+       }
+       /*
+        * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
+        * have to fix that up.
+        */
+       fixup_rt_mutex_waiters(lock);
+
+       raw_spin_unlock_irq(&lock->wait_lock);
+
+       return cleanup;
+}
+
+/*
+ * Recheck the pi chain, in case we got a priority setting
+ *
+ * Called from sched_setscheduler
+ */
+void __sched rt_mutex_adjust_pi(struct task_struct *task)
+{
+       struct rt_mutex_waiter *waiter;
+       struct rt_mutex_base *next_lock;
+       unsigned long flags;
+
+       raw_spin_lock_irqsave(&task->pi_lock, flags);
+
+       waiter = task->pi_blocked_on;
+       if (!waiter || rt_mutex_waiter_equal(waiter, task_to_waiter(task))) {
+               raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+               return;
+       }
+       next_lock = waiter->lock;
+       raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+
+       /* gets dropped in rt_mutex_adjust_prio_chain()! */
+       get_task_struct(task);
+
+       rt_mutex_adjust_prio_chain(task, RT_MUTEX_MIN_CHAINWALK, NULL,
+                                  next_lock, NULL, task);
+}
+
+/*
+ * Performs the wakeup of the top-waiter and re-enables preemption.
+ */
+void __sched rt_mutex_postunlock(struct rt_wake_q_head *wqh)
+{
+       rt_mutex_wake_up_q(wqh);
+}
+
+#ifdef CONFIG_DEBUG_RT_MUTEXES
+void rt_mutex_debug_task_free(struct task_struct *task)
+{
+       DEBUG_LOCKS_WARN_ON(!RB_EMPTY_ROOT(&task->pi_waiters.rb_root));
+       DEBUG_LOCKS_WARN_ON(task->pi_blocked_on);
+}
+#endif
+
+#ifdef CONFIG_PREEMPT_RT
+/* Mutexes */
+void __mutex_rt_init(struct mutex *mutex, const char *name,
+                    struct lock_class_key *key)
+{
+       debug_check_no_locks_freed((void *)mutex, sizeof(*mutex));
+       lockdep_init_map_wait(&mutex->dep_map, name, key, 0, LD_WAIT_SLEEP);
+}
+EXPORT_SYMBOL(__mutex_rt_init);
+
+static __always_inline int __mutex_lock_common(struct mutex *lock,
+                                              unsigned int state,
+                                              unsigned int subclass,
+                                              struct lockdep_map *nest_lock,
+                                              unsigned long ip)
+{
+       int ret;
+
+       might_sleep();
+       mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
+       ret = __rt_mutex_lock(&lock->rtmutex, state);
+       if (ret)
+               mutex_release(&lock->dep_map, ip);
+       else
+               lock_acquired(&lock->dep_map, ip);
+       return ret;
+}
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+void __sched mutex_lock_nested(struct mutex *lock, unsigned int subclass)
+{
+       __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
+}
+EXPORT_SYMBOL_GPL(mutex_lock_nested);
+
+void __sched _mutex_lock_nest_lock(struct mutex *lock,
+                                  struct lockdep_map *nest_lock)
+{
+       __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, nest_lock, _RET_IP_);
+}
+EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);
+
+int __sched mutex_lock_interruptible_nested(struct mutex *lock,
+                                           unsigned int subclass)
+{
+       return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, subclass, NULL, _RET_IP_);
+}
+EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
+
+int __sched mutex_lock_killable_nested(struct mutex *lock,
+                                           unsigned int subclass)
+{
+       return __mutex_lock_common(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
+}
+EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
+
+void __sched mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
+{
+       int token;
+
+       might_sleep();
+
+       token = io_schedule_prepare();
+       __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
+       io_schedule_finish(token);
+}
+EXPORT_SYMBOL_GPL(mutex_lock_io_nested);
+
+#else /* CONFIG_DEBUG_LOCK_ALLOC */
+
+void __sched mutex_lock(struct mutex *lock)
+{
+       __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
+}
+EXPORT_SYMBOL(mutex_lock);
+
+int __sched mutex_lock_interruptible(struct mutex *lock)
+{
+       return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
+}
+EXPORT_SYMBOL(mutex_lock_interruptible);
+
+int __sched mutex_lock_killable(struct mutex *lock)
+{
+       return __mutex_lock_common(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
+}
+EXPORT_SYMBOL(mutex_lock_killable);
+
+void __sched mutex_lock_io(struct mutex *lock)
+{
+       int token = io_schedule_prepare();
+
+       __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
+       io_schedule_finish(token);
+}
+EXPORT_SYMBOL(mutex_lock_io);
+#endif /* !CONFIG_DEBUG_LOCK_ALLOC */
+
+int __sched mutex_trylock(struct mutex *lock)
+{
+       int ret;
+
+       if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES) && WARN_ON_ONCE(!in_task()))
+               return 0;
+
+       ret = __rt_mutex_trylock(&lock->rtmutex);
+       if (ret)
+               mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
+
+       return ret;
+}
+EXPORT_SYMBOL(mutex_trylock);
+
+void __sched mutex_unlock(struct mutex *lock)
+{
+       mutex_release(&lock->dep_map, _RET_IP_);
+       __rt_mutex_unlock(&lock->rtmutex);
+}
+EXPORT_SYMBOL(mutex_unlock);
+
+#endif /* CONFIG_PREEMPT_RT */
index a90c22a..c47e836 100644 (file)
  * @pi_tree_entry:     pi node to enqueue into the mutex owner waiters tree
  * @task:              task reference to the blocked task
  * @lock:              Pointer to the rt_mutex on which the waiter blocks
+ * @wake_state:                Wakeup state to use (TASK_NORMAL or TASK_RTLOCK_WAIT)
  * @prio:              Priority of the waiter
  * @deadline:          Deadline of the waiter if applicable
+ * @ww_ctx:            WW context pointer
  */
 struct rt_mutex_waiter {
        struct rb_node          tree_entry;
        struct rb_node          pi_tree_entry;
        struct task_struct      *task;
-       struct rt_mutex         *lock;
+       struct rt_mutex_base    *lock;
+       unsigned int            wake_state;
        int                     prio;
        u64                     deadline;
+       struct ww_acquire_ctx   *ww_ctx;
 };
 
+/**
+ * rt_wake_q_head - Wrapper around regular wake_q_head to support
+ *                 "sleeping" spinlocks on RT
+ * @head:              The regular wake_q_head for sleeping lock variants
+ * @rtlock_task:       Task pointer for RT lock (spin/rwlock) wakeups
+ */
+struct rt_wake_q_head {
+       struct wake_q_head      head;
+       struct task_struct      *rtlock_task;
+};
+
+#define DEFINE_RT_WAKE_Q(name)                                         \
+       struct rt_wake_q_head name = {                                  \
+               .head           = WAKE_Q_HEAD_INITIALIZER(name.head),   \
+               .rtlock_task    = NULL,                                 \
+       }
+
+/*
+ * PI-futex support (proxy locking functions, etc.):
+ */
+extern void rt_mutex_init_proxy_locked(struct rt_mutex_base *lock,
+                                      struct task_struct *proxy_owner);
+extern void rt_mutex_proxy_unlock(struct rt_mutex_base *lock);
+extern int __rt_mutex_start_proxy_lock(struct rt_mutex_base *lock,
+                                    struct rt_mutex_waiter *waiter,
+                                    struct task_struct *task);
+extern int rt_mutex_start_proxy_lock(struct rt_mutex_base *lock,
+                                    struct rt_mutex_waiter *waiter,
+                                    struct task_struct *task);
+extern int rt_mutex_wait_proxy_lock(struct rt_mutex_base *lock,
+                              struct hrtimer_sleeper *to,
+                              struct rt_mutex_waiter *waiter);
+extern bool rt_mutex_cleanup_proxy_lock(struct rt_mutex_base *lock,
+                                struct rt_mutex_waiter *waiter);
+
+extern int rt_mutex_futex_trylock(struct rt_mutex_base *l);
+extern int __rt_mutex_futex_trylock(struct rt_mutex_base *l);
+
+extern void rt_mutex_futex_unlock(struct rt_mutex_base *lock);
+extern bool __rt_mutex_futex_unlock(struct rt_mutex_base *lock,
+                               struct rt_wake_q_head *wqh);
+
+extern void rt_mutex_postunlock(struct rt_wake_q_head *wqh);
+
 /*
  * Must be guarded because this header is included from rcu/tree_plugin.h
  * unconditionally.
  */
 #ifdef CONFIG_RT_MUTEXES
-static inline int rt_mutex_has_waiters(struct rt_mutex *lock)
+static inline int rt_mutex_has_waiters(struct rt_mutex_base *lock)
 {
        return !RB_EMPTY_ROOT(&lock->waiters.rb_root);
 }
 
-static inline struct rt_mutex_waiter *rt_mutex_top_waiter(struct rt_mutex *lock)
+/*
+ * Lockless speculative check whether @waiter is still the top waiter on
+ * @lock. This is solely comparing pointers and not dereferencing the
+ * leftmost entry which might be about to vanish.
+ */
+static inline bool rt_mutex_waiter_is_top_waiter(struct rt_mutex_base *lock,
+                                                struct rt_mutex_waiter *waiter)
+{
+       struct rb_node *leftmost = rb_first_cached(&lock->waiters);
+
+       return rb_entry(leftmost, struct rt_mutex_waiter, tree_entry) == waiter;
+}
+
+static inline struct rt_mutex_waiter *rt_mutex_top_waiter(struct rt_mutex_base *lock)
 {
        struct rb_node *leftmost = rb_first_cached(&lock->waiters);
        struct rt_mutex_waiter *w = NULL;
@@ -72,19 +133,12 @@ static inline struct rt_mutex_waiter *task_top_pi_waiter(struct task_struct *p)
 
 #define RT_MUTEX_HAS_WAITERS   1UL
 
-static inline struct task_struct *rt_mutex_owner(struct rt_mutex *lock)
+static inline struct task_struct *rt_mutex_owner(struct rt_mutex_base *lock)
 {
        unsigned long owner = (unsigned long) READ_ONCE(lock->owner);
 
        return (struct task_struct *) (owner & ~RT_MUTEX_HAS_WAITERS);
 }
-#else /* CONFIG_RT_MUTEXES */
-/* Used in rcu/tree_plugin.h */
-static inline struct task_struct *rt_mutex_owner(struct rt_mutex *lock)
-{
-       return NULL;
-}
-#endif  /* !CONFIG_RT_MUTEXES */
 
 /*
  * Constants for rt mutex functions which have a selectable deadlock
@@ -101,49 +155,21 @@ enum rtmutex_chainwalk {
        RT_MUTEX_FULL_CHAINWALK,
 };
 
-static inline void __rt_mutex_basic_init(struct rt_mutex *lock)
+static inline void __rt_mutex_base_init(struct rt_mutex_base *lock)
 {
-       lock->owner = NULL;
        raw_spin_lock_init(&lock->wait_lock);
        lock->waiters = RB_ROOT_CACHED;
+       lock->owner = NULL;
 }
 
-/*
- * PI-futex support (proxy locking functions, etc.):
- */
-extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
-                                      struct task_struct *proxy_owner);
-extern void rt_mutex_proxy_unlock(struct rt_mutex *lock);
-extern void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter);
-extern int __rt_mutex_start_proxy_lock(struct rt_mutex *lock,
-                                    struct rt_mutex_waiter *waiter,
-                                    struct task_struct *task);
-extern int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
-                                    struct rt_mutex_waiter *waiter,
-                                    struct task_struct *task);
-extern int rt_mutex_wait_proxy_lock(struct rt_mutex *lock,
-                              struct hrtimer_sleeper *to,
-                              struct rt_mutex_waiter *waiter);
-extern bool rt_mutex_cleanup_proxy_lock(struct rt_mutex *lock,
-                                struct rt_mutex_waiter *waiter);
-
-extern int rt_mutex_futex_trylock(struct rt_mutex *l);
-extern int __rt_mutex_futex_trylock(struct rt_mutex *l);
-
-extern void rt_mutex_futex_unlock(struct rt_mutex *lock);
-extern bool __rt_mutex_futex_unlock(struct rt_mutex *lock,
-                                struct wake_q_head *wqh);
-
-extern void rt_mutex_postunlock(struct wake_q_head *wake_q);
-
 /* Debug functions */
-static inline void debug_rt_mutex_unlock(struct rt_mutex *lock)
+static inline void debug_rt_mutex_unlock(struct rt_mutex_base *lock)
 {
        if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES))
                DEBUG_LOCKS_WARN_ON(rt_mutex_owner(lock) != current);
 }
 
-static inline void debug_rt_mutex_proxy_unlock(struct rt_mutex *lock)
+static inline void debug_rt_mutex_proxy_unlock(struct rt_mutex_base *lock)
 {
        if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES))
                DEBUG_LOCKS_WARN_ON(!rt_mutex_owner(lock));
@@ -161,4 +187,27 @@ static inline void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter)
                memset(waiter, 0x22, sizeof(*waiter));
 }
 
+static inline void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter)
+{
+       debug_rt_mutex_init_waiter(waiter);
+       RB_CLEAR_NODE(&waiter->pi_tree_entry);
+       RB_CLEAR_NODE(&waiter->tree_entry);
+       waiter->wake_state = TASK_NORMAL;
+       waiter->task = NULL;
+}
+
+static inline void rt_mutex_init_rtlock_waiter(struct rt_mutex_waiter *waiter)
+{
+       rt_mutex_init_waiter(waiter);
+       waiter->wake_state = TASK_RTLOCK_WAIT;
+}
+
+#else /* CONFIG_RT_MUTEXES */
+/* Used in rcu/tree_plugin.h */
+static inline struct task_struct *rt_mutex_owner(struct rt_mutex_base *lock)
+{
+       return NULL;
+}
+#endif  /* !CONFIG_RT_MUTEXES */
+
 #endif
diff --git a/kernel/locking/rwbase_rt.c b/kernel/locking/rwbase_rt.c
new file mode 100644 (file)
index 0000000..4ba1508
--- /dev/null
@@ -0,0 +1,263 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/*
+ * RT-specific reader/writer semaphores and reader/writer locks
+ *
+ * down_write/write_lock()
+ *  1) Lock rtmutex
+ *  2) Remove the reader BIAS to force readers into the slow path
+ *  3) Wait until all readers have left the critical section
+ *  4) Mark it write locked
+ *
+ * up_write/write_unlock()
+ *  1) Remove the write locked marker
+ *  2) Set the reader BIAS, so readers can use the fast path again
+ *  3) Unlock rtmutex, to release blocked readers
+ *
+ * down_read/read_lock()
+ *  1) Try fast path acquisition (reader BIAS is set)
+ *  2) Take rtmutex::wait_lock, which protects the writelocked flag
+ *  3) If !writelocked, acquire it for read
+ *  4) If writelocked, block on rtmutex
+ *  5) unlock rtmutex, goto 1)
+ *
+ * up_read/read_unlock()
+ *  1) Try fast path release (reader count != 1)
+ *  2) Wake the writer waiting in down_write()/write_lock() #3
+ *
+ * down_read/read_lock()#3 has the consequence, that rw semaphores and rw
+ * locks on RT are not writer fair, but writers, which should be avoided in
+ * RT tasks (think mmap_sem), are subject to the rtmutex priority/DL
+ * inheritance mechanism.
+ *
+ * It's possible to make the rw primitives writer fair by keeping a list of
+ * active readers. A blocked writer would force all newly incoming readers
+ * to block on the rtmutex, but the rtmutex would have to be proxy locked
+ * for one reader after the other. We can't use multi-reader inheritance
+ * because there is no way to support that with SCHED_DEADLINE.
+ * Implementing the one by one reader boosting/handover mechanism is a
+ * major surgery for a very dubious value.
+ *
+ * The risk of writer starvation is there, but the pathological use cases
+ * which trigger it are not necessarily the typical RT workloads.
+ *
+ * Common code shared between RT rw_semaphore and rwlock
+ */
+
+static __always_inline int rwbase_read_trylock(struct rwbase_rt *rwb)
+{
+       int r;
+
+       /*
+        * Increment reader count, if sem->readers < 0, i.e. READER_BIAS is
+        * set.
+        */
+       for (r = atomic_read(&rwb->readers); r < 0;) {
+               if (likely(atomic_try_cmpxchg(&rwb->readers, &r, r + 1)))
+                       return 1;
+       }
+       return 0;
+}
+
+static int __sched __rwbase_read_lock(struct rwbase_rt *rwb,
+                                     unsigned int state)
+{
+       struct rt_mutex_base *rtm = &rwb->rtmutex;
+       int ret;
+
+       raw_spin_lock_irq(&rtm->wait_lock);
+       /*
+        * Allow readers, as long as the writer has not completely
+        * acquired the semaphore for write.
+        */
+       if (atomic_read(&rwb->readers) != WRITER_BIAS) {
+               atomic_inc(&rwb->readers);
+               raw_spin_unlock_irq(&rtm->wait_lock);
+               return 0;
+       }
+
+       /*
+        * Call into the slow lock path with the rtmutex->wait_lock
+        * held, so this can't result in the following race:
+        *
+        * Reader1              Reader2         Writer
+        *                      down_read()
+        *                                      down_write()
+        *                                      rtmutex_lock(m)
+        *                                      wait()
+        * down_read()
+        * unlock(m->wait_lock)
+        *                      up_read()
+        *                      wake(Writer)
+        *                                      lock(m->wait_lock)
+        *                                      sem->writelocked=true
+        *                                      unlock(m->wait_lock)
+        *
+        *                                      up_write()
+        *                                      sem->writelocked=false
+        *                                      rtmutex_unlock(m)
+        *                      down_read()
+        *                                      down_write()
+        *                                      rtmutex_lock(m)
+        *                                      wait()
+        * rtmutex_lock(m)
+        *
+        * That would put Reader1 behind the writer waiting on
+        * Reader2 to call up_read(), which might be unbound.
+        */
+
+       /*
+        * For rwlocks this returns 0 unconditionally, so the below
+        * !ret conditionals are optimized out.
+        */
+       ret = rwbase_rtmutex_slowlock_locked(rtm, state);
+
+       /*
+        * On success the rtmutex is held, so there can't be a writer
+        * active. Increment the reader count and immediately drop the
+        * rtmutex again.
+        *
+        * rtmutex->wait_lock has to be unlocked in any case of course.
+        */
+       if (!ret)
+               atomic_inc(&rwb->readers);
+       raw_spin_unlock_irq(&rtm->wait_lock);
+       if (!ret)
+               rwbase_rtmutex_unlock(rtm);
+       return ret;
+}
+
+static __always_inline int rwbase_read_lock(struct rwbase_rt *rwb,
+                                           unsigned int state)
+{
+       if (rwbase_read_trylock(rwb))
+               return 0;
+
+       return __rwbase_read_lock(rwb, state);
+}
+
+static void __sched __rwbase_read_unlock(struct rwbase_rt *rwb,
+                                        unsigned int state)
+{
+       struct rt_mutex_base *rtm = &rwb->rtmutex;
+       struct task_struct *owner;
+
+       raw_spin_lock_irq(&rtm->wait_lock);
+       /*
+        * Wake the writer, i.e. the rtmutex owner. It might release the
+        * rtmutex concurrently in the fast path (due to a signal), but to
+        * clean up rwb->readers it needs to acquire rtm->wait_lock. The
+        * worst case which can happen is a spurious wakeup.
+        */
+       owner = rt_mutex_owner(rtm);
+       if (owner)
+               wake_up_state(owner, state);
+
+       raw_spin_unlock_irq(&rtm->wait_lock);
+}
+
+static __always_inline void rwbase_read_unlock(struct rwbase_rt *rwb,
+                                              unsigned int state)
+{
+       /*
+        * rwb->readers can only hit 0 when a writer is waiting for the
+        * active readers to leave the critical section.
+        */
+       if (unlikely(atomic_dec_and_test(&rwb->readers)))
+               __rwbase_read_unlock(rwb, state);
+}
+
+static inline void __rwbase_write_unlock(struct rwbase_rt *rwb, int bias,
+                                        unsigned long flags)
+{
+       struct rt_mutex_base *rtm = &rwb->rtmutex;
+
+       atomic_add(READER_BIAS - bias, &rwb->readers);
+       raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
+       rwbase_rtmutex_unlock(rtm);
+}
+
+static inline void rwbase_write_unlock(struct rwbase_rt *rwb)
+{
+       struct rt_mutex_base *rtm = &rwb->rtmutex;
+       unsigned long flags;
+
+       raw_spin_lock_irqsave(&rtm->wait_lock, flags);
+       __rwbase_write_unlock(rwb, WRITER_BIAS, flags);
+}
+
+static inline void rwbase_write_downgrade(struct rwbase_rt *rwb)
+{
+       struct rt_mutex_base *rtm = &rwb->rtmutex;
+       unsigned long flags;
+
+       raw_spin_lock_irqsave(&rtm->wait_lock, flags);
+       /* Release it and account current as reader */
+       __rwbase_write_unlock(rwb, WRITER_BIAS - 1, flags);
+}
+
+static int __sched rwbase_write_lock(struct rwbase_rt *rwb,
+                                    unsigned int state)
+{
+       struct rt_mutex_base *rtm = &rwb->rtmutex;
+       unsigned long flags;
+
+       /* Take the rtmutex as a first step */
+       if (rwbase_rtmutex_lock_state(rtm, state))
+               return -EINTR;
+
+       /* Force readers into slow path */
+       atomic_sub(READER_BIAS, &rwb->readers);
+
+       raw_spin_lock_irqsave(&rtm->wait_lock, flags);
+       /*
+        * set_current_state() for rw_semaphore
+        * current_save_and_set_rtlock_wait_state() for rwlock
+        */
+       rwbase_set_and_save_current_state(state);
+
+       /* Block until all readers have left the critical section. */
+       for (; atomic_read(&rwb->readers);) {
+               /* Optimized out for rwlocks */
+               if (rwbase_signal_pending_state(state, current)) {
+                       __set_current_state(TASK_RUNNING);
+                       __rwbase_write_unlock(rwb, 0, flags);
+                       return -EINTR;
+               }
+               raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
+
+               /*
+                * Schedule and wait for the readers to leave the critical
+                * section. The last reader leaving it wakes the waiter.
+                */
+               if (atomic_read(&rwb->readers) != 0)
+                       rwbase_schedule();
+               set_current_state(state);
+               raw_spin_lock_irqsave(&rtm->wait_lock, flags);
+       }
+
+       atomic_set(&rwb->readers, WRITER_BIAS);
+       rwbase_restore_current_state();
+       raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
+       return 0;
+}
+
+static inline int rwbase_write_trylock(struct rwbase_rt *rwb)
+{
+       struct rt_mutex_base *rtm = &rwb->rtmutex;
+       unsigned long flags;
+
+       if (!rwbase_rtmutex_trylock(rtm))
+               return 0;
+
+       atomic_sub(READER_BIAS, &rwb->readers);
+
+       raw_spin_lock_irqsave(&rtm->wait_lock, flags);
+       if (!atomic_read(&rwb->readers)) {
+               atomic_set(&rwb->readers, WRITER_BIAS);
+               raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
+               return 1;
+       }
+       __rwbase_write_unlock(rwb, 0, flags);
+       return 0;
+}
index 16bfbb1..9215b4d 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/rwsem.h>
 #include <linux/atomic.h>
 
+#ifndef CONFIG_PREEMPT_RT
 #include "lock_events.h"
 
 /*
@@ -1165,7 +1166,7 @@ out_nolock:
  * handle waking up a waiter on the semaphore
  * - up_read/up_write has decremented the active part of count if we come here
  */
-static struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem, long count)
+static struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
 {
        unsigned long flags;
        DEFINE_WAKE_Q(wake_q);
@@ -1297,7 +1298,7 @@ static inline void __up_read(struct rw_semaphore *sem)
        if (unlikely((tmp & (RWSEM_LOCK_MASK|RWSEM_FLAG_WAITERS)) ==
                      RWSEM_FLAG_WAITERS)) {
                clear_nonspinnable(sem);
-               rwsem_wake(sem, tmp);
+               rwsem_wake(sem);
        }
 }
 
@@ -1319,7 +1320,7 @@ static inline void __up_write(struct rw_semaphore *sem)
        rwsem_clear_owner(sem);
        tmp = atomic_long_fetch_add_release(-RWSEM_WRITER_LOCKED, &sem->count);
        if (unlikely(tmp & RWSEM_FLAG_WAITERS))
-               rwsem_wake(sem, tmp);
+               rwsem_wake(sem);
 }
 
 /*
@@ -1344,6 +1345,114 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
                rwsem_downgrade_wake(sem);
 }
 
+#else /* !CONFIG_PREEMPT_RT */
+
+#define RT_MUTEX_BUILD_MUTEX
+#include "rtmutex.c"
+
+#define rwbase_set_and_save_current_state(state)       \
+       set_current_state(state)
+
+#define rwbase_restore_current_state()                 \
+       __set_current_state(TASK_RUNNING)
+
+#define rwbase_rtmutex_lock_state(rtm, state)          \
+       __rt_mutex_lock(rtm, state)
+
+#define rwbase_rtmutex_slowlock_locked(rtm, state)     \
+       __rt_mutex_slowlock_locked(rtm, NULL, state)
+
+#define rwbase_rtmutex_unlock(rtm)                     \
+       __rt_mutex_unlock(rtm)
+
+#define rwbase_rtmutex_trylock(rtm)                    \
+       __rt_mutex_trylock(rtm)
+
+#define rwbase_signal_pending_state(state, current)    \
+       signal_pending_state(state, current)
+
+#define rwbase_schedule()                              \
+       schedule()
+
+#include "rwbase_rt.c"
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+void __rwsem_init(struct rw_semaphore *sem, const char *name,
+                 struct lock_class_key *key)
+{
+       debug_check_no_locks_freed((void *)sem, sizeof(*sem));
+       lockdep_init_map_wait(&sem->dep_map, name, key, 0, LD_WAIT_SLEEP);
+}
+EXPORT_SYMBOL(__rwsem_init);
+#endif
+
+static inline void __down_read(struct rw_semaphore *sem)
+{
+       rwbase_read_lock(&sem->rwbase, TASK_UNINTERRUPTIBLE);
+}
+
+static inline int __down_read_interruptible(struct rw_semaphore *sem)
+{
+       return rwbase_read_lock(&sem->rwbase, TASK_INTERRUPTIBLE);
+}
+
+static inline int __down_read_killable(struct rw_semaphore *sem)
+{
+       return rwbase_read_lock(&sem->rwbase, TASK_KILLABLE);
+}
+
+static inline int __down_read_trylock(struct rw_semaphore *sem)
+{
+       return rwbase_read_trylock(&sem->rwbase);
+}
+
+static inline void __up_read(struct rw_semaphore *sem)
+{
+       rwbase_read_unlock(&sem->rwbase, TASK_NORMAL);
+}
+
+static inline void __sched __down_write(struct rw_semaphore *sem)
+{
+       rwbase_write_lock(&sem->rwbase, TASK_UNINTERRUPTIBLE);
+}
+
+static inline int __sched __down_write_killable(struct rw_semaphore *sem)
+{
+       return rwbase_write_lock(&sem->rwbase, TASK_KILLABLE);
+}
+
+static inline int __down_write_trylock(struct rw_semaphore *sem)
+{
+       return rwbase_write_trylock(&sem->rwbase);
+}
+
+static inline void __up_write(struct rw_semaphore *sem)
+{
+       rwbase_write_unlock(&sem->rwbase);
+}
+
+static inline void __downgrade_write(struct rw_semaphore *sem)
+{
+       rwbase_write_downgrade(&sem->rwbase);
+}
+
+/* Debug stubs for the common API */
+#define DEBUG_RWSEMS_WARN_ON(c, sem)
+
+static inline void __rwsem_set_reader_owned(struct rw_semaphore *sem,
+                                           struct task_struct *owner)
+{
+}
+
+static inline bool is_rwsem_reader_owned(struct rw_semaphore *sem)
+{
+       int count = atomic_read(&sem->rwbase.readers);
+
+       return count < 0 && count != READER_BIAS;
+}
+
+#endif /* CONFIG_PREEMPT_RT */
+
 /*
  * lock for reading
  */
index 9aa855a..9ee381e 100644 (file)
@@ -54,6 +54,7 @@ void down(struct semaphore *sem)
 {
        unsigned long flags;
 
+       might_sleep();
        raw_spin_lock_irqsave(&sem->lock, flags);
        if (likely(sem->count > 0))
                sem->count--;
@@ -77,6 +78,7 @@ int down_interruptible(struct semaphore *sem)
        unsigned long flags;
        int result = 0;
 
+       might_sleep();
        raw_spin_lock_irqsave(&sem->lock, flags);
        if (likely(sem->count > 0))
                sem->count--;
@@ -103,6 +105,7 @@ int down_killable(struct semaphore *sem)
        unsigned long flags;
        int result = 0;
 
+       might_sleep();
        raw_spin_lock_irqsave(&sem->lock, flags);
        if (likely(sem->count > 0))
                sem->count--;
@@ -157,6 +160,7 @@ int down_timeout(struct semaphore *sem, long timeout)
        unsigned long flags;
        int result = 0;
 
+       might_sleep();
        raw_spin_lock_irqsave(&sem->lock, flags);
        if (likely(sem->count > 0))
                sem->count--;
index c8d7ad9..c5830cf 100644 (file)
@@ -124,8 +124,11 @@ void __lockfunc __raw_##op##_lock_bh(locktype##_t *lock)           \
  *         __[spin|read|write]_lock_bh()
  */
 BUILD_LOCK_OPS(spin, raw_spinlock);
+
+#ifndef CONFIG_PREEMPT_RT
 BUILD_LOCK_OPS(read, rwlock);
 BUILD_LOCK_OPS(write, rwlock);
+#endif
 
 #endif
 
@@ -209,6 +212,8 @@ void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock)
 EXPORT_SYMBOL(_raw_spin_unlock_bh);
 #endif
 
+#ifndef CONFIG_PREEMPT_RT
+
 #ifndef CONFIG_INLINE_READ_TRYLOCK
 int __lockfunc _raw_read_trylock(rwlock_t *lock)
 {
@@ -353,6 +358,8 @@ void __lockfunc _raw_write_unlock_bh(rwlock_t *lock)
 EXPORT_SYMBOL(_raw_write_unlock_bh);
 #endif
 
+#endif /* !CONFIG_PREEMPT_RT */
+
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 
 void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
index b9d9308..1423567 100644 (file)
@@ -31,6 +31,7 @@ void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
 
 EXPORT_SYMBOL(__raw_spin_lock_init);
 
+#ifndef CONFIG_PREEMPT_RT
 void __rwlock_init(rwlock_t *lock, const char *name,
                   struct lock_class_key *key)
 {
@@ -48,6 +49,7 @@ void __rwlock_init(rwlock_t *lock, const char *name,
 }
 
 EXPORT_SYMBOL(__rwlock_init);
+#endif
 
 static void spin_dump(raw_spinlock_t *lock, const char *msg)
 {
@@ -139,6 +141,7 @@ void do_raw_spin_unlock(raw_spinlock_t *lock)
        arch_spin_unlock(&lock->raw_lock);
 }
 
+#ifndef CONFIG_PREEMPT_RT
 static void rwlock_bug(rwlock_t *lock, const char *msg)
 {
        if (!debug_locks_off())
@@ -228,3 +231,5 @@ void do_raw_write_unlock(rwlock_t *lock)
        debug_write_unlock(lock);
        arch_write_unlock(&lock->raw_lock);
 }
+
+#endif /* !CONFIG_PREEMPT_RT */
diff --git a/kernel/locking/spinlock_rt.c b/kernel/locking/spinlock_rt.c
new file mode 100644 (file)
index 0000000..d2912e4
--- /dev/null
@@ -0,0 +1,263 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * PREEMPT_RT substitution for spin/rw_locks
+ *
+ * spinlocks and rwlocks on RT are based on rtmutexes, with a few twists to
+ * resemble the non RT semantics:
+ *
+ * - Contrary to plain rtmutexes, spinlocks and rwlocks are state
+ *   preserving. The task state is saved before blocking on the underlying
+ *   rtmutex, and restored when the lock has been acquired. Regular wakeups
+ *   during that time are redirected to the saved state so no wake up is
+ *   missed.
+ *
+ * - Non RT spin/rwlocks disable preemption and eventually interrupts.
+ *   Disabling preemption has the side effect of disabling migration and
+ *   preventing RCU grace periods.
+ *
+ *   The RT substitutions explicitly disable migration and take
+ *   rcu_read_lock() across the lock held section.
+ */
+#include <linux/spinlock.h>
+#include <linux/export.h>
+
+#define RT_MUTEX_BUILD_SPINLOCKS
+#include "rtmutex.c"
+
+static __always_inline void rtlock_lock(struct rt_mutex_base *rtm)
+{
+       if (unlikely(!rt_mutex_cmpxchg_acquire(rtm, NULL, current)))
+               rtlock_slowlock(rtm);
+}
+
+static __always_inline void __rt_spin_lock(spinlock_t *lock)
+{
+       ___might_sleep(__FILE__, __LINE__, 0);
+       rtlock_lock(&lock->lock);
+       rcu_read_lock();
+       migrate_disable();
+}
+
+void __sched rt_spin_lock(spinlock_t *lock)
+{
+       spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+       __rt_spin_lock(lock);
+}
+EXPORT_SYMBOL(rt_spin_lock);
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+void __sched rt_spin_lock_nested(spinlock_t *lock, int subclass)
+{
+       spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
+       __rt_spin_lock(lock);
+}
+EXPORT_SYMBOL(rt_spin_lock_nested);
+
+void __sched rt_spin_lock_nest_lock(spinlock_t *lock,
+                                   struct lockdep_map *nest_lock)
+{
+       spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_);
+       __rt_spin_lock(lock);
+}
+EXPORT_SYMBOL(rt_spin_lock_nest_lock);
+#endif
+
+void __sched rt_spin_unlock(spinlock_t *lock)
+{
+       spin_release(&lock->dep_map, _RET_IP_);
+       migrate_enable();
+       rcu_read_unlock();
+
+       if (unlikely(!rt_mutex_cmpxchg_release(&lock->lock, current, NULL)))
+               rt_mutex_slowunlock(&lock->lock);
+}
+EXPORT_SYMBOL(rt_spin_unlock);
+
+/*
+ * Wait for the lock to get unlocked: instead of polling for an unlock
+ * (like raw spinlocks do), lock and unlock, to force the kernel to
+ * schedule if there's contention:
+ */
+void __sched rt_spin_lock_unlock(spinlock_t *lock)
+{
+       spin_lock(lock);
+       spin_unlock(lock);
+}
+EXPORT_SYMBOL(rt_spin_lock_unlock);
+
+static __always_inline int __rt_spin_trylock(spinlock_t *lock)
+{
+       int ret = 1;
+
+       if (unlikely(!rt_mutex_cmpxchg_acquire(&lock->lock, NULL, current)))
+               ret = rt_mutex_slowtrylock(&lock->lock);
+
+       if (ret) {
+               spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
+               rcu_read_lock();
+               migrate_disable();
+       }
+       return ret;
+}
+
+int __sched rt_spin_trylock(spinlock_t *lock)
+{
+       return __rt_spin_trylock(lock);
+}
+EXPORT_SYMBOL(rt_spin_trylock);
+
+int __sched rt_spin_trylock_bh(spinlock_t *lock)
+{
+       int ret;
+
+       local_bh_disable();
+       ret = __rt_spin_trylock(lock);
+       if (!ret)
+               local_bh_enable();
+       return ret;
+}
+EXPORT_SYMBOL(rt_spin_trylock_bh);
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+void __rt_spin_lock_init(spinlock_t *lock, const char *name,
+                        struct lock_class_key *key, bool percpu)
+{
+       u8 type = percpu ? LD_LOCK_PERCPU : LD_LOCK_NORMAL;
+
+       debug_check_no_locks_freed((void *)lock, sizeof(*lock));
+       lockdep_init_map_type(&lock->dep_map, name, key, 0, LD_WAIT_CONFIG,
+                             LD_WAIT_INV, type);
+}
+EXPORT_SYMBOL(__rt_spin_lock_init);
+#endif
+
+/*
+ * RT-specific reader/writer locks
+ */
+#define rwbase_set_and_save_current_state(state)       \
+       current_save_and_set_rtlock_wait_state()
+
+#define rwbase_restore_current_state()                 \
+       current_restore_rtlock_saved_state()
+
+static __always_inline int
+rwbase_rtmutex_lock_state(struct rt_mutex_base *rtm, unsigned int state)
+{
+       if (unlikely(!rt_mutex_cmpxchg_acquire(rtm, NULL, current)))
+               rtlock_slowlock(rtm);
+       return 0;
+}
+
+static __always_inline int
+rwbase_rtmutex_slowlock_locked(struct rt_mutex_base *rtm, unsigned int state)
+{
+       rtlock_slowlock_locked(rtm);
+       return 0;
+}
+
+static __always_inline void rwbase_rtmutex_unlock(struct rt_mutex_base *rtm)
+{
+       if (likely(rt_mutex_cmpxchg_acquire(rtm, current, NULL)))
+               return;
+
+       rt_mutex_slowunlock(rtm);
+}
+
+static __always_inline int  rwbase_rtmutex_trylock(struct rt_mutex_base *rtm)
+{
+       if (likely(rt_mutex_cmpxchg_acquire(rtm, NULL, current)))
+               return 1;
+
+       return rt_mutex_slowtrylock(rtm);
+}
+
+#define rwbase_signal_pending_state(state, current)    (0)
+
+#define rwbase_schedule()                              \
+       schedule_rtlock()
+
+#include "rwbase_rt.c"
+/*
+ * The common functions which get wrapped into the rwlock API.
+ */
+int __sched rt_read_trylock(rwlock_t *rwlock)
+{
+       int ret;
+
+       ret = rwbase_read_trylock(&rwlock->rwbase);
+       if (ret) {
+               rwlock_acquire_read(&rwlock->dep_map, 0, 1, _RET_IP_);
+               rcu_read_lock();
+               migrate_disable();
+       }
+       return ret;
+}
+EXPORT_SYMBOL(rt_read_trylock);
+
+int __sched rt_write_trylock(rwlock_t *rwlock)
+{
+       int ret;
+
+       ret = rwbase_write_trylock(&rwlock->rwbase);
+       if (ret) {
+               rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_);
+               rcu_read_lock();
+               migrate_disable();
+       }
+       return ret;
+}
+EXPORT_SYMBOL(rt_write_trylock);
+
+void __sched rt_read_lock(rwlock_t *rwlock)
+{
+       ___might_sleep(__FILE__, __LINE__, 0);
+       rwlock_acquire_read(&rwlock->dep_map, 0, 0, _RET_IP_);
+       rwbase_read_lock(&rwlock->rwbase, TASK_RTLOCK_WAIT);
+       rcu_read_lock();
+       migrate_disable();
+}
+EXPORT_SYMBOL(rt_read_lock);
+
+void __sched rt_write_lock(rwlock_t *rwlock)
+{
+       ___might_sleep(__FILE__, __LINE__, 0);
+       rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_);
+       rwbase_write_lock(&rwlock->rwbase, TASK_RTLOCK_WAIT);
+       rcu_read_lock();
+       migrate_disable();
+}
+EXPORT_SYMBOL(rt_write_lock);
+
+void __sched rt_read_unlock(rwlock_t *rwlock)
+{
+       rwlock_release(&rwlock->dep_map, _RET_IP_);
+       migrate_enable();
+       rcu_read_unlock();
+       rwbase_read_unlock(&rwlock->rwbase, TASK_RTLOCK_WAIT);
+}
+EXPORT_SYMBOL(rt_read_unlock);
+
+void __sched rt_write_unlock(rwlock_t *rwlock)
+{
+       rwlock_release(&rwlock->dep_map, _RET_IP_);
+       rcu_read_unlock();
+       migrate_enable();
+       rwbase_write_unlock(&rwlock->rwbase);
+}
+EXPORT_SYMBOL(rt_write_unlock);
+
+int __sched rt_rwlock_is_contended(rwlock_t *rwlock)
+{
+       return rw_base_is_contended(&rwlock->rwbase);
+}
+EXPORT_SYMBOL(rt_rwlock_is_contended);
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+void __rt_rwlock_init(rwlock_t *rwlock, const char *name,
+                     struct lock_class_key *key)
+{
+       debug_check_no_locks_freed((void *)rwlock, sizeof(*rwlock));
+       lockdep_init_map_wait(&rwlock->dep_map, name, key, 0, LD_WAIT_CONFIG);
+}
+EXPORT_SYMBOL(__rt_rwlock_init);
+#endif
diff --git a/kernel/locking/ww_mutex.h b/kernel/locking/ww_mutex.h
new file mode 100644 (file)
index 0000000..56f1392
--- /dev/null
@@ -0,0 +1,569 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef WW_RT
+
+#define MUTEX          mutex
+#define MUTEX_WAITER   mutex_waiter
+
+static inline struct mutex_waiter *
+__ww_waiter_first(struct mutex *lock)
+{
+       struct mutex_waiter *w;
+
+       w = list_first_entry(&lock->wait_list, struct mutex_waiter, list);
+       if (list_entry_is_head(w, &lock->wait_list, list))
+               return NULL;
+
+       return w;
+}
+
+static inline struct mutex_waiter *
+__ww_waiter_next(struct mutex *lock, struct mutex_waiter *w)
+{
+       w = list_next_entry(w, list);
+       if (list_entry_is_head(w, &lock->wait_list, list))
+               return NULL;
+
+       return w;
+}
+
+static inline struct mutex_waiter *
+__ww_waiter_prev(struct mutex *lock, struct mutex_waiter *w)
+{
+       w = list_prev_entry(w, list);
+       if (list_entry_is_head(w, &lock->wait_list, list))
+               return NULL;
+
+       return w;
+}
+
+static inline struct mutex_waiter *
+__ww_waiter_last(struct mutex *lock)
+{
+       struct mutex_waiter *w;
+
+       w = list_last_entry(&lock->wait_list, struct mutex_waiter, list);
+       if (list_entry_is_head(w, &lock->wait_list, list))
+               return NULL;
+
+       return w;
+}
+
+static inline void
+__ww_waiter_add(struct mutex *lock, struct mutex_waiter *waiter, struct mutex_waiter *pos)
+{
+       struct list_head *p = &lock->wait_list;
+       if (pos)
+               p = &pos->list;
+       __mutex_add_waiter(lock, waiter, p);
+}
+
+static inline struct task_struct *
+__ww_mutex_owner(struct mutex *lock)
+{
+       return __mutex_owner(lock);
+}
+
+static inline bool
+__ww_mutex_has_waiters(struct mutex *lock)
+{
+       return atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS;
+}
+
+static inline void lock_wait_lock(struct mutex *lock)
+{
+       raw_spin_lock(&lock->wait_lock);
+}
+
+static inline void unlock_wait_lock(struct mutex *lock)
+{
+       raw_spin_unlock(&lock->wait_lock);
+}
+
+static inline void lockdep_assert_wait_lock_held(struct mutex *lock)
+{
+       lockdep_assert_held(&lock->wait_lock);
+}
+
+#else /* WW_RT */
+
+#define MUTEX          rt_mutex
+#define MUTEX_WAITER   rt_mutex_waiter
+
+static inline struct rt_mutex_waiter *
+__ww_waiter_first(struct rt_mutex *lock)
+{
+       struct rb_node *n = rb_first(&lock->rtmutex.waiters.rb_root);
+       if (!n)
+               return NULL;
+       return rb_entry(n, struct rt_mutex_waiter, tree_entry);
+}
+
+static inline struct rt_mutex_waiter *
+__ww_waiter_next(struct rt_mutex *lock, struct rt_mutex_waiter *w)
+{
+       struct rb_node *n = rb_next(&w->tree_entry);
+       if (!n)
+               return NULL;
+       return rb_entry(n, struct rt_mutex_waiter, tree_entry);
+}
+
+static inline struct rt_mutex_waiter *
+__ww_waiter_prev(struct rt_mutex *lock, struct rt_mutex_waiter *w)
+{
+       struct rb_node *n = rb_prev(&w->tree_entry);
+       if (!n)
+               return NULL;
+       return rb_entry(n, struct rt_mutex_waiter, tree_entry);
+}
+
+static inline struct rt_mutex_waiter *
+__ww_waiter_last(struct rt_mutex *lock)
+{
+       struct rb_node *n = rb_last(&lock->rtmutex.waiters.rb_root);
+       if (!n)
+               return NULL;
+       return rb_entry(n, struct rt_mutex_waiter, tree_entry);
+}
+
+static inline void
+__ww_waiter_add(struct rt_mutex *lock, struct rt_mutex_waiter *waiter, struct rt_mutex_waiter *pos)
+{
+       /* RT unconditionally adds the waiter first and then removes it on error */
+}
+
+static inline struct task_struct *
+__ww_mutex_owner(struct rt_mutex *lock)
+{
+       return rt_mutex_owner(&lock->rtmutex);
+}
+
+static inline bool
+__ww_mutex_has_waiters(struct rt_mutex *lock)
+{
+       return rt_mutex_has_waiters(&lock->rtmutex);
+}
+
+static inline void lock_wait_lock(struct rt_mutex *lock)
+{
+       raw_spin_lock(&lock->rtmutex.wait_lock);
+}
+
+static inline void unlock_wait_lock(struct rt_mutex *lock)
+{
+       raw_spin_unlock(&lock->rtmutex.wait_lock);
+}
+
+static inline void lockdep_assert_wait_lock_held(struct rt_mutex *lock)
+{
+       lockdep_assert_held(&lock->rtmutex.wait_lock);
+}
+
+#endif /* WW_RT */
+
+/*
+ * Wait-Die:
+ *   The newer transactions are killed when:
+ *     It (the new transaction) makes a request for a lock being held
+ *     by an older transaction.
+ *
+ * Wound-Wait:
+ *   The newer transactions are wounded when:
+ *     An older transaction makes a request for a lock being held by
+ *     the newer transaction.
+ */
+
+/*
+ * Associate the ww_mutex @ww with the context @ww_ctx under which we acquired
+ * it.
+ */
+static __always_inline void
+ww_mutex_lock_acquired(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
+{
+#ifdef DEBUG_WW_MUTEXES
+       /*
+        * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
+        * but released with a normal mutex_unlock in this call.
+        *
+        * This should never happen, always use ww_mutex_unlock.
+        */
+       DEBUG_LOCKS_WARN_ON(ww->ctx);
+
+       /*
+        * Not quite done after calling ww_acquire_done() ?
+        */
+       DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);
+
+       if (ww_ctx->contending_lock) {
+               /*
+                * After -EDEADLK you tried to
+                * acquire a different ww_mutex? Bad!
+                */
+               DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);
+
+               /*
+                * You called ww_mutex_lock after receiving -EDEADLK,
+                * but 'forgot' to unlock everything else first?
+                */
+               DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
+               ww_ctx->contending_lock = NULL;
+       }
+
+       /*
+        * Naughty, using a different class will lead to undefined behavior!
+        */
+       DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
+#endif
+       ww_ctx->acquired++;
+       ww->ctx = ww_ctx;
+}
+
+/*
+ * Determine if @a is 'less' than @b. IOW, either @a is a lower priority task
+ * or, when of equal priority, a younger transaction than @b.
+ *
+ * Depending on the algorithm, @a will either need to wait for @b, or die.
+ */
+static inline bool
+__ww_ctx_less(struct ww_acquire_ctx *a, struct ww_acquire_ctx *b)
+{
+/*
+ * Can only do the RT prio for WW_RT, because task->prio isn't stable due to PI,
+ * so the wait_list ordering will go wobbly. rt_mutex re-queues the waiter and
+ * isn't affected by this.
+ */
+#ifdef WW_RT
+       /* kernel prio; less is more */
+       int a_prio = a->task->prio;
+       int b_prio = b->task->prio;
+
+       if (rt_prio(a_prio) || rt_prio(b_prio)) {
+
+               if (a_prio > b_prio)
+                       return true;
+
+               if (a_prio < b_prio)
+                       return false;
+
+               /* equal static prio */
+
+               if (dl_prio(a_prio)) {
+                       if (dl_time_before(b->task->dl.deadline,
+                                          a->task->dl.deadline))
+                               return true;
+
+                       if (dl_time_before(a->task->dl.deadline,
+                                          b->task->dl.deadline))
+                               return false;
+               }
+
+               /* equal prio */
+       }
+#endif
+
+       /* FIFO order tie break -- bigger is younger */
+       return (signed long)(a->stamp - b->stamp) > 0;
+}
+
+/*
+ * Wait-Die; wake a lesser waiter context (when locks held) such that it can
+ * die.
+ *
+ * Among waiters with context, only the first one can have other locks acquired
+ * already (ctx->acquired > 0), because __ww_mutex_add_waiter() and
+ * __ww_mutex_check_kill() wake any but the earliest context.
+ */
+static bool
+__ww_mutex_die(struct MUTEX *lock, struct MUTEX_WAITER *waiter,
+              struct ww_acquire_ctx *ww_ctx)
+{
+       if (!ww_ctx->is_wait_die)
+               return false;
+
+       if (waiter->ww_ctx->acquired > 0 && __ww_ctx_less(waiter->ww_ctx, ww_ctx)) {
+#ifndef WW_RT
+               debug_mutex_wake_waiter(lock, waiter);
+#endif
+               wake_up_process(waiter->task);
+       }
+
+       return true;
+}
+
+/*
+ * Wound-Wait; wound a lesser @hold_ctx if it holds the lock.
+ *
+ * Wound the lock holder if there are waiters with more important transactions
+ * than the lock holders. Even if multiple waiters may wound the lock holder,
+ * it's sufficient that only one does.
+ */
+static bool __ww_mutex_wound(struct MUTEX *lock,
+                            struct ww_acquire_ctx *ww_ctx,
+                            struct ww_acquire_ctx *hold_ctx)
+{
+       struct task_struct *owner = __ww_mutex_owner(lock);
+
+       lockdep_assert_wait_lock_held(lock);
+
+       /*
+        * Possible through __ww_mutex_add_waiter() when we race with
+        * ww_mutex_set_context_fastpath(). In that case we'll get here again
+        * through __ww_mutex_check_waiters().
+        */
+       if (!hold_ctx)
+               return false;
+
+       /*
+        * Can have !owner because of __mutex_unlock_slowpath(), but if owner,
+        * it cannot go away because we'll have FLAG_WAITERS set and hold
+        * wait_lock.
+        */
+       if (!owner)
+               return false;
+
+       if (ww_ctx->acquired > 0 && __ww_ctx_less(hold_ctx, ww_ctx)) {
+               hold_ctx->wounded = 1;
+
+               /*
+                * wake_up_process() paired with set_current_state()
+                * inserts sufficient barriers to make sure @owner either sees
+                * it's wounded in __ww_mutex_check_kill() or has a
+                * wakeup pending to re-read the wounded state.
+                */
+               if (owner != current)
+                       wake_up_process(owner);
+
+               return true;
+       }
+
+       return false;
+}
+
+/*
+ * We just acquired @lock under @ww_ctx, if there are more important contexts
+ * waiting behind us on the wait-list, check if they need to die, or wound us.
+ *
+ * See __ww_mutex_add_waiter() for the list-order construction; basically the
+ * list is ordered by stamp, smallest (oldest) first.
+ *
+ * This relies on never mixing wait-die/wound-wait on the same wait-list;
+ * which is currently ensured by that being a ww_class property.
+ *
+ * The current task must not be on the wait list.
+ */
+static void
+__ww_mutex_check_waiters(struct MUTEX *lock, struct ww_acquire_ctx *ww_ctx)
+{
+       struct MUTEX_WAITER *cur;
+
+       lockdep_assert_wait_lock_held(lock);
+
+       for (cur = __ww_waiter_first(lock); cur;
+            cur = __ww_waiter_next(lock, cur)) {
+
+               if (!cur->ww_ctx)
+                       continue;
+
+               if (__ww_mutex_die(lock, cur, ww_ctx) ||
+                   __ww_mutex_wound(lock, cur->ww_ctx, ww_ctx))
+                       break;
+       }
+}
+
+/*
+ * After acquiring lock with fastpath, where we do not hold wait_lock, set ctx
+ * and wake up any waiters so they can recheck.
+ */
+static __always_inline void
+ww_mutex_set_context_fastpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
+{
+       ww_mutex_lock_acquired(lock, ctx);
+
+       /*
+        * The lock->ctx update should be visible on all cores before
+        * the WAITERS check is done, otherwise contended waiters might be
+        * missed. The contended waiters will either see ww_ctx == NULL
+        * and keep spinning, or it will acquire wait_lock, add itself
+        * to waiter list and sleep.
+        */
+       smp_mb(); /* See comments above and below. */
+
+       /*
+        * [W] ww->ctx = ctx        [W] MUTEX_FLAG_WAITERS
+        *     MB                       MB
+        * [R] MUTEX_FLAG_WAITERS   [R] ww->ctx
+        *
+        * The memory barrier above pairs with the memory barrier in
+        * __ww_mutex_add_waiter() and makes sure we either observe ww->ctx
+        * and/or !empty list.
+        */
+       if (likely(!__ww_mutex_has_waiters(&lock->base)))
+               return;
+
+       /*
+        * Uh oh, we raced in fastpath, check if any of the waiters need to
+        * die or wound us.
+        */
+       lock_wait_lock(&lock->base);
+       __ww_mutex_check_waiters(&lock->base, ctx);
+       unlock_wait_lock(&lock->base);
+}
+
+static __always_inline int
+__ww_mutex_kill(struct MUTEX *lock, struct ww_acquire_ctx *ww_ctx)
+{
+       if (ww_ctx->acquired > 0) {
+#ifdef DEBUG_WW_MUTEXES
+               struct ww_mutex *ww;
+
+               ww = container_of(lock, struct ww_mutex, base);
+               DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock);
+               ww_ctx->contending_lock = ww;
+#endif
+               return -EDEADLK;
+       }
+
+       return 0;
+}
+
+/*
+ * Check the wound condition for the current lock acquire.
+ *
+ * Wound-Wait: If we're wounded, kill ourself.
+ *
+ * Wait-Die: If we're trying to acquire a lock already held by an older
+ *           context, kill ourselves.
+ *
+ * Since __ww_mutex_add_waiter() orders the wait-list on stamp, we only have to
+ * look at waiters before us in the wait-list.
+ */
+static inline int
+__ww_mutex_check_kill(struct MUTEX *lock, struct MUTEX_WAITER *waiter,
+                     struct ww_acquire_ctx *ctx)
+{
+       struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
+       struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);
+       struct MUTEX_WAITER *cur;
+
+       if (ctx->acquired == 0)
+               return 0;
+
+       if (!ctx->is_wait_die) {
+               if (ctx->wounded)
+                       return __ww_mutex_kill(lock, ctx);
+
+               return 0;
+       }
+
+       if (hold_ctx && __ww_ctx_less(ctx, hold_ctx))
+               return __ww_mutex_kill(lock, ctx);
+
+       /*
+        * If there is a waiter in front of us that has a context, then its
+        * stamp is earlier than ours and we must kill ourself.
+        */
+       for (cur = __ww_waiter_prev(lock, waiter); cur;
+            cur = __ww_waiter_prev(lock, cur)) {
+
+               if (!cur->ww_ctx)
+                       continue;
+
+               return __ww_mutex_kill(lock, ctx);
+       }
+
+       return 0;
+}
+
+/*
+ * Add @waiter to the wait-list, keep the wait-list ordered by stamp, smallest
+ * first. Such that older contexts are preferred to acquire the lock over
+ * younger contexts.
+ *
+ * Waiters without context are interspersed in FIFO order.
+ *
+ * Furthermore, for Wait-Die kill ourself immediately when possible (there are
+ * older contexts already waiting) to avoid unnecessary waiting and for
+ * Wound-Wait ensure we wound the owning context when it is younger.
+ */
+static inline int
+__ww_mutex_add_waiter(struct MUTEX_WAITER *waiter,
+                     struct MUTEX *lock,
+                     struct ww_acquire_ctx *ww_ctx)
+{
+       struct MUTEX_WAITER *cur, *pos = NULL;
+       bool is_wait_die;
+
+       if (!ww_ctx) {
+               __ww_waiter_add(lock, waiter, NULL);
+               return 0;
+       }
+
+       is_wait_die = ww_ctx->is_wait_die;
+
+       /*
+        * Add the waiter before the first waiter with a higher stamp.
+        * Waiters without a context are skipped to avoid starving
+        * them. Wait-Die waiters may die here. Wound-Wait waiters
+        * never die here, but they are sorted in stamp order and
+        * may wound the lock holder.
+        */
+       for (cur = __ww_waiter_last(lock); cur;
+            cur = __ww_waiter_prev(lock, cur)) {
+
+               if (!cur->ww_ctx)
+                       continue;
+
+               if (__ww_ctx_less(ww_ctx, cur->ww_ctx)) {
+                       /*
+                        * Wait-Die: if we find an older context waiting, there
+                        * is no point in queueing behind it, as we'd have to
+                        * die the moment it would acquire the lock.
+                        */
+                       if (is_wait_die) {
+                               int ret = __ww_mutex_kill(lock, ww_ctx);
+
+                               if (ret)
+                                       return ret;
+                       }
+
+                       break;
+               }
+
+               pos = cur;
+
+               /* Wait-Die: ensure younger waiters die. */
+               __ww_mutex_die(lock, cur, ww_ctx);
+       }
+
+       __ww_waiter_add(lock, waiter, pos);
+
+       /*
+        * Wound-Wait: if we're blocking on a mutex owned by a younger context,
+        * wound that such that we might proceed.
+        */
+       if (!is_wait_die) {
+               struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
+
+               /*
+                * See ww_mutex_set_context_fastpath(). Orders setting
+                * MUTEX_FLAG_WAITERS vs the ww->ctx load,
+                * such that either we or the fastpath will wound @ww->ctx.
+                */
+               smp_mb();
+               __ww_mutex_wound(lock, ww_ctx, ww->ctx);
+       }
+
+       return 0;
+}
+
+static inline void __ww_mutex_unlock(struct ww_mutex *lock)
+{
+       if (lock->ctx) {
+#ifdef DEBUG_WW_MUTEXES
+               DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
+#endif
+               if (lock->ctx->acquired > 0)
+                       lock->ctx->acquired--;
+               lock->ctx = NULL;
+       }
+}
diff --git a/kernel/locking/ww_rt_mutex.c b/kernel/locking/ww_rt_mutex.c
new file mode 100644 (file)
index 0000000..3f1fff7
--- /dev/null
@@ -0,0 +1,76 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * rtmutex API
+ */
+#include <linux/spinlock.h>
+#include <linux/export.h>
+
+#define RT_MUTEX_BUILD_MUTEX
+#define WW_RT
+#include "rtmutex.c"
+
+static int __sched
+__ww_rt_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx,
+                  unsigned int state, unsigned long ip)
+{
+       struct lockdep_map __maybe_unused *nest_lock = NULL;
+       struct rt_mutex *rtm = &lock->base;
+       int ret;
+
+       might_sleep();
+
+       if (ww_ctx) {
+               if (unlikely(ww_ctx == READ_ONCE(lock->ctx)))
+                       return -EALREADY;
+
+               /*
+                * Reset the wounded flag after a kill. No other process can
+                * race and wound us here, since they can't have a valid owner
+                * pointer if we don't have any locks held.
+                */
+               if (ww_ctx->acquired == 0)
+                       ww_ctx->wounded = 0;
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+               nest_lock = &ww_ctx->dep_map;
+#endif
+       }
+       mutex_acquire_nest(&rtm->dep_map, 0, 0, nest_lock, ip);
+
+       if (likely(rt_mutex_cmpxchg_acquire(&rtm->rtmutex, NULL, current))) {
+               if (ww_ctx)
+                       ww_mutex_set_context_fastpath(lock, ww_ctx);
+               return 0;
+       }
+
+       ret = rt_mutex_slowlock(&rtm->rtmutex, ww_ctx, state);
+
+       if (ret)
+               mutex_release(&rtm->dep_map, ip);
+       return ret;
+}
+
+int __sched
+ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
+{
+       return __ww_rt_mutex_lock(lock, ctx, TASK_UNINTERRUPTIBLE, _RET_IP_);
+}
+EXPORT_SYMBOL(ww_mutex_lock);
+
+int __sched
+ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
+{
+       return __ww_rt_mutex_lock(lock, ctx, TASK_INTERRUPTIBLE, _RET_IP_);
+}
+EXPORT_SYMBOL(ww_mutex_lock_interruptible);
+
+void __sched ww_mutex_unlock(struct ww_mutex *lock)
+{
+       struct rt_mutex *rtm = &lock->base;
+
+       __ww_mutex_unlock(lock);
+
+       mutex_release(&rtm->dep_map, _RET_IP_);
+       __rt_mutex_unlock(&rtm->rtmutex);
+}
+EXPORT_SYMBOL(ww_mutex_unlock);
index d4d3ba6..18d3a5c 100644 (file)
@@ -9,19 +9,6 @@
  *
  * Copyright (c) 2020 Oracle and/or its affiliates.
  * Author: Daniel Jordan <daniel.m.jordan@oracle.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
  */
 
 #include <linux/completion.h>
@@ -211,7 +198,7 @@ int padata_do_parallel(struct padata_shell *ps,
        if ((pinst->flags & PADATA_RESET))
                goto out;
 
-       atomic_inc(&pd->refcnt);
+       refcount_inc(&pd->refcnt);
        padata->pd = pd;
        padata->cb_cpu = *cb_cpu;
 
@@ -383,7 +370,7 @@ static void padata_serial_worker(struct work_struct *serial_work)
        }
        local_bh_enable();
 
-       if (atomic_sub_and_test(cnt, &pd->refcnt))
+       if (refcount_sub_and_test(cnt, &pd->refcnt))
                padata_free_pd(pd);
 }
 
@@ -593,7 +580,7 @@ static struct parallel_data *padata_alloc_pd(struct padata_shell *ps)
        padata_init_reorder_list(pd);
        padata_init_squeues(pd);
        pd->seq_nr = -1;
-       atomic_set(&pd->refcnt, 1);
+       refcount_set(&pd->refcnt, 1);
        spin_lock_init(&pd->lock);
        pd->cpu = cpumask_first(pd->cpumask.pcpu);
        INIT_WORK(&pd->reorder_work, invoke_padata_reorder);
@@ -667,7 +654,7 @@ static int padata_replace(struct padata_instance *pinst)
        synchronize_rcu();
 
        list_for_each_entry_continue_reverse(ps, &pinst->pslist, list)
-               if (atomic_dec_and_test(&ps->opd->refcnt))
+               if (refcount_dec_and_test(&ps->opd->refcnt))
                        padata_free_pd(ps->opd);
 
        pinst->flags &= ~PADATA_RESET;
@@ -733,7 +720,7 @@ int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type,
        struct cpumask *serial_mask, *parallel_mask;
        int err = -EINVAL;
 
-       get_online_cpus();
+       cpus_read_lock();
        mutex_lock(&pinst->lock);
 
        switch (cpumask_type) {
@@ -753,7 +740,7 @@ int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type,
 
 out:
        mutex_unlock(&pinst->lock);
-       put_online_cpus();
+       cpus_read_unlock();
 
        return err;
 }
@@ -992,7 +979,7 @@ struct padata_instance *padata_alloc(const char *name)
        if (!pinst->parallel_wq)
                goto err_free_inst;
 
-       get_online_cpus();
+       cpus_read_lock();
 
        pinst->serial_wq = alloc_workqueue("%s_serial", WQ_MEM_RECLAIM |
                                           WQ_CPU_INTENSIVE, 1, name);
@@ -1026,7 +1013,7 @@ struct padata_instance *padata_alloc(const char *name)
                                                    &pinst->cpu_dead_node);
 #endif
 
-       put_online_cpus();
+       cpus_read_unlock();
 
        return pinst;
 
@@ -1036,7 +1023,7 @@ err_free_masks:
 err_free_serial_wq:
        destroy_workqueue(pinst->serial_wq);
 err_put_cpus:
-       put_online_cpus();
+       cpus_read_unlock();
        destroy_workqueue(pinst->parallel_wq);
 err_free_inst:
        kfree(pinst);
@@ -1074,9 +1061,9 @@ struct padata_shell *padata_alloc_shell(struct padata_instance *pinst)
 
        ps->pinst = pinst;
 
-       get_online_cpus();
+       cpus_read_lock();
        pd = padata_alloc_pd(ps);
-       put_online_cpus();
+       cpus_read_unlock();
 
        if (!pd)
                goto out_free_ps;
index 2daa278..8299bd7 100644 (file)
@@ -243,6 +243,24 @@ STANDARD_PARAM_DEF(ulong,  unsigned long,          "%lu",          kstrtoul);
 STANDARD_PARAM_DEF(ullong,     unsigned long long,     "%llu",         kstrtoull);
 STANDARD_PARAM_DEF(hexint,     unsigned int,           "%#08x",        kstrtouint);
 
+int param_set_uint_minmax(const char *val, const struct kernel_param *kp,
+               unsigned int min, unsigned int max)
+{
+       unsigned int num;
+       int ret;
+
+       if (!val)
+               return -EINVAL;
+       ret = kstrtouint(val, 0, &num);
+       if (ret)
+               return ret;
+       if (num < min || num > max)
+               return -EINVAL;
+       *((unsigned int *)kp->arg) = num;
+       return 0;
+}
+EXPORT_SYMBOL_GPL(param_set_uint_minmax);
+
 int param_set_charp(const char *val, const struct kernel_param *kp)
 {
        if (strlen(val) > 1024) {
index ebdf9c6..efe87db 100644 (file)
@@ -550,13 +550,21 @@ struct pid *pidfd_get_pid(unsigned int fd, unsigned int *flags)
  * Note, that this function can only be called after the fd table has
  * been unshared to avoid leaking the pidfd to the new process.
  *
+ * This symbol should not be explicitly exported to loadable modules.
+ *
  * Return: On success, a cloexec pidfd is returned.
  *         On error, a negative errno number will be returned.
  */
-static int pidfd_create(struct pid *pid, unsigned int flags)
+int pidfd_create(struct pid *pid, unsigned int flags)
 {
        int fd;
 
+       if (!pid || !pid_has_task(pid, PIDTYPE_TGID))
+               return -EINVAL;
+
+       if (flags & ~(O_NONBLOCK | O_RDWR | O_CLOEXEC))
+               return -EINVAL;
+
        fd = anon_inode_getfd("[pidfd]", &pidfd_fops, get_pid(pid),
                              flags | O_RDWR | O_CLOEXEC);
        if (fd < 0)
@@ -596,10 +604,7 @@ SYSCALL_DEFINE2(pidfd_open, pid_t, pid, unsigned int, flags)
        if (!p)
                return -ESRCH;
 
-       if (pid_has_task(p, PIDTYPE_TGID))
-               fd = pidfd_create(p, flags);
-       else
-               fd = -EINVAL;
+       fd = pidfd_create(p, flags);
 
        put_pid(p);
        return fd;
index dca51fe..2cc34a2 100644 (file)
@@ -487,7 +487,7 @@ retry:
        if (gp_async) {
                cur_ops->gp_barrier();
        }
-       writer_n_durations[me] = i_max;
+       writer_n_durations[me] = i_max + 1;
        torture_kthread_stopping("rcu_scale_writer");
        return 0;
 }
@@ -561,7 +561,7 @@ rcu_scale_cleanup(void)
                        wdpp = writer_durations[i];
                        if (!wdpp)
                                continue;
-                       for (j = 0; j <= writer_n_durations[i]; j++) {
+                       for (j = 0; j < writer_n_durations[i]; j++) {
                                wdp = &wdpp[j];
                                pr_alert("%s%s %4d writer-duration: %5d %llu\n",
                                        scale_type, SCALE_FLAG,
index 40ef541..ab42152 100644 (file)
@@ -2022,8 +2022,13 @@ static int rcu_torture_stall(void *args)
                          __func__, raw_smp_processor_id());
                while (ULONG_CMP_LT((unsigned long)ktime_get_seconds(),
                                    stop_at))
-                       if (stall_cpu_block)
+                       if (stall_cpu_block) {
+#ifdef CONFIG_PREEMPTION
+                               preempt_schedule();
+#else
                                schedule_timeout_uninterruptible(HZ);
+#endif
+                       }
                if (stall_cpu_irqsoff)
                        local_irq_enable();
                else if (!stall_cpu_block)
index d998a76..66dc14c 100644 (file)
@@ -467,6 +467,40 @@ static struct ref_scale_ops acqrel_ops = {
        .name           = "acqrel"
 };
 
+static volatile u64 stopopts;
+
+static void ref_clock_section(const int nloops)
+{
+       u64 x = 0;
+       int i;
+
+       preempt_disable();
+       for (i = nloops; i >= 0; i--)
+               x += ktime_get_real_fast_ns();
+       preempt_enable();
+       stopopts = x;
+}
+
+static void ref_clock_delay_section(const int nloops, const int udl, const int ndl)
+{
+       u64 x = 0;
+       int i;
+
+       preempt_disable();
+       for (i = nloops; i >= 0; i--) {
+               x += ktime_get_real_fast_ns();
+               un_delay(udl, ndl);
+       }
+       preempt_enable();
+       stopopts = x;
+}
+
+static struct ref_scale_ops clock_ops = {
+       .readsection    = ref_clock_section,
+       .delaysection   = ref_clock_delay_section,
+       .name           = "clock"
+};
+
 static void rcu_scale_one_reader(void)
 {
        if (readdelay <= 0)
@@ -759,7 +793,7 @@ ref_scale_init(void)
        int firsterr = 0;
        static struct ref_scale_ops *scale_ops[] = {
                &rcu_ops, &srcu_ops, &rcu_trace_ops, &rcu_tasks_ops, &refcnt_ops, &rwlock_ops,
-               &rwsem_ops, &lock_ops, &lock_irq_ops, &acqrel_ops,
+               &rwsem_ops, &lock_ops, &lock_irq_ops, &acqrel_ops, &clock_ops,
        };
 
        if (!torture_init_begin(scale_type, verbose))
index 26344dc..a0ba2ed 100644 (file)
@@ -96,7 +96,7 @@ EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
  */
 void __srcu_read_unlock(struct srcu_struct *ssp, int idx)
 {
-       int newval = ssp->srcu_lock_nesting[idx] - 1;
+       int newval = READ_ONCE(ssp->srcu_lock_nesting[idx]) - 1;
 
        WRITE_ONCE(ssp->srcu_lock_nesting[idx], newval);
        if (!newval && READ_ONCE(ssp->srcu_gp_waiting))
index 8536c55..806160c 100644 (file)
@@ -643,8 +643,8 @@ void exit_tasks_rcu_finish(void) { exit_tasks_rcu_finish_trace(current); }
 //
 // "Rude" variant of Tasks RCU, inspired by Steve Rostedt's trick of
 // passing an empty function to schedule_on_each_cpu().  This approach
-// provides an asynchronous call_rcu_tasks_rude() API and batching
-// of concurrent calls to the synchronous synchronize_rcu_rude() API.
+// provides an asynchronous call_rcu_tasks_rude() API and batching of
+// concurrent calls to the synchronous synchronize_rcu_tasks_rude() API.
 // This invokes schedule_on_each_cpu() in order to send IPIs far and wide
 // and induces otherwise unnecessary context switches on all online CPUs,
 // whether idle or not.
@@ -785,7 +785,10 @@ EXPORT_SYMBOL_GPL(show_rcu_tasks_rude_gp_kthread);
 //     set that task's .need_qs flag so that task's next outermost
 //     rcu_read_unlock_trace() will report the quiescent state (in which
 //     case the count of readers is incremented).  If both attempts fail,
-//     the task is added to a "holdout" list.
+//     the task is added to a "holdout" list.  Note that IPIs are used
+//     to invoke trc_read_check_handler() in the context of running tasks
+//     in order to avoid ordering overhead on common-case shared-variable
+//     accessses.
 // rcu_tasks_trace_postscan():
 //     Initialize state and attempt to identify an immediate quiescent
 //     state as above (but only for idle tasks), unblock CPU-hotplug
@@ -847,7 +850,7 @@ static DEFINE_IRQ_WORK(rcu_tasks_trace_iw, rcu_read_unlock_iw);
 /* If we are the last reader, wake up the grace-period kthread. */
 void rcu_read_unlock_trace_special(struct task_struct *t, int nesting)
 {
-       int nq = t->trc_reader_special.b.need_qs;
+       int nq = READ_ONCE(t->trc_reader_special.b.need_qs);
 
        if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) &&
            t->trc_reader_special.b.need_mb)
@@ -894,7 +897,7 @@ static void trc_read_check_handler(void *t_in)
 
        // If the task is not in a read-side critical section, and
        // if this is the last reader, awaken the grace-period kthread.
-       if (likely(!t->trc_reader_nesting)) {
+       if (likely(!READ_ONCE(t->trc_reader_nesting))) {
                if (WARN_ON_ONCE(atomic_dec_and_test(&trc_n_readers_need_end)))
                        wake_up(&trc_wait);
                // Mark as checked after decrement to avoid false
@@ -903,7 +906,7 @@ static void trc_read_check_handler(void *t_in)
                goto reset_ipi;
        }
        // If we are racing with an rcu_read_unlock_trace(), try again later.
-       if (unlikely(t->trc_reader_nesting < 0)) {
+       if (unlikely(READ_ONCE(t->trc_reader_nesting) < 0)) {
                if (WARN_ON_ONCE(atomic_dec_and_test(&trc_n_readers_need_end)))
                        wake_up(&trc_wait);
                goto reset_ipi;
@@ -913,14 +916,14 @@ static void trc_read_check_handler(void *t_in)
        // Get here if the task is in a read-side critical section.  Set
        // its state so that it will awaken the grace-period kthread upon
        // exit from that critical section.
-       WARN_ON_ONCE(t->trc_reader_special.b.need_qs);
+       WARN_ON_ONCE(READ_ONCE(t->trc_reader_special.b.need_qs));
        WRITE_ONCE(t->trc_reader_special.b.need_qs, true);
 
 reset_ipi:
        // Allow future IPIs to be sent on CPU and for task.
        // Also order this IPI handler against any later manipulations of
        // the intended task.
-       smp_store_release(&per_cpu(trc_ipi_to_cpu, smp_processor_id()), false); // ^^^
+       smp_store_release(per_cpu_ptr(&trc_ipi_to_cpu, smp_processor_id()), false); // ^^^
        smp_store_release(&texp->trc_ipi_to_cpu, -1); // ^^^
 }
 
@@ -950,6 +953,7 @@ static bool trc_inspect_reader(struct task_struct *t, void *arg)
                        n_heavy_reader_ofl_updates++;
                in_qs = true;
        } else {
+               // The task is not running, so C-language access is safe.
                in_qs = likely(!t->trc_reader_nesting);
        }
 
@@ -964,7 +968,7 @@ static bool trc_inspect_reader(struct task_struct *t, void *arg)
        // state so that it will awaken the grace-period kthread upon exit
        // from that critical section.
        atomic_inc(&trc_n_readers_need_end); // One more to wait on.
-       WARN_ON_ONCE(t->trc_reader_special.b.need_qs);
+       WARN_ON_ONCE(READ_ONCE(t->trc_reader_special.b.need_qs));
        WRITE_ONCE(t->trc_reader_special.b.need_qs, true);
        return true;
 }
@@ -982,7 +986,7 @@ static void trc_wait_for_one_reader(struct task_struct *t,
        // The current task had better be in a quiescent state.
        if (t == current) {
                t->trc_reader_checked = true;
-               WARN_ON_ONCE(t->trc_reader_nesting);
+               WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting));
                return;
        }
 
@@ -994,6 +998,12 @@ static void trc_wait_for_one_reader(struct task_struct *t,
        }
        put_task_struct(t);
 
+       // If this task is not yet on the holdout list, then we are in
+       // an RCU read-side critical section.  Otherwise, the invocation of
+       // rcu_add_holdout() that added it to the list did the necessary
+       // get_task_struct().  Either way, the task cannot be freed out
+       // from under this code.
+
        // If currently running, send an IPI, either way, add to list.
        trc_add_holdout(t, bhp);
        if (task_curr(t) &&
@@ -1092,8 +1102,8 @@ static void show_stalled_task_trace(struct task_struct *t, bool *firstreport)
                 ".I"[READ_ONCE(t->trc_ipi_to_cpu) > 0],
                 ".i"[is_idle_task(t)],
                 ".N"[cpu > 0 && tick_nohz_full_cpu(cpu)],
-                t->trc_reader_nesting,
-                " N"[!!t->trc_reader_special.b.need_qs],
+                READ_ONCE(t->trc_reader_nesting),
+                " N"[!!READ_ONCE(t->trc_reader_special.b.need_qs)],
                 cpu);
        sched_show_task(t);
 }
@@ -1187,7 +1197,7 @@ static void rcu_tasks_trace_postgp(struct rcu_tasks *rtp)
 static void exit_tasks_rcu_finish_trace(struct task_struct *t)
 {
        WRITE_ONCE(t->trc_reader_checked, true);
-       WARN_ON_ONCE(t->trc_reader_nesting);
+       WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting));
        WRITE_ONCE(t->trc_reader_nesting, 0);
        if (WARN_ON_ONCE(READ_ONCE(t->trc_reader_special.b.need_qs)))
                rcu_read_unlock_trace_special(t, 0);
index 51f24ec..bce848e 100644 (file)
 
 /* Data structures. */
 
-/*
- * Steal a bit from the bottom of ->dynticks for idle entry/exit
- * control.  Initially this is for TLB flushing.
- */
-#define RCU_DYNTICK_CTRL_MASK 0x1
-#define RCU_DYNTICK_CTRL_CTR  (RCU_DYNTICK_CTRL_MASK + 1)
-
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, rcu_data) = {
        .dynticks_nesting = 1,
        .dynticks_nmi_nesting = DYNTICK_IRQ_NONIDLE,
-       .dynticks = ATOMIC_INIT(RCU_DYNTICK_CTRL_CTR),
+       .dynticks = ATOMIC_INIT(1),
 #ifdef CONFIG_RCU_NOCB_CPU
        .cblist.flags = SEGCBLIST_SOFTIRQ_ONLY,
 #endif
@@ -258,6 +251,15 @@ void rcu_softirq_qs(void)
        rcu_tasks_qs(current, false);
 }
 
+/*
+ * Increment the current CPU's rcu_data structure's ->dynticks field
+ * with ordering.  Return the new value.
+ */
+static noinline noinstr unsigned long rcu_dynticks_inc(int incby)
+{
+       return arch_atomic_add_return(incby, this_cpu_ptr(&rcu_data.dynticks));
+}
+
 /*
  * Record entry into an extended quiescent state.  This is only to be
  * called when not already in an extended quiescent state, that is,
@@ -266,7 +268,6 @@ void rcu_softirq_qs(void)
  */
 static noinstr void rcu_dynticks_eqs_enter(void)
 {
-       struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
        int seq;
 
        /*
@@ -275,13 +276,9 @@ static noinstr void rcu_dynticks_eqs_enter(void)
         * next idle sojourn.
         */
        rcu_dynticks_task_trace_enter();  // Before ->dynticks update!
-       seq = arch_atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks);
+       seq = rcu_dynticks_inc(1);
        // RCU is no longer watching.  Better be in extended quiescent state!
-       WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
-                    (seq & RCU_DYNTICK_CTRL_CTR));
-       /* Better not have special action (TLB flush) pending! */
-       WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
-                    (seq & RCU_DYNTICK_CTRL_MASK));
+       WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && (seq & 0x1));
 }
 
 /*
@@ -291,7 +288,6 @@ static noinstr void rcu_dynticks_eqs_enter(void)
  */
 static noinstr void rcu_dynticks_eqs_exit(void)
 {
-       struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
        int seq;
 
        /*
@@ -299,15 +295,10 @@ static noinstr void rcu_dynticks_eqs_exit(void)
         * and we also must force ordering with the next RCU read-side
         * critical section.
         */
-       seq = arch_atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks);
+       seq = rcu_dynticks_inc(1);
        // RCU is now watching.  Better not be in an extended quiescent state!
        rcu_dynticks_task_trace_exit();  // After ->dynticks update!
-       WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
-                    !(seq & RCU_DYNTICK_CTRL_CTR));
-       if (seq & RCU_DYNTICK_CTRL_MASK) {
-               arch_atomic_andnot(RCU_DYNTICK_CTRL_MASK, &rdp->dynticks);
-               smp_mb__after_atomic(); /* _exit after clearing mask. */
-       }
+       WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !(seq & 0x1));
 }
 
 /*
@@ -324,9 +315,9 @@ static void rcu_dynticks_eqs_online(void)
 {
        struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
 
-       if (atomic_read(&rdp->dynticks) & RCU_DYNTICK_CTRL_CTR)
+       if (atomic_read(&rdp->dynticks) & 0x1)
                return;
-       atomic_add(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks);
+       rcu_dynticks_inc(1);
 }
 
 /*
@@ -336,9 +327,7 @@ static void rcu_dynticks_eqs_online(void)
  */
 static __always_inline bool rcu_dynticks_curr_cpu_in_eqs(void)
 {
-       struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
-
-       return !(arch_atomic_read(&rdp->dynticks) & RCU_DYNTICK_CTRL_CTR);
+       return !(atomic_read(this_cpu_ptr(&rcu_data.dynticks)) & 0x1);
 }
 
 /*
@@ -347,9 +336,8 @@ static __always_inline bool rcu_dynticks_curr_cpu_in_eqs(void)
  */
 static int rcu_dynticks_snap(struct rcu_data *rdp)
 {
-       int snap = atomic_add_return(0, &rdp->dynticks);
-
-       return snap & ~RCU_DYNTICK_CTRL_MASK;
+       smp_mb();  // Fundamental RCU ordering guarantee.
+       return atomic_read_acquire(&rdp->dynticks);
 }
 
 /*
@@ -358,7 +346,7 @@ static int rcu_dynticks_snap(struct rcu_data *rdp)
  */
 static bool rcu_dynticks_in_eqs(int snap)
 {
-       return !(snap & RCU_DYNTICK_CTRL_CTR);
+       return !(snap & 0x1);
 }
 
 /* Return true if the specified CPU is currently idle from an RCU viewpoint.  */
@@ -389,8 +377,7 @@ bool rcu_dynticks_zero_in_eqs(int cpu, int *vp)
        int snap;
 
        // If not quiescent, force back to earlier extended quiescent state.
-       snap = atomic_read(&rdp->dynticks) & ~(RCU_DYNTICK_CTRL_MASK |
-                                              RCU_DYNTICK_CTRL_CTR);
+       snap = atomic_read(&rdp->dynticks) & ~0x1;
 
        smp_rmb(); // Order ->dynticks and *vp reads.
        if (READ_ONCE(*vp))
@@ -398,32 +385,7 @@ bool rcu_dynticks_zero_in_eqs(int cpu, int *vp)
        smp_rmb(); // Order *vp read and ->dynticks re-read.
 
        // If still in the same extended quiescent state, we are good!
-       return snap == (atomic_read(&rdp->dynticks) & ~RCU_DYNTICK_CTRL_MASK);
-}
-
-/*
- * Set the special (bottom) bit of the specified CPU so that it
- * will take special action (such as flushing its TLB) on the
- * next exit from an extended quiescent state.  Returns true if
- * the bit was successfully set, or false if the CPU was not in
- * an extended quiescent state.
- */
-bool rcu_eqs_special_set(int cpu)
-{
-       int old;
-       int new;
-       int new_old;
-       struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
-
-       new_old = atomic_read(&rdp->dynticks);
-       do {
-               old = new_old;
-               if (old & RCU_DYNTICK_CTRL_CTR)
-                       return false;
-               new = old | RCU_DYNTICK_CTRL_MASK;
-               new_old = atomic_cmpxchg(&rdp->dynticks, old, new);
-       } while (new_old != old);
-       return true;
+       return snap == atomic_read(&rdp->dynticks);
 }
 
 /*
@@ -439,13 +401,12 @@ bool rcu_eqs_special_set(int cpu)
  */
 notrace void rcu_momentary_dyntick_idle(void)
 {
-       int special;
+       int seq;
 
        raw_cpu_write(rcu_data.rcu_need_heavy_qs, false);
-       special = atomic_add_return(2 * RCU_DYNTICK_CTRL_CTR,
-                                   &this_cpu_ptr(&rcu_data)->dynticks);
+       seq = rcu_dynticks_inc(2);
        /* It is illegal to call this from idle state. */
-       WARN_ON_ONCE(!(special & RCU_DYNTICK_CTRL_CTR));
+       WARN_ON_ONCE(!(seq & 0x1));
        rcu_preempt_deferred_qs(current);
 }
 EXPORT_SYMBOL_GPL(rcu_momentary_dyntick_idle);
@@ -1325,7 +1286,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
         */
        jtsq = READ_ONCE(jiffies_to_sched_qs);
        ruqp = per_cpu_ptr(&rcu_data.rcu_urgent_qs, rdp->cpu);
-       rnhqp = &per_cpu(rcu_data.rcu_need_heavy_qs, rdp->cpu);
+       rnhqp = per_cpu_ptr(&rcu_data.rcu_need_heavy_qs, rdp->cpu);
        if (!READ_ONCE(*rnhqp) &&
            (time_after(jiffies, rcu_state.gp_start + jtsq * 2) ||
             time_after(jiffies, rcu_state.jiffies_resched) ||
@@ -1772,7 +1733,7 @@ static void rcu_strict_gp_boundary(void *unused)
 /*
  * Initialize a new grace period.  Return false if no grace period required.
  */
-static bool rcu_gp_init(void)
+static noinline_for_stack bool rcu_gp_init(void)
 {
        unsigned long firstseq;
        unsigned long flags;
@@ -1966,7 +1927,7 @@ static void rcu_gp_fqs(bool first_time)
 /*
  * Loop doing repeated quiescent-state forcing until the grace period ends.
  */
-static void rcu_gp_fqs_loop(void)
+static noinline_for_stack void rcu_gp_fqs_loop(void)
 {
        bool first_gp_fqs;
        int gf = 0;
@@ -1993,8 +1954,8 @@ static void rcu_gp_fqs_loop(void)
                trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
                                       TPS("fqswait"));
                WRITE_ONCE(rcu_state.gp_state, RCU_GP_WAIT_FQS);
-               ret = swait_event_idle_timeout_exclusive(
-                               rcu_state.gp_wq, rcu_gp_fqs_check_wake(&gf), j);
+               (void)swait_event_idle_timeout_exclusive(rcu_state.gp_wq,
+                                rcu_gp_fqs_check_wake(&gf), j);
                rcu_gp_torture_wait();
                WRITE_ONCE(rcu_state.gp_state, RCU_GP_DOING_FQS);
                /* Locking provides needed memory barriers. */
@@ -2471,9 +2432,6 @@ int rcutree_dead_cpu(unsigned int cpu)
        WRITE_ONCE(rcu_state.n_online_cpus, rcu_state.n_online_cpus - 1);
        /* Adjust any no-longer-needed kthreads. */
        rcu_boost_kthread_setaffinity(rnp, -1);
-       /* Do any needed no-CB deferred wakeups from this CPU. */
-       do_nocb_deferred_wakeup(per_cpu_ptr(&rcu_data, cpu));
-
        // Stop-machine done, so allow nohz_full to disable tick.
        tick_dep_clear(TICK_DEP_BIT_RCU);
        return 0;
@@ -4050,7 +4008,7 @@ void rcu_barrier(void)
         */
        init_completion(&rcu_state.barrier_completion);
        atomic_set(&rcu_state.barrier_cpu_count, 2);
-       get_online_cpus();
+       cpus_read_lock();
 
        /*
         * Force each CPU with callbacks to register a new callback.
@@ -4081,7 +4039,7 @@ void rcu_barrier(void)
                                          rcu_state.barrier_sequence);
                }
        }
-       put_online_cpus();
+       cpus_read_unlock();
 
        /*
         * Now that we have an rcu_barrier_callback() callback on each
@@ -4784,4 +4742,5 @@ void __init rcu_init(void)
 
 #include "tree_stall.h"
 #include "tree_exp.h"
+#include "tree_nocb.h"
 #include "tree_plugin.h"
diff --git a/kernel/rcu/tree_nocb.h b/kernel/rcu/tree_nocb.h
new file mode 100644 (file)
index 0000000..8fdf44f
--- /dev/null
@@ -0,0 +1,1496 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Read-Copy Update mechanism for mutual exclusion (tree-based version)
+ * Internal non-public definitions that provide either classic
+ * or preemptible semantics.
+ *
+ * Copyright Red Hat, 2009
+ * Copyright IBM Corporation, 2009
+ * Copyright SUSE, 2021
+ *
+ * Author: Ingo Molnar <mingo@elte.hu>
+ *        Paul E. McKenney <paulmck@linux.ibm.com>
+ *        Frederic Weisbecker <frederic@kernel.org>
+ */
+
+#ifdef CONFIG_RCU_NOCB_CPU
+static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */
+static bool __read_mostly rcu_nocb_poll;    /* Offload kthread are to poll. */
+static inline int rcu_lockdep_is_held_nocb(struct rcu_data *rdp)
+{
+       return lockdep_is_held(&rdp->nocb_lock);
+}
+
+static inline bool rcu_current_is_nocb_kthread(struct rcu_data *rdp)
+{
+       /* Race on early boot between thread creation and assignment */
+       if (!rdp->nocb_cb_kthread || !rdp->nocb_gp_kthread)
+               return true;
+
+       if (current == rdp->nocb_cb_kthread || current == rdp->nocb_gp_kthread)
+               if (in_task())
+                       return true;
+       return false;
+}
+
+/*
+ * Offload callback processing from the boot-time-specified set of CPUs
+ * specified by rcu_nocb_mask.  For the CPUs in the set, there are kthreads
+ * created that pull the callbacks from the corresponding CPU, wait for
+ * a grace period to elapse, and invoke the callbacks.  These kthreads
+ * are organized into GP kthreads, which manage incoming callbacks, wait for
+ * grace periods, and awaken CB kthreads, and the CB kthreads, which only
+ * invoke callbacks.  Each GP kthread invokes its own CBs.  The no-CBs CPUs
+ * do a wake_up() on their GP kthread when they insert a callback into any
+ * empty list, unless the rcu_nocb_poll boot parameter has been specified,
+ * in which case each kthread actively polls its CPU.  (Which isn't so great
+ * for energy efficiency, but which does reduce RCU's overhead on that CPU.)
+ *
+ * This is intended to be used in conjunction with Frederic Weisbecker's
+ * adaptive-idle work, which would seriously reduce OS jitter on CPUs
+ * running CPU-bound user-mode computations.
+ *
+ * Offloading of callbacks can also be used as an energy-efficiency
+ * measure because CPUs with no RCU callbacks queued are more aggressive
+ * about entering dyntick-idle mode.
+ */
+
+
+/*
+ * Parse the boot-time rcu_nocb_mask CPU list from the kernel parameters.
+ * If the list is invalid, a warning is emitted and all CPUs are offloaded.
+ */
+static int __init rcu_nocb_setup(char *str)
+{
+       alloc_bootmem_cpumask_var(&rcu_nocb_mask);
+       if (cpulist_parse(str, rcu_nocb_mask)) {
+               pr_warn("rcu_nocbs= bad CPU range, all CPUs set\n");
+               cpumask_setall(rcu_nocb_mask);
+       }
+       return 1;
+}
+__setup("rcu_nocbs=", rcu_nocb_setup);
+
+static int __init parse_rcu_nocb_poll(char *arg)
+{
+       rcu_nocb_poll = true;
+       return 0;
+}
+early_param("rcu_nocb_poll", parse_rcu_nocb_poll);
+
+/*
+ * Don't bother bypassing ->cblist if the call_rcu() rate is low.
+ * After all, the main point of bypassing is to avoid lock contention
+ * on ->nocb_lock, which only can happen at high call_rcu() rates.
+ */
+static int nocb_nobypass_lim_per_jiffy = 16 * 1000 / HZ;
+module_param(nocb_nobypass_lim_per_jiffy, int, 0);
+
+/*
+ * Acquire the specified rcu_data structure's ->nocb_bypass_lock.  If the
+ * lock isn't immediately available, increment ->nocb_lock_contended to
+ * flag the contention.
+ */
+static void rcu_nocb_bypass_lock(struct rcu_data *rdp)
+       __acquires(&rdp->nocb_bypass_lock)
+{
+       lockdep_assert_irqs_disabled();
+       if (raw_spin_trylock(&rdp->nocb_bypass_lock))
+               return;
+       atomic_inc(&rdp->nocb_lock_contended);
+       WARN_ON_ONCE(smp_processor_id() != rdp->cpu);
+       smp_mb__after_atomic(); /* atomic_inc() before lock. */
+       raw_spin_lock(&rdp->nocb_bypass_lock);
+       smp_mb__before_atomic(); /* atomic_dec() after lock. */
+       atomic_dec(&rdp->nocb_lock_contended);
+}
+
+/*
+ * Spinwait until the specified rcu_data structure's ->nocb_lock is
+ * not contended.  Please note that this is extremely special-purpose,
+ * relying on the fact that at most two kthreads and one CPU contend for
+ * this lock, and also that the two kthreads are guaranteed to have frequent
+ * grace-period-duration time intervals between successive acquisitions
+ * of the lock.  This allows us to use an extremely simple throttling
+ * mechanism, and further to apply it only to the CPU doing floods of
+ * call_rcu() invocations.  Don't try this at home!
+ */
+static void rcu_nocb_wait_contended(struct rcu_data *rdp)
+{
+       WARN_ON_ONCE(smp_processor_id() != rdp->cpu);
+       while (WARN_ON_ONCE(atomic_read(&rdp->nocb_lock_contended)))
+               cpu_relax();
+}
+
+/*
+ * Conditionally acquire the specified rcu_data structure's
+ * ->nocb_bypass_lock.
+ */
+static bool rcu_nocb_bypass_trylock(struct rcu_data *rdp)
+{
+       lockdep_assert_irqs_disabled();
+       return raw_spin_trylock(&rdp->nocb_bypass_lock);
+}
+
+/*
+ * Release the specified rcu_data structure's ->nocb_bypass_lock.
+ */
+static void rcu_nocb_bypass_unlock(struct rcu_data *rdp)
+       __releases(&rdp->nocb_bypass_lock)
+{
+       lockdep_assert_irqs_disabled();
+       raw_spin_unlock(&rdp->nocb_bypass_lock);
+}
+
+/*
+ * Acquire the specified rcu_data structure's ->nocb_lock, but only
+ * if it corresponds to a no-CBs CPU.
+ */
+static void rcu_nocb_lock(struct rcu_data *rdp)
+{
+       lockdep_assert_irqs_disabled();
+       if (!rcu_rdp_is_offloaded(rdp))
+               return;
+       raw_spin_lock(&rdp->nocb_lock);
+}
+
+/*
+ * Release the specified rcu_data structure's ->nocb_lock, but only
+ * if it corresponds to a no-CBs CPU.
+ */
+static void rcu_nocb_unlock(struct rcu_data *rdp)
+{
+       if (rcu_rdp_is_offloaded(rdp)) {
+               lockdep_assert_irqs_disabled();
+               raw_spin_unlock(&rdp->nocb_lock);
+       }
+}
+
+/*
+ * Release the specified rcu_data structure's ->nocb_lock and restore
+ * interrupts, but only if it corresponds to a no-CBs CPU.
+ */
+static void rcu_nocb_unlock_irqrestore(struct rcu_data *rdp,
+                                      unsigned long flags)
+{
+       if (rcu_rdp_is_offloaded(rdp)) {
+               lockdep_assert_irqs_disabled();
+               raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
+       } else {
+               local_irq_restore(flags);
+       }
+}
+
+/* Lockdep check that ->cblist may be safely accessed. */
+static void rcu_lockdep_assert_cblist_protected(struct rcu_data *rdp)
+{
+       lockdep_assert_irqs_disabled();
+       if (rcu_rdp_is_offloaded(rdp))
+               lockdep_assert_held(&rdp->nocb_lock);
+}
+
+/*
+ * Wake up any no-CBs CPUs' kthreads that were waiting on the just-ended
+ * grace period.
+ */
+static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq)
+{
+       swake_up_all(sq);
+}
+
+static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp)
+{
+       return &rnp->nocb_gp_wq[rcu_seq_ctr(rnp->gp_seq) & 0x1];
+}
+
+static void rcu_init_one_nocb(struct rcu_node *rnp)
+{
+       init_swait_queue_head(&rnp->nocb_gp_wq[0]);
+       init_swait_queue_head(&rnp->nocb_gp_wq[1]);
+}
+
+/* Is the specified CPU a no-CBs CPU? */
+bool rcu_is_nocb_cpu(int cpu)
+{
+       if (cpumask_available(rcu_nocb_mask))
+               return cpumask_test_cpu(cpu, rcu_nocb_mask);
+       return false;
+}
+
+/*
+ * Wake the grace-period kthread for the group led by rdp_gp, on behalf of
+ * rdp.  Caller holds rdp_gp->nocb_gp_lock with interrupts disabled; this
+ * function drops that lock on all paths (see __releases below).  Returns
+ * true if a wakeup was actually issued.
+ */
+static bool __wake_nocb_gp(struct rcu_data *rdp_gp,
+                          struct rcu_data *rdp,
+                          bool force, unsigned long flags)
+       __releases(rdp_gp->nocb_gp_lock)
+{
+       bool needwake = false;
+
+       /* No kthread yet?  Nothing to wake. */
+       if (!READ_ONCE(rdp_gp->nocb_gp_kthread)) {
+               raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);
+               trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
+                                   TPS("AlreadyAwake"));
+               return false;
+       }
+
+       /* An immediate wakeup supersedes any pending deferred wakeup. */
+       if (rdp_gp->nocb_defer_wakeup > RCU_NOCB_WAKE_NOT) {
+               WRITE_ONCE(rdp_gp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT);
+               del_timer(&rdp_gp->nocb_timer);
+       }
+
+       /* Wake only if forced or the kthread is actually asleep. */
+       if (force || READ_ONCE(rdp_gp->nocb_gp_sleep)) {
+               WRITE_ONCE(rdp_gp->nocb_gp_sleep, false);
+               needwake = true;
+       }
+       raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);
+       if (needwake) {
+               trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DoWake"));
+               wake_up_process(rdp_gp->nocb_gp_kthread);
+       }
+
+       return needwake;
+}
+
+/*
+ * Kick the GP kthread for this NOCB group.  Acquires the group leader's
+ * ->nocb_gp_lock, which __wake_nocb_gp() releases.  Returns true if a
+ * wakeup was issued.
+ */
+static bool wake_nocb_gp(struct rcu_data *rdp, bool force)
+{
+       unsigned long flags;
+       struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;
+
+       raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
+       return __wake_nocb_gp(rdp_gp, rdp, force, flags);
+}
+
+/*
+ * Arrange to wake the GP kthread for this NOCB group at some future
+ * time when it is safe to do so.  @waketype records the strongest
+ * deferred-wakeup reason so far; @reason is the tracepoint string.
+ */
+static void wake_nocb_gp_defer(struct rcu_data *rdp, int waketype,
+                              const char *reason)
+{
+       unsigned long flags;
+       struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;
+
+       raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
+
+       /*
+        * Bypass wakeup overrides previous deferments. In case
+        * of callback storm, no need to wake up too early.
+        */
+       if (waketype == RCU_NOCB_WAKE_BYPASS) {
+               mod_timer(&rdp_gp->nocb_timer, jiffies + 2);
+               WRITE_ONCE(rdp_gp->nocb_defer_wakeup, waketype);
+       } else {
+               /* Otherwise wake soon, and only strengthen the recorded reason. */
+               if (rdp_gp->nocb_defer_wakeup < RCU_NOCB_WAKE)
+                       mod_timer(&rdp_gp->nocb_timer, jiffies + 1);
+               if (rdp_gp->nocb_defer_wakeup < waketype)
+                       WRITE_ONCE(rdp_gp->nocb_defer_wakeup, waketype);
+       }
+
+       raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);
+
+       trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, reason);
+}
+
+/*
+ * Flush the ->nocb_bypass queue into ->cblist, enqueuing rhp if non-NULL.
+ * However, if there is a callback to be enqueued and if ->nocb_bypass
+ * proves to be initially empty, just return false because the no-CB GP
+ * kthread may need to be awakened in this case.
+ *
+ * Caller must hold ->nocb_bypass_lock; it is released on all paths.
+ *
+ * Note that this function always returns true if rhp is NULL.
+ */
+static bool rcu_nocb_do_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
+                                    unsigned long j)
+{
+       struct rcu_cblist rcl;
+
+       WARN_ON_ONCE(!rcu_rdp_is_offloaded(rdp));
+       rcu_lockdep_assert_cblist_protected(rdp);
+       lockdep_assert_held(&rdp->nocb_bypass_lock);
+       if (rhp && !rcu_cblist_n_cbs(&rdp->nocb_bypass)) {
+               raw_spin_unlock(&rdp->nocb_bypass_lock);
+               return false;
+       }
+       /* Note: ->cblist.len already accounts for ->nocb_bypass contents. */
+       if (rhp)
+               rcu_segcblist_inc_len(&rdp->cblist); /* Must precede enqueue. */
+       rcu_cblist_flush_enqueue(&rcl, &rdp->nocb_bypass, rhp);
+       rcu_segcblist_insert_pend_cbs(&rdp->cblist, &rcl);
+       WRITE_ONCE(rdp->nocb_bypass_first, j); /* Record time of this flush. */
+       rcu_nocb_bypass_unlock(rdp);
+       return true;
+}
+
+/*
+ * Flush the ->nocb_bypass queue into ->cblist, enqueuing rhp if non-NULL.
+ * However, if there is a callback to be enqueued and if ->nocb_bypass
+ * proves to be initially empty, just return false because the no-CB GP
+ * kthread may need to be awakened in this case.
+ *
+ * Acquires ->nocb_bypass_lock, which rcu_nocb_do_flush_bypass() releases.
+ *
+ * Note that this function always returns true if rhp is NULL.
+ */
+static bool rcu_nocb_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
+                                 unsigned long j)
+{
+       /* Non-offloaded CPUs have no bypass list to flush. */
+       if (!rcu_rdp_is_offloaded(rdp))
+               return true;
+       rcu_lockdep_assert_cblist_protected(rdp);
+       rcu_nocb_bypass_lock(rdp);
+       return rcu_nocb_do_flush_bypass(rdp, rhp, j);
+}
+
+/*
+ * If the ->nocb_bypass_lock is immediately available, flush the
+ * ->nocb_bypass queue into ->cblist.  Gives up silently on trylock
+ * failure rather than contending for the lock.
+ */
+static void rcu_nocb_try_flush_bypass(struct rcu_data *rdp, unsigned long j)
+{
+       rcu_lockdep_assert_cblist_protected(rdp);
+       if (!rcu_rdp_is_offloaded(rdp) ||
+           !rcu_nocb_bypass_trylock(rdp))
+               return;
+       /* rhp == NULL, so the flush cannot fail (see do_flush_bypass). */
+       WARN_ON_ONCE(!rcu_nocb_do_flush_bypass(rdp, NULL, j));
+}
+
+/*
+ * See whether it is appropriate to use the ->nocb_bypass list in order
+ * to control contention on ->nocb_lock.  A limited number of direct
+ * enqueues are permitted into ->cblist per jiffy.  If ->nocb_bypass
+ * is non-empty, further callbacks must be placed into ->nocb_bypass,
+ * otherwise rcu_barrier() breaks.  Use rcu_nocb_flush_bypass() to switch
+ * back to direct use of ->cblist.  However, ->nocb_bypass should not be
+ * used if ->cblist is empty, because otherwise callbacks can be stranded
+ * on ->nocb_bypass because we cannot count on the current CPU ever again
+ * invoking call_rcu().  The general rule is that if ->nocb_bypass is
+ * non-empty, the corresponding no-CBs grace-period kthread must not be
+ * in an indefinite sleep state.
+ *
+ * Finally, it is not permitted to use the bypass during early boot,
+ * as doing so would confuse the auto-initialization code.  Besides
+ * which, there is no point in worrying about lock contention while
+ * there is only one CPU in operation.
+ *
+ * Returns true if rhp was enqueued (into bypass or ->cblist) here, false
+ * if the caller must enqueue it.  On false return with offloading active,
+ * ->nocb_lock is held and *was_alldone reports whether ->cblist had no
+ * pending callbacks.
+ */
+static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
+                               bool *was_alldone, unsigned long flags)
+{
+       unsigned long c;
+       unsigned long cur_gp_seq;
+       unsigned long j = jiffies;
+       long ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
+
+       lockdep_assert_irqs_disabled();
+
+       // Pure softirq/rcuc based processing: no bypassing, no
+       // locking.
+       if (!rcu_rdp_is_offloaded(rdp)) {
+               *was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
+               return false;
+       }
+
+       // In the process of (de-)offloading: no bypassing, but
+       // locking.
+       if (!rcu_segcblist_completely_offloaded(&rdp->cblist)) {
+               rcu_nocb_lock(rdp);
+               *was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
+               return false; /* Not offloaded, no bypassing. */
+       }
+
+       // Don't use ->nocb_bypass during early boot.
+       if (rcu_scheduler_active != RCU_SCHEDULER_RUNNING) {
+               rcu_nocb_lock(rdp);
+               WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
+               *was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
+               return false;
+       }
+
+       // If we have advanced to a new jiffy, reset counts to allow
+       // moving back from ->nocb_bypass to ->cblist.
+       if (j == rdp->nocb_nobypass_last) {
+               c = rdp->nocb_nobypass_count + 1;
+       } else {
+               WRITE_ONCE(rdp->nocb_nobypass_last, j);
+               // Decay the count by the per-jiffy limit, clamped to
+               // [0, nocb_nobypass_lim_per_jiffy].
+               c = rdp->nocb_nobypass_count - nocb_nobypass_lim_per_jiffy;
+               if (ULONG_CMP_LT(rdp->nocb_nobypass_count,
+                                nocb_nobypass_lim_per_jiffy))
+                       c = 0;
+               else if (c > nocb_nobypass_lim_per_jiffy)
+                       c = nocb_nobypass_lim_per_jiffy;
+       }
+       WRITE_ONCE(rdp->nocb_nobypass_count, c);
+
+       // If there hasn't yet been all that many ->cblist enqueues
+       // this jiffy, tell the caller to enqueue onto ->cblist.  But flush
+       // ->nocb_bypass first.
+       if (rdp->nocb_nobypass_count < nocb_nobypass_lim_per_jiffy) {
+               rcu_nocb_lock(rdp);
+               *was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
+               if (*was_alldone)
+                       trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
+                                           TPS("FirstQ"));
+               WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, j));
+               WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
+               return false; // Caller must enqueue the callback.
+       }
+
+       // If ->nocb_bypass has been used too long or is too full,
+       // flush ->nocb_bypass to ->cblist.
+       if ((ncbs && j != READ_ONCE(rdp->nocb_bypass_first)) ||
+           ncbs >= qhimark) {
+               rcu_nocb_lock(rdp);
+               if (!rcu_nocb_flush_bypass(rdp, rhp, j)) {
+                       // Bypass was empty: rhp was NOT enqueued, caller must.
+                       *was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
+                       if (*was_alldone)
+                               trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
+                                                   TPS("FirstQ"));
+                       WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
+                       return false; // Caller must enqueue the callback.
+               }
+               // Advance callbacks if a grace period has completed, at most
+               // once per jiffy.
+               if (j != rdp->nocb_gp_adv_time &&
+                   rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) &&
+                   rcu_seq_done(&rdp->mynode->gp_seq, cur_gp_seq)) {
+                       rcu_advance_cbs_nowake(rdp->mynode, rdp);
+                       rdp->nocb_gp_adv_time = j;
+               }
+               rcu_nocb_unlock_irqrestore(rdp, flags);
+               return true; // Callback already enqueued.
+       }
+
+       // We need to use the bypass.
+       rcu_nocb_wait_contended(rdp);
+       rcu_nocb_bypass_lock(rdp);
+       ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
+       rcu_segcblist_inc_len(&rdp->cblist); /* Must precede enqueue. */
+       rcu_cblist_enqueue(&rdp->nocb_bypass, rhp);
+       if (!ncbs) {
+               WRITE_ONCE(rdp->nocb_bypass_first, j);
+               trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("FirstBQ"));
+       }
+       rcu_nocb_bypass_unlock(rdp);
+       smp_mb(); /* Order enqueue before wake. */
+       if (ncbs) {
+               local_irq_restore(flags);
+       } else {
+               // No-CBs GP kthread might be indefinitely asleep, if so, wake.
+               rcu_nocb_lock(rdp); // Rare during call_rcu() flood.
+               if (!rcu_segcblist_pend_cbs(&rdp->cblist)) {
+                       trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
+                                           TPS("FirstBQwake"));
+                       __call_rcu_nocb_wake(rdp, true, flags);
+               } else {
+                       trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
+                                           TPS("FirstBQnoWake"));
+                       rcu_nocb_unlock_irqrestore(rdp, flags);
+               }
+       }
+       return true; // Callback already enqueued.
+}
+
+/*
+ * Awaken the no-CBs grace-period kthread if needed, either due to it
+ * legitimately being asleep or due to overload conditions.
+ *
+ * If warranted, also wake up the kthread servicing this CPU's queues.
+ *
+ * Caller holds ->nocb_lock (see __releases); it is dropped on all paths.
+ */
+static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_alldone,
+                                unsigned long flags)
+                                __releases(rdp->nocb_lock)
+{
+       unsigned long cur_gp_seq;
+       unsigned long j;
+       long len;
+       struct task_struct *t;
+
+       // If we are being polled or there is no kthread, just leave.
+       t = READ_ONCE(rdp->nocb_gp_kthread);
+       if (rcu_nocb_poll || !t) {
+               rcu_nocb_unlock_irqrestore(rdp, flags);
+               trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
+                                   TPS("WakeNotPoll"));
+               return;
+       }
+       // Need to actually do a wakeup.
+       len = rcu_segcblist_n_cbs(&rdp->cblist);
+       if (was_alldone) {
+               rdp->qlen_last_fqs_check = len;
+               if (!irqs_disabled_flags(flags)) {
+                       /* ... if queue was empty ... */
+                       rcu_nocb_unlock_irqrestore(rdp, flags);
+                       wake_nocb_gp(rdp, false);
+                       trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
+                                           TPS("WakeEmpty"));
+               } else {
+                       /* Cannot wake with irqs disabled, so defer to a timer. */
+                       rcu_nocb_unlock_irqrestore(rdp, flags);
+                       wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE,
+                                          TPS("WakeEmptyIsDeferred"));
+               }
+       } else if (len > rdp->qlen_last_fqs_check + qhimark) {
+               /* ... or if many callbacks queued. */
+               rdp->qlen_last_fqs_check = len;
+               j = jiffies;
+               // Advance callbacks if a grace period has completed, at most
+               // once per jiffy.
+               if (j != rdp->nocb_gp_adv_time &&
+                   rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) &&
+                   rcu_seq_done(&rdp->mynode->gp_seq, cur_gp_seq)) {
+                       rcu_advance_cbs_nowake(rdp->mynode, rdp);
+                       rdp->nocb_gp_adv_time = j;
+               }
+               smp_mb(); /* Enqueue before timer_pending(). */
+               if ((rdp->nocb_cb_sleep ||
+                    !rcu_segcblist_ready_cbs(&rdp->cblist)) &&
+                   !timer_pending(&rdp->nocb_timer)) {
+                       rcu_nocb_unlock_irqrestore(rdp, flags);
+                       wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE_FORCE,
+                                          TPS("WakeOvfIsDeferred"));
+               } else {
+                       rcu_nocb_unlock_irqrestore(rdp, flags);
+                       trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WakeNot"));
+               }
+       } else {
+               rcu_nocb_unlock_irqrestore(rdp, flags);
+               trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WakeNot"));
+       }
+       return;
+}
+
+/*
+ * Check if we ignore this rdp.
+ *
+ * We check that without holding the nocb lock but
+ * we make sure not to miss a freshly offloaded rdp
+ * with the current ordering:
+ *
+ *  rdp_offload_toggle()        nocb_gp_enabled_cb()
+ * -------------------------   ----------------------------
+ *    WRITE flags                 LOCK nocb_gp_lock
+ *    LOCK nocb_gp_lock           READ/WRITE nocb_gp_sleep
+ *    READ/WRITE nocb_gp_sleep    UNLOCK nocb_gp_lock
+ *    UNLOCK nocb_gp_lock         READ flags
+ */
+static inline bool nocb_gp_enabled_cb(struct rcu_data *rdp)
+{
+       /* Both flags must be set for the GP kthread to service this rdp. */
+       u8 flags = SEGCBLIST_OFFLOADED | SEGCBLIST_KTHREAD_GP;
+
+       return rcu_segcblist_test_flags(&rdp->cblist, flags);
+}
+
+/*
+ * Update this rdp's GP-kthread state flag.  Returns true if the rdp is
+ * being de-offloaded (so the GP kthread should ignore it), false if it
+ * is offloaded.  Sets *needwake_state when the de-offload (or offload)
+ * worker waiting on ->nocb_state_wq should be awakened.
+ */
+static inline bool nocb_gp_update_state_deoffloading(struct rcu_data *rdp,
+                                                    bool *needwake_state)
+{
+       struct rcu_segcblist *cblist = &rdp->cblist;
+
+       if (rcu_segcblist_test_flags(cblist, SEGCBLIST_OFFLOADED)) {
+               /* Offloaded: acknowledge by setting our KTHREAD_GP flag. */
+               if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP)) {
+                       rcu_segcblist_set_flags(cblist, SEGCBLIST_KTHREAD_GP);
+                       if (rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB))
+                               *needwake_state = true;
+               }
+               return false;
+       }
+
+       /*
+        * De-offloading. Clear our flag and notify the de-offload worker.
+        * We will ignore this rdp until it ever gets re-offloaded.
+        */
+       WARN_ON_ONCE(!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP));
+       rcu_segcblist_clear_flags(cblist, SEGCBLIST_KTHREAD_GP);
+       if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB))
+               *needwake_state = true;
+       return true;
+}
+
+
+/*
+ * No-CBs GP kthreads come here to wait for additional callbacks to show up
+ * or for grace periods to end.
+ */
+static void nocb_gp_wait(struct rcu_data *my_rdp)
+{
+       bool bypass = false;
+       long bypass_ncbs;
+       int __maybe_unused cpu = my_rdp->cpu;
+       unsigned long cur_gp_seq;
+       unsigned long flags;
+       bool gotcbs = false;
+       unsigned long j = jiffies;
+       bool needwait_gp = false; // This prevents actual uninitialized use.
+       bool needwake;
+       bool needwake_gp;
+       struct rcu_data *rdp;
+       struct rcu_node *rnp;
+       unsigned long wait_gp_seq = 0; // Suppress "use uninitialized" warning.
+       bool wasempty = false;
+
+       /*
+        * Each pass through the following loop checks for CBs and for the
+        * nearest grace period (if any) to wait for next.  The CB kthreads
+        * and the global grace-period kthread are awakened if needed.
+        */
+       WARN_ON_ONCE(my_rdp->nocb_gp_rdp != my_rdp);
+       for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_cb_rdp) {
+               bool needwake_state = false;
+
+               if (!nocb_gp_enabled_cb(rdp))
+                       continue;
+               trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("Check"));
+               rcu_nocb_lock_irqsave(rdp, flags);
+               if (nocb_gp_update_state_deoffloading(rdp, &needwake_state)) {
+                       // De-offloading in progress: skip this rdp.
+                       rcu_nocb_unlock_irqrestore(rdp, flags);
+                       if (needwake_state)
+                               swake_up_one(&rdp->nocb_state_wq);
+                       continue;
+               }
+               bypass_ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
+               if (bypass_ncbs &&
+                   (time_after(j, READ_ONCE(rdp->nocb_bypass_first) + 1) ||
+                    bypass_ncbs > 2 * qhimark)) {
+                       // Bypass full or old, so flush it.
+                       (void)rcu_nocb_try_flush_bypass(rdp, j);
+                       bypass_ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
+               } else if (!bypass_ncbs && rcu_segcblist_empty(&rdp->cblist)) {
+                       rcu_nocb_unlock_irqrestore(rdp, flags);
+                       if (needwake_state)
+                               swake_up_one(&rdp->nocb_state_wq);
+                       continue; /* No callbacks here, try next. */
+               }
+               if (bypass_ncbs) {
+                       trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
+                                           TPS("Bypass"));
+                       bypass = true;
+               }
+               rnp = rdp->mynode;
+
+               // Advance callbacks if helpful and low contention.
+               needwake_gp = false;
+               if (!rcu_segcblist_restempty(&rdp->cblist,
+                                            RCU_NEXT_READY_TAIL) ||
+                   (rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) &&
+                    rcu_seq_done(&rnp->gp_seq, cur_gp_seq))) {
+                       raw_spin_lock_rcu_node(rnp); /* irqs disabled. */
+                       needwake_gp = rcu_advance_cbs(rnp, rdp);
+                       wasempty = rcu_segcblist_restempty(&rdp->cblist,
+                                                          RCU_NEXT_READY_TAIL);
+                       raw_spin_unlock_rcu_node(rnp); /* irqs disabled. */
+               }
+               // Need to wait on some grace period?
+               WARN_ON_ONCE(wasempty &&
+                            !rcu_segcblist_restempty(&rdp->cblist,
+                                                     RCU_NEXT_READY_TAIL));
+               if (rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq)) {
+                       // Track the earliest grace period needed by any rdp.
+                       if (!needwait_gp ||
+                           ULONG_CMP_LT(cur_gp_seq, wait_gp_seq))
+                               wait_gp_seq = cur_gp_seq;
+                       needwait_gp = true;
+                       trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
+                                           TPS("NeedWaitGP"));
+               }
+               if (rcu_segcblist_ready_cbs(&rdp->cblist)) {
+                       needwake = rdp->nocb_cb_sleep;
+                       WRITE_ONCE(rdp->nocb_cb_sleep, false);
+                       smp_mb(); /* CB invocation -after- GP end. */
+               } else {
+                       needwake = false;
+               }
+               rcu_nocb_unlock_irqrestore(rdp, flags);
+               if (needwake) {
+                       swake_up_one(&rdp->nocb_cb_wq);
+                       gotcbs = true;
+               }
+               if (needwake_gp)
+                       rcu_gp_kthread_wake();
+               if (needwake_state)
+                       swake_up_one(&rdp->nocb_state_wq);
+       }
+
+       my_rdp->nocb_gp_bypass = bypass;
+       my_rdp->nocb_gp_gp = needwait_gp;
+       my_rdp->nocb_gp_seq = needwait_gp ? wait_gp_seq : 0;
+
+       if (bypass && !rcu_nocb_poll) {
+               // At least one child with non-empty ->nocb_bypass, so set
+               // timer in order to avoid stranding its callbacks.
+               wake_nocb_gp_defer(my_rdp, RCU_NOCB_WAKE_BYPASS,
+                                  TPS("WakeBypassIsDeferred"));
+       }
+       if (rcu_nocb_poll) {
+               /* Polling, so trace if first poll in the series. */
+               if (gotcbs)
+                       trace_rcu_nocb_wake(rcu_state.name, cpu, TPS("Poll"));
+               schedule_timeout_idle(1);
+       } else if (!needwait_gp) {
+               /* Wait for callbacks to appear. */
+               trace_rcu_nocb_wake(rcu_state.name, cpu, TPS("Sleep"));
+               swait_event_interruptible_exclusive(my_rdp->nocb_gp_wq,
+                               !READ_ONCE(my_rdp->nocb_gp_sleep));
+               trace_rcu_nocb_wake(rcu_state.name, cpu, TPS("EndSleep"));
+       } else {
+               /* Wait for the needed grace period to complete. */
+               rnp = my_rdp->mynode;
+               trace_rcu_this_gp(rnp, my_rdp, wait_gp_seq, TPS("StartWait"));
+               swait_event_interruptible_exclusive(
+                       rnp->nocb_gp_wq[rcu_seq_ctr(wait_gp_seq) & 0x1],
+                       rcu_seq_done(&rnp->gp_seq, wait_gp_seq) ||
+                       !READ_ONCE(my_rdp->nocb_gp_sleep));
+               trace_rcu_this_gp(rnp, my_rdp, wait_gp_seq, TPS("EndWait"));
+       }
+       if (!rcu_nocb_poll) {
+               /* Re-arm sleep state and cancel any pending deferred wakeup. */
+               raw_spin_lock_irqsave(&my_rdp->nocb_gp_lock, flags);
+               if (my_rdp->nocb_defer_wakeup > RCU_NOCB_WAKE_NOT) {
+                       WRITE_ONCE(my_rdp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT);
+                       del_timer(&my_rdp->nocb_timer);
+               }
+               WRITE_ONCE(my_rdp->nocb_gp_sleep, true);
+               raw_spin_unlock_irqrestore(&my_rdp->nocb_gp_lock, flags);
+       }
+       my_rdp->nocb_gp_seq = -1;
+       WARN_ON(signal_pending(current));
+}
+
+/*
+ * No-CBs grace-period-wait kthread.  There is one of these per group
+ * of CPUs, but only once at least one CPU in that group has come online
+ * at least once since boot.  This kthread checks for newly posted
+ * callbacks from any of the CPUs it is responsible for, waits for a
+ * grace period, then awakens all of the rcu_nocb_cb_kthread() instances
+ * that then have callback-invocation work to do.
+ */
+static int rcu_nocb_gp_kthread(void *arg)
+{
+       struct rcu_data *rdp = arg;
+
+       for (;;) {
+               /* Count loop iterations for diagnostics. */
+               WRITE_ONCE(rdp->nocb_gp_loops, rdp->nocb_gp_loops + 1);
+               nocb_gp_wait(rdp);
+               cond_resched_tasks_rcu_qs();
+       }
+       return 0; /* Unreachable: the kthread loops forever. */
+}
+
+/* CB kthread may run only while offloaded with its KTHREAD_CB flag set. */
+static inline bool nocb_cb_can_run(struct rcu_data *rdp)
+{
+       return rcu_segcblist_test_flags(&rdp->cblist,
+                                       SEGCBLIST_OFFLOADED | SEGCBLIST_KTHREAD_CB);
+}
+
+/* Wakeup condition for the CB kthread: runnable and told not to sleep. */
+static inline bool nocb_cb_wait_cond(struct rcu_data *rdp)
+{
+       return nocb_cb_can_run(rdp) && !READ_ONCE(rdp->nocb_cb_sleep);
+}
+
+/*
+ * Invoke any ready callbacks from the corresponding no-CBs CPU,
+ * then, if there are no more, wait for more to appear.
+ */
+static void nocb_cb_wait(struct rcu_data *rdp)
+{
+       struct rcu_segcblist *cblist = &rdp->cblist;
+       unsigned long cur_gp_seq;
+       unsigned long flags;
+       bool needwake_state = false;
+       bool needwake_gp = false;
+       bool can_sleep = true;
+       struct rcu_node *rnp = rdp->mynode;
+
+       local_irq_save(flags);
+       rcu_momentary_dyntick_idle();
+       local_irq_restore(flags);
+       /*
+        * Disable BH to provide the expected environment.  Also, when
+        * transitioning to/from NOCB mode, a self-requeuing callback might
+        * be invoked from softirq.  A short grace period could cause both
+        * instances of this callback to execute concurrently.
+        */
+       local_bh_disable();
+       rcu_do_batch(rdp);
+       local_bh_enable();
+       lockdep_assert_irqs_enabled();
+       rcu_nocb_lock_irqsave(rdp, flags);
+       /* Advance callbacks if the needed grace period has ended. */
+       if (rcu_segcblist_nextgp(cblist, &cur_gp_seq) &&
+           rcu_seq_done(&rnp->gp_seq, cur_gp_seq) &&
+           raw_spin_trylock_rcu_node(rnp)) { /* irqs already disabled. */
+               needwake_gp = rcu_advance_cbs(rdp->mynode, rdp);
+               raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
+       }
+
+       if (rcu_segcblist_test_flags(cblist, SEGCBLIST_OFFLOADED)) {
+               /* Offloaded: acknowledge by setting our KTHREAD_CB flag. */
+               if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB)) {
+                       rcu_segcblist_set_flags(cblist, SEGCBLIST_KTHREAD_CB);
+                       if (rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP))
+                               needwake_state = true;
+               }
+               if (rcu_segcblist_ready_cbs(cblist))
+                       can_sleep = false;
+       } else {
+               /*
+                * De-offloading. Clear our flag and notify the de-offload worker.
+                * We won't touch the callbacks and keep sleeping until we ever
+                * get re-offloaded.
+                */
+               WARN_ON_ONCE(!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB));
+               rcu_segcblist_clear_flags(cblist, SEGCBLIST_KTHREAD_CB);
+               if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP))
+                       needwake_state = true;
+       }
+
+       WRITE_ONCE(rdp->nocb_cb_sleep, can_sleep);
+
+       if (rdp->nocb_cb_sleep)
+               trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("CBSleep"));
+
+       rcu_nocb_unlock_irqrestore(rdp, flags);
+       if (needwake_gp)
+               rcu_gp_kthread_wake();
+
+       if (needwake_state)
+               swake_up_one(&rdp->nocb_state_wq);
+
+       do {
+               swait_event_interruptible_exclusive(rdp->nocb_cb_wq,
+                                                   nocb_cb_wait_cond(rdp));
+
+               // VVV Ensure CB invocation follows _sleep test.
+               if (smp_load_acquire(&rdp->nocb_cb_sleep)) { // ^^^
+                       WARN_ON(signal_pending(current));
+                       trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WokeEmpty"));
+               }
+       } while (!nocb_cb_can_run(rdp));
+}
+
+/*
+ * Per-rcu_data kthread, but only for no-CBs CPUs.  Repeatedly invoke
+ * nocb_cb_wait() to do the dirty work.
+ */
+static int rcu_nocb_cb_kthread(void *arg)
+{
+       struct rcu_data *rdp = arg;
+
+       // Each pass through this loop does one callback batch, and,
+       // if there are no more ready callbacks, waits for them.
+       for (;;) {
+               nocb_cb_wait(rdp);
+               cond_resched_tasks_rcu_qs();
+       }
+       return 0; /* Unreachable: the kthread loops forever. */
+}
+
+/*
+ * Is a deferred wakeup of rcu_nocb_kthread() required?  True when the
+ * recorded deferred-wakeup reason is at least as strong as @level.
+ */
+static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp, int level)
+{
+       return READ_ONCE(rdp->nocb_defer_wakeup) >= level;
+}
+
+/*
+ * Do a deferred wakeup of rcu_nocb_kthread().  Caller holds
+ * rdp_gp->nocb_gp_lock with interrupts disabled; it is released on all
+ * paths (directly here, or by __wake_nocb_gp()).  Returns true if a
+ * wakeup was issued.
+ */
+static bool do_nocb_deferred_wakeup_common(struct rcu_data *rdp_gp,
+                                          struct rcu_data *rdp, int level,
+                                          unsigned long flags)
+       __releases(rdp_gp->nocb_gp_lock)
+{
+       int ndw;
+       int ret;
+
+       if (!rcu_nocb_need_deferred_wakeup(rdp_gp, level)) {
+               raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);
+               return false;
+       }
+
+       /* Force the wakeup only if the strongest deferral level was recorded. */
+       ndw = rdp_gp->nocb_defer_wakeup;
+       ret = __wake_nocb_gp(rdp_gp, rdp, ndw == RCU_NOCB_WAKE_FORCE, flags);
+       trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DeferredWake"));
+
+       return ret;
+}
+
+/* Do a deferred wakeup of rcu_nocb_kthread() from a timer handler. */
+static void do_nocb_deferred_wakeup_timer(struct timer_list *t)
+{
+       unsigned long flags;
+       struct rcu_data *rdp = from_timer(rdp, t, nocb_timer);
+
+       /* The timer lives only on group leaders. */
+       WARN_ON_ONCE(rdp->nocb_gp_rdp != rdp);
+       trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("Timer"));
+
+       raw_spin_lock_irqsave(&rdp->nocb_gp_lock, flags);
+       smp_mb__after_spinlock(); /* Timer expire before wakeup. */
+       do_nocb_deferred_wakeup_common(rdp, rdp, RCU_NOCB_WAKE_BYPASS, flags);
+}
+
+/*
+ * Do a deferred wakeup of rcu_nocb_kthread() from fastpath.
+ * This means we do an inexact common-case check.  Note that if
+ * we miss, ->nocb_timer will eventually clean things up.
+ *
+ * Returns true if a wakeup was issued.
+ */
+static bool do_nocb_deferred_wakeup(struct rcu_data *rdp)
+{
+       unsigned long flags;
+       struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;
+
+       /* Lockless pre-check; rechecked under the lock in _common(). */
+       if (!rdp_gp || !rcu_nocb_need_deferred_wakeup(rdp_gp, RCU_NOCB_WAKE))
+               return false;
+
+       raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
+       return do_nocb_deferred_wakeup_common(rdp_gp, rdp, RCU_NOCB_WAKE, flags);
+}
+
+/* Perform any deferred nocb wakeups pending for the current CPU. */
+void rcu_nocb_flush_deferred_wakeup(void)
+{
+       do_nocb_deferred_wakeup(this_cpu_ptr(&rcu_data));
+}
+EXPORT_SYMBOL_GPL(rcu_nocb_flush_deferred_wakeup);
+
+/*
+ * Flip this rdp's offloaded state and wake both nocb kthreads so they
+ * notice the transition.  Caller holds ->nocb_lock (see __releases),
+ * which is dropped here.  Always returns 0.
+ */
+static int rdp_offload_toggle(struct rcu_data *rdp,
+                              bool offload, unsigned long flags)
+       __releases(rdp->nocb_lock)
+{
+       struct rcu_segcblist *cblist = &rdp->cblist;
+       struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;
+       bool wake_gp = false;
+
+       rcu_segcblist_offload(cblist, offload);
+
+       if (rdp->nocb_cb_sleep)
+               rdp->nocb_cb_sleep = false;
+       rcu_nocb_unlock_irqrestore(rdp, flags);
+
+       /*
+        * Ignore former value of nocb_cb_sleep and force wake up as it could
+        * have been spuriously set to false already.
+        */
+       swake_up_one(&rdp->nocb_cb_wq);
+
+       raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
+       if (rdp_gp->nocb_gp_sleep) {
+               rdp_gp->nocb_gp_sleep = false;
+               wake_gp = true;
+       }
+       raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);
+
+       if (wake_gp)
+               wake_up_process(rdp_gp->nocb_gp_kthread);
+
+       return 0;
+}
+
+/*
+ * De-offload one rdp's callbacks back to softirq processing.  Runs via
+ * work_on_cpu() on the target CPU itself (see WARN below).  Returns the
+ * result of rdp_offload_toggle() (currently always 0).
+ */
+static long rcu_nocb_rdp_deoffload(void *arg)
+{
+       struct rcu_data *rdp = arg;
+       struct rcu_segcblist *cblist = &rdp->cblist;
+       unsigned long flags;
+       int ret;
+
+       WARN_ON_ONCE(rdp->cpu != raw_smp_processor_id());
+
+       pr_info("De-offloading %d\n", rdp->cpu);
+
+       rcu_nocb_lock_irqsave(rdp, flags);
+       /*
+        * Flush once and for all now. This suffices because we are
+        * running on the target CPU holding ->nocb_lock (thus having
+        * interrupts disabled), and because rdp_offload_toggle()
+        * invokes rcu_segcblist_offload(), which clears SEGCBLIST_OFFLOADED.
+        * Thus future calls to rcu_segcblist_completely_offloaded() will
+        * return false, which means that future calls to rcu_nocb_try_bypass()
+        * will refuse to put anything into the bypass.
+        */
+       WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, jiffies));
+       ret = rdp_offload_toggle(rdp, false, flags);
+       /* Wait for both nocb kthreads to acknowledge the transition. */
+       swait_event_exclusive(rdp->nocb_state_wq,
+                             !rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB |
+                                                       SEGCBLIST_KTHREAD_GP));
+       /*
+        * Lock one last time to acquire latest callback updates from kthreads
+        * so we can later handle callbacks locally without locking.
+        */
+       rcu_nocb_lock_irqsave(rdp, flags);
+       /*
+        * Theoretically we could set SEGCBLIST_SOFTIRQ_ONLY after the nocb
+        * lock is released but how about being paranoid for once?
+        */
+       rcu_segcblist_set_flags(cblist, SEGCBLIST_SOFTIRQ_ONLY);
+       /*
+        * With SEGCBLIST_SOFTIRQ_ONLY, we can't use
+        * rcu_nocb_unlock_irqrestore() anymore.
+        */
+       raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
+
+       /* Sanity check */
+       WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
+
+
+       return ret;
+}
+
+int rcu_nocb_cpu_deoffload(int cpu)
+{
+       struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
+       int ret = 0;
+
+       mutex_lock(&rcu_state.barrier_mutex);
+       cpus_read_lock();
+       if (rcu_rdp_is_offloaded(rdp)) {
+               if (cpu_online(cpu)) {
+                       ret = work_on_cpu(cpu, rcu_nocb_rdp_deoffload, rdp);
+                       if (!ret)
+                               cpumask_clear_cpu(cpu, rcu_nocb_mask);
+               } else {
+                       pr_info("NOCB: Can't CB-deoffload an offline CPU\n");
+                       ret = -EINVAL;
+               }
+       }
+       cpus_read_unlock();
+       mutex_unlock(&rcu_state.barrier_mutex);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(rcu_nocb_cpu_deoffload);
+
+static long rcu_nocb_rdp_offload(void *arg)
+{
+       struct rcu_data *rdp = arg;
+       struct rcu_segcblist *cblist = &rdp->cblist;
+       unsigned long flags;
+       int ret;
+
+       WARN_ON_ONCE(rdp->cpu != raw_smp_processor_id());
+       /*
+        * For now we only support re-offload, ie: the rdp must have been
+        * offloaded on boot first.
+        */
+       if (!rdp->nocb_gp_rdp)
+               return -EINVAL;
+
+       pr_info("Offloading %d\n", rdp->cpu);
+       /*
+        * Can't use rcu_nocb_lock_irqsave() while we are in
+        * SEGCBLIST_SOFTIRQ_ONLY mode.
+        */
+       raw_spin_lock_irqsave(&rdp->nocb_lock, flags);
+
+       /*
+        * We didn't take the nocb lock while working on the
+        * rdp->cblist in SEGCBLIST_SOFTIRQ_ONLY mode.
+        * Every modifications that have been done previously on
+        * rdp->cblist must be visible remotely by the nocb kthreads
+        * upon wake up after reading the cblist flags.
+        *
+        * The layout against nocb_lock enforces that ordering:
+        *
+        *  __rcu_nocb_rdp_offload()   nocb_cb_wait()/nocb_gp_wait()
+        * -------------------------   ----------------------------
+        *      WRITE callbacks           rcu_nocb_lock()
+        *      rcu_nocb_lock()           READ flags
+        *      WRITE flags               READ callbacks
+        *      rcu_nocb_unlock()         rcu_nocb_unlock()
+        */
+       ret = rdp_offload_toggle(rdp, true, flags);
+       swait_event_exclusive(rdp->nocb_state_wq,
+                             rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB) &&
+                             rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP));
+
+       return ret;
+}
+
+int rcu_nocb_cpu_offload(int cpu)
+{
+       struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
+       int ret = 0;
+
+       mutex_lock(&rcu_state.barrier_mutex);
+       cpus_read_lock();
+       if (!rcu_rdp_is_offloaded(rdp)) {
+               if (cpu_online(cpu)) {
+                       ret = work_on_cpu(cpu, rcu_nocb_rdp_offload, rdp);
+                       if (!ret)
+                               cpumask_set_cpu(cpu, rcu_nocb_mask);
+               } else {
+                       pr_info("NOCB: Can't CB-offload an offline CPU\n");
+                       ret = -EINVAL;
+               }
+       }
+       cpus_read_unlock();
+       mutex_unlock(&rcu_state.barrier_mutex);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(rcu_nocb_cpu_offload);
+
+void __init rcu_init_nohz(void)
+{
+       int cpu;
+       bool need_rcu_nocb_mask = false;
+       struct rcu_data *rdp;
+
+#if defined(CONFIG_NO_HZ_FULL)
+       if (tick_nohz_full_running && cpumask_weight(tick_nohz_full_mask))
+               need_rcu_nocb_mask = true;
+#endif /* #if defined(CONFIG_NO_HZ_FULL) */
+
+       if (!cpumask_available(rcu_nocb_mask) && need_rcu_nocb_mask) {
+               if (!zalloc_cpumask_var(&rcu_nocb_mask, GFP_KERNEL)) {
+                       pr_info("rcu_nocb_mask allocation failed, callback offloading disabled.\n");
+                       return;
+               }
+       }
+       if (!cpumask_available(rcu_nocb_mask))
+               return;
+
+#if defined(CONFIG_NO_HZ_FULL)
+       if (tick_nohz_full_running)
+               cpumask_or(rcu_nocb_mask, rcu_nocb_mask, tick_nohz_full_mask);
+#endif /* #if defined(CONFIG_NO_HZ_FULL) */
+
+       if (!cpumask_subset(rcu_nocb_mask, cpu_possible_mask)) {
+               pr_info("\tNote: kernel parameter 'rcu_nocbs=', 'nohz_full', or 'isolcpus=' contains nonexistent CPUs.\n");
+               cpumask_and(rcu_nocb_mask, cpu_possible_mask,
+                           rcu_nocb_mask);
+       }
+       if (cpumask_empty(rcu_nocb_mask))
+               pr_info("\tOffload RCU callbacks from CPUs: (none).\n");
+       else
+               pr_info("\tOffload RCU callbacks from CPUs: %*pbl.\n",
+                       cpumask_pr_args(rcu_nocb_mask));
+       if (rcu_nocb_poll)
+               pr_info("\tPoll for callbacks from no-CBs CPUs.\n");
+
+       for_each_cpu(cpu, rcu_nocb_mask) {
+               rdp = per_cpu_ptr(&rcu_data, cpu);
+               if (rcu_segcblist_empty(&rdp->cblist))
+                       rcu_segcblist_init(&rdp->cblist);
+               rcu_segcblist_offload(&rdp->cblist, true);
+               rcu_segcblist_set_flags(&rdp->cblist, SEGCBLIST_KTHREAD_CB);
+               rcu_segcblist_set_flags(&rdp->cblist, SEGCBLIST_KTHREAD_GP);
+       }
+       rcu_organize_nocb_kthreads();
+}
+
+/* Initialize per-rcu_data variables for no-CBs CPUs. */
+static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
+{
+       init_swait_queue_head(&rdp->nocb_cb_wq);
+       init_swait_queue_head(&rdp->nocb_gp_wq);
+       init_swait_queue_head(&rdp->nocb_state_wq);
+       raw_spin_lock_init(&rdp->nocb_lock);
+       raw_spin_lock_init(&rdp->nocb_bypass_lock);
+       raw_spin_lock_init(&rdp->nocb_gp_lock);
+       timer_setup(&rdp->nocb_timer, do_nocb_deferred_wakeup_timer, 0);
+       rcu_cblist_init(&rdp->nocb_bypass);
+}
+
+/*
+ * If the specified CPU is a no-CBs CPU that does not already have its
+ * rcuo CB kthread, spawn it.  Additionally, if the rcuo GP kthread
+ * for this CPU's group has not yet been created, spawn it as well.
+ */
+static void rcu_spawn_one_nocb_kthread(int cpu)
+{
+       struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
+       struct rcu_data *rdp_gp;
+       struct task_struct *t;
+
+       /*
+        * If this isn't a no-CBs CPU or if it already has an rcuo kthread,
+        * then nothing to do.
+        */
+       if (!rcu_is_nocb_cpu(cpu) || rdp->nocb_cb_kthread)
+               return;
+
+       /* If we didn't spawn the GP kthread first, reorganize! */
+       rdp_gp = rdp->nocb_gp_rdp;
+       if (!rdp_gp->nocb_gp_kthread) {
+               t = kthread_run(rcu_nocb_gp_kthread, rdp_gp,
+                               "rcuog/%d", rdp_gp->cpu);
+               if (WARN_ONCE(IS_ERR(t), "%s: Could not start rcuo GP kthread, OOM is now expected behavior\n", __func__))
+                       return;
+               WRITE_ONCE(rdp_gp->nocb_gp_kthread, t);
+       }
+
+       /* Spawn the kthread for this CPU. */
+       t = kthread_run(rcu_nocb_cb_kthread, rdp,
+                       "rcuo%c/%d", rcu_state.abbr, cpu);
+       if (WARN_ONCE(IS_ERR(t), "%s: Could not start rcuo CB kthread, OOM is now expected behavior\n", __func__))
+               return;
+       WRITE_ONCE(rdp->nocb_cb_kthread, t);
+       WRITE_ONCE(rdp->nocb_gp_kthread, rdp_gp->nocb_gp_kthread);
+}
+
+/*
+ * If the specified CPU is a no-CBs CPU that does not already have its
+ * rcuo kthread, spawn it.
+ */
+static void rcu_spawn_cpu_nocb_kthread(int cpu)
+{
+       if (rcu_scheduler_fully_active)
+               rcu_spawn_one_nocb_kthread(cpu);
+}
+
+/*
+ * Once the scheduler is running, spawn rcuo kthreads for all online
+ * no-CBs CPUs.  This assumes that the early_initcall()s happen before
+ * non-boot CPUs come online -- if this changes, we will need to add
+ * some mutual exclusion.
+ */
+static void __init rcu_spawn_nocb_kthreads(void)
+{
+       int cpu;
+
+       for_each_online_cpu(cpu)
+               rcu_spawn_cpu_nocb_kthread(cpu);
+}
+
+/* How many CB CPU IDs per GP kthread?  Default of -1 for sqrt(nr_cpu_ids). */
+static int rcu_nocb_gp_stride = -1;
+module_param(rcu_nocb_gp_stride, int, 0444);
+
+/*
+ * Initialize GP-CB relationships for all no-CBs CPU.
+ */
+static void __init rcu_organize_nocb_kthreads(void)
+{
+       int cpu;
+       bool firsttime = true;
+       bool gotnocbs = false;
+       bool gotnocbscbs = true;
+       int ls = rcu_nocb_gp_stride;
+       int nl = 0;  /* Next GP kthread. */
+       struct rcu_data *rdp;
+       struct rcu_data *rdp_gp = NULL;  /* Suppress misguided gcc warn. */
+       struct rcu_data *rdp_prev = NULL;
+
+       if (!cpumask_available(rcu_nocb_mask))
+               return;
+       if (ls == -1) {
+               ls = nr_cpu_ids / int_sqrt(nr_cpu_ids);
+               rcu_nocb_gp_stride = ls;
+       }
+
+       /*
+        * Each pass through this loop sets up one rcu_data structure.
+        * Should the corresponding CPU come online in the future, then
+        * we will spawn the needed set of rcu_nocb_kthread() kthreads.
+        */
+       for_each_cpu(cpu, rcu_nocb_mask) {
+               rdp = per_cpu_ptr(&rcu_data, cpu);
+               if (rdp->cpu >= nl) {
+                       /* New GP kthread, set up for CBs & next GP. */
+                       gotnocbs = true;
+                       nl = DIV_ROUND_UP(rdp->cpu + 1, ls) * ls;
+                       rdp->nocb_gp_rdp = rdp;
+                       rdp_gp = rdp;
+                       if (dump_tree) {
+                               if (!firsttime)
+                                       pr_cont("%s\n", gotnocbscbs
+                                                       ? "" : " (self only)");
+                               gotnocbscbs = false;
+                               firsttime = false;
+                               pr_alert("%s: No-CB GP kthread CPU %d:",
+                                        __func__, cpu);
+                       }
+               } else {
+                       /* Another CB kthread, link to previous GP kthread. */
+                       gotnocbscbs = true;
+                       rdp->nocb_gp_rdp = rdp_gp;
+                       rdp_prev->nocb_next_cb_rdp = rdp;
+                       if (dump_tree)
+                               pr_cont(" %d", cpu);
+               }
+               rdp_prev = rdp;
+       }
+       if (gotnocbs && dump_tree)
+               pr_cont("%s\n", gotnocbscbs ? "" : " (self only)");
+}
+
+/*
+ * Bind the current task to the offloaded CPUs.  If there are no offloaded
+ * CPUs, leave the task unbound.  Splat if the bind attempt fails.
+ */
+void rcu_bind_current_to_nocb(void)
+{
+       if (cpumask_available(rcu_nocb_mask) && cpumask_weight(rcu_nocb_mask))
+               WARN_ON(sched_setaffinity(current->pid, rcu_nocb_mask));
+}
+EXPORT_SYMBOL_GPL(rcu_bind_current_to_nocb);
+
+// The ->on_cpu field is available only in CONFIG_SMP=y, so...
+#ifdef CONFIG_SMP
+static char *show_rcu_should_be_on_cpu(struct task_struct *tsp)
+{
+       return tsp && task_is_running(tsp) && !tsp->on_cpu ? "!" : "";
+}
+#else // #ifdef CONFIG_SMP
+static char *show_rcu_should_be_on_cpu(struct task_struct *tsp)
+{
+       return "";
+}
+#endif // #else #ifdef CONFIG_SMP
+
+/*
+ * Dump out nocb grace-period kthread state for the specified rcu_data
+ * structure.
+ */
+static void show_rcu_nocb_gp_state(struct rcu_data *rdp)
+{
+       struct rcu_node *rnp = rdp->mynode;
+
+       pr_info("nocb GP %d %c%c%c%c%c %c[%c%c] %c%c:%ld rnp %d:%d %lu %c CPU %d%s\n",
+               rdp->cpu,
+               "kK"[!!rdp->nocb_gp_kthread],
+               "lL"[raw_spin_is_locked(&rdp->nocb_gp_lock)],
+               "dD"[!!rdp->nocb_defer_wakeup],
+               "tT"[timer_pending(&rdp->nocb_timer)],
+               "sS"[!!rdp->nocb_gp_sleep],
+               ".W"[swait_active(&rdp->nocb_gp_wq)],
+               ".W"[swait_active(&rnp->nocb_gp_wq[0])],
+               ".W"[swait_active(&rnp->nocb_gp_wq[1])],
+               ".B"[!!rdp->nocb_gp_bypass],
+               ".G"[!!rdp->nocb_gp_gp],
+               (long)rdp->nocb_gp_seq,
+               rnp->grplo, rnp->grphi, READ_ONCE(rdp->nocb_gp_loops),
+               rdp->nocb_gp_kthread ? task_state_to_char(rdp->nocb_gp_kthread) : '.',
+               rdp->nocb_cb_kthread ? (int)task_cpu(rdp->nocb_gp_kthread) : -1,
+               show_rcu_should_be_on_cpu(rdp->nocb_cb_kthread));
+}
+
+/* Dump out nocb kthread state for the specified rcu_data structure. */
+static void show_rcu_nocb_state(struct rcu_data *rdp)
+{
+       char bufw[20];
+       char bufr[20];
+       struct rcu_segcblist *rsclp = &rdp->cblist;
+       bool waslocked;
+       bool wassleep;
+
+       if (rdp->nocb_gp_rdp == rdp)
+               show_rcu_nocb_gp_state(rdp);
+
+       sprintf(bufw, "%ld", rsclp->gp_seq[RCU_WAIT_TAIL]);
+       sprintf(bufr, "%ld", rsclp->gp_seq[RCU_NEXT_READY_TAIL]);
+       pr_info("   CB %d^%d->%d %c%c%c%c%c%c F%ld L%ld C%d %c%c%s%c%s%c%c q%ld %c CPU %d%s\n",
+               rdp->cpu, rdp->nocb_gp_rdp->cpu,
+               rdp->nocb_next_cb_rdp ? rdp->nocb_next_cb_rdp->cpu : -1,
+               "kK"[!!rdp->nocb_cb_kthread],
+               "bB"[raw_spin_is_locked(&rdp->nocb_bypass_lock)],
+               "cC"[!!atomic_read(&rdp->nocb_lock_contended)],
+               "lL"[raw_spin_is_locked(&rdp->nocb_lock)],
+               "sS"[!!rdp->nocb_cb_sleep],
+               ".W"[swait_active(&rdp->nocb_cb_wq)],
+               jiffies - rdp->nocb_bypass_first,
+               jiffies - rdp->nocb_nobypass_last,
+               rdp->nocb_nobypass_count,
+               ".D"[rcu_segcblist_ready_cbs(rsclp)],
+               ".W"[!rcu_segcblist_segempty(rsclp, RCU_WAIT_TAIL)],
+               rcu_segcblist_segempty(rsclp, RCU_WAIT_TAIL) ? "" : bufw,
+               ".R"[!rcu_segcblist_segempty(rsclp, RCU_NEXT_READY_TAIL)],
+               rcu_segcblist_segempty(rsclp, RCU_NEXT_READY_TAIL) ? "" : bufr,
+               ".N"[!rcu_segcblist_segempty(rsclp, RCU_NEXT_TAIL)],
+               ".B"[!!rcu_cblist_n_cbs(&rdp->nocb_bypass)],
+               rcu_segcblist_n_cbs(&rdp->cblist),
+               rdp->nocb_cb_kthread ? task_state_to_char(rdp->nocb_cb_kthread) : '.',
+               rdp->nocb_cb_kthread ? (int)task_cpu(rdp->nocb_gp_kthread) : -1,
+               show_rcu_should_be_on_cpu(rdp->nocb_cb_kthread));
+
+       /* It is OK for GP kthreads to have GP state. */
+       if (rdp->nocb_gp_rdp == rdp)
+               return;
+
+       waslocked = raw_spin_is_locked(&rdp->nocb_gp_lock);
+       wassleep = swait_active(&rdp->nocb_gp_wq);
+       if (!rdp->nocb_gp_sleep && !waslocked && !wassleep)
+               return;  /* Nothing untoward. */
+
+       pr_info("   nocb GP activity on CB-only CPU!!! %c%c%c %c\n",
+               "lL"[waslocked],
+               "dD"[!!rdp->nocb_defer_wakeup],
+               "sS"[!!rdp->nocb_gp_sleep],
+               ".W"[wassleep]);
+}
+
+#else /* #ifdef CONFIG_RCU_NOCB_CPU */
+
+static inline int rcu_lockdep_is_held_nocb(struct rcu_data *rdp)
+{
+       return 0;
+}
+
+static inline bool rcu_current_is_nocb_kthread(struct rcu_data *rdp)
+{
+       return false;
+}
+
+/* No ->nocb_lock to acquire.  */
+static void rcu_nocb_lock(struct rcu_data *rdp)
+{
+}
+
+/* No ->nocb_lock to release.  */
+static void rcu_nocb_unlock(struct rcu_data *rdp)
+{
+}
+
+/* No ->nocb_lock to release.  */
+static void rcu_nocb_unlock_irqrestore(struct rcu_data *rdp,
+                                      unsigned long flags)
+{
+       local_irq_restore(flags);
+}
+
+/* Lockdep check that ->cblist may be safely accessed. */
+static void rcu_lockdep_assert_cblist_protected(struct rcu_data *rdp)
+{
+       lockdep_assert_irqs_disabled();
+}
+
+static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq)
+{
+}
+
+static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp)
+{
+       return NULL;
+}
+
+static void rcu_init_one_nocb(struct rcu_node *rnp)
+{
+}
+
+static bool rcu_nocb_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
+                                 unsigned long j)
+{
+       return true;
+}
+
+static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
+                               bool *was_alldone, unsigned long flags)
+{
+       return false;
+}
+
+static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_empty,
+                                unsigned long flags)
+{
+       WARN_ON_ONCE(1);  /* Should be dead code! */
+}
+
+static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
+{
+}
+
+static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp, int level)
+{
+       return false;
+}
+
+static bool do_nocb_deferred_wakeup(struct rcu_data *rdp)
+{
+       return false;
+}
+
+static void rcu_spawn_cpu_nocb_kthread(int cpu)
+{
+}
+
+static void __init rcu_spawn_nocb_kthreads(void)
+{
+}
+
+static void show_rcu_nocb_state(struct rcu_data *rdp)
+{
+}
+
+#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
index de1dc3b..d070059 100644 (file)
 
 #include "../locking/rtmutex_common.h"
 
-#ifdef CONFIG_RCU_NOCB_CPU
-static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */
-static bool __read_mostly rcu_nocb_poll;    /* Offload kthread are to poll. */
-static inline int rcu_lockdep_is_held_nocb(struct rcu_data *rdp)
-{
-       return lockdep_is_held(&rdp->nocb_lock);
-}
-
-static inline bool rcu_current_is_nocb_kthread(struct rcu_data *rdp)
-{
-       /* Race on early boot between thread creation and assignment */
-       if (!rdp->nocb_cb_kthread || !rdp->nocb_gp_kthread)
-               return true;
-
-       if (current == rdp->nocb_cb_kthread || current == rdp->nocb_gp_kthread)
-               if (in_task())
-                       return true;
-       return false;
-}
-
-#else
-static inline int rcu_lockdep_is_held_nocb(struct rcu_data *rdp)
-{
-       return 0;
-}
-
-static inline bool rcu_current_is_nocb_kthread(struct rcu_data *rdp)
-{
-       return false;
-}
-
-#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
-
 static bool rcu_rdp_is_offloaded(struct rcu_data *rdp)
 {
        /*
@@ -346,7 +313,7 @@ void rcu_note_context_switch(bool preempt)
 
        trace_rcu_utilization(TPS("Start context switch"));
        lockdep_assert_irqs_disabled();
-       WARN_ON_ONCE(!preempt && rcu_preempt_depth() > 0);
+       WARN_ONCE(!preempt && rcu_preempt_depth() > 0, "Voluntary context switch within RCU read-side critical section!");
        if (rcu_preempt_depth() > 0 &&
            !t->rcu_read_unlock_special.b.blocked) {
 
@@ -405,17 +372,20 @@ static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
 
 static void rcu_preempt_read_enter(void)
 {
-       current->rcu_read_lock_nesting++;
+       WRITE_ONCE(current->rcu_read_lock_nesting, READ_ONCE(current->rcu_read_lock_nesting) + 1);
 }
 
 static int rcu_preempt_read_exit(void)
 {
-       return --current->rcu_read_lock_nesting;
+       int ret = READ_ONCE(current->rcu_read_lock_nesting) - 1;
+
+       WRITE_ONCE(current->rcu_read_lock_nesting, ret);
+       return ret;
 }
 
 static void rcu_preempt_depth_set(int val)
 {
-       current->rcu_read_lock_nesting = val;
+       WRITE_ONCE(current->rcu_read_lock_nesting, val);
 }
 
 /*
@@ -559,7 +529,7 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
                        WRITE_ONCE(rnp->exp_tasks, np);
                if (IS_ENABLED(CONFIG_RCU_BOOST)) {
                        /* Snapshot ->boost_mtx ownership w/rnp->lock held. */
-                       drop_boost_mutex = rt_mutex_owner(&rnp->boost_mtx) == t;
+                       drop_boost_mutex = rt_mutex_owner(&rnp->boost_mtx.rtmutex) == t;
                        if (&t->rcu_node_entry == rnp->boost_tasks)
                                WRITE_ONCE(rnp->boost_tasks, np);
                }
@@ -586,7 +556,7 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
 
                /* Unboost if we were boosted. */
                if (IS_ENABLED(CONFIG_RCU_BOOST) && drop_boost_mutex)
-                       rt_mutex_futex_unlock(&rnp->boost_mtx);
+                       rt_mutex_futex_unlock(&rnp->boost_mtx.rtmutex);
 
                /*
                 * If this was the last task on the expedited lists,
@@ -1083,7 +1053,7 @@ static int rcu_boost(struct rcu_node *rnp)
         * section.
         */
        t = container_of(tb, struct task_struct, rcu_node_entry);
-       rt_mutex_init_proxy_locked(&rnp->boost_mtx, t);
+       rt_mutex_init_proxy_locked(&rnp->boost_mtx.rtmutex, t);
        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
        /* Lock only for side effect: boosts task t's priority. */
        rt_mutex_lock(&rnp->boost_mtx);
@@ -1479,1460 +1449,6 @@ static void rcu_cleanup_after_idle(void)
 
 #endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */
 
-#ifdef CONFIG_RCU_NOCB_CPU
-
-/*
- * Offload callback processing from the boot-time-specified set of CPUs
- * specified by rcu_nocb_mask.  For the CPUs in the set, there are kthreads
- * created that pull the callbacks from the corresponding CPU, wait for
- * a grace period to elapse, and invoke the callbacks.  These kthreads
- * are organized into GP kthreads, which manage incoming callbacks, wait for
- * grace periods, and awaken CB kthreads, and the CB kthreads, which only
- * invoke callbacks.  Each GP kthread invokes its own CBs.  The no-CBs CPUs
- * do a wake_up() on their GP kthread when they insert a callback into any
- * empty list, unless the rcu_nocb_poll boot parameter has been specified,
- * in which case each kthread actively polls its CPU.  (Which isn't so great
- * for energy efficiency, but which does reduce RCU's overhead on that CPU.)
- *
- * This is intended to be used in conjunction with Frederic Weisbecker's
- * adaptive-idle work, which would seriously reduce OS jitter on CPUs
- * running CPU-bound user-mode computations.
- *
- * Offloading of callbacks can also be used as an energy-efficiency
- * measure because CPUs with no RCU callbacks queued are more aggressive
- * about entering dyntick-idle mode.
- */
-
-
-/*
- * Parse the boot-time rcu_nocb_mask CPU list from the kernel parameters.
- * If the list is invalid, a warning is emitted and all CPUs are offloaded.
- */
-static int __init rcu_nocb_setup(char *str)
-{
-       alloc_bootmem_cpumask_var(&rcu_nocb_mask);
-       if (cpulist_parse(str, rcu_nocb_mask)) {
-               pr_warn("rcu_nocbs= bad CPU range, all CPUs set\n");
-               cpumask_setall(rcu_nocb_mask);
-       }
-       return 1;
-}
-__setup("rcu_nocbs=", rcu_nocb_setup);
-
-static int __init parse_rcu_nocb_poll(char *arg)
-{
-       rcu_nocb_poll = true;
-       return 0;
-}
-early_param("rcu_nocb_poll", parse_rcu_nocb_poll);
-
-/*
- * Don't bother bypassing ->cblist if the call_rcu() rate is low.
- * After all, the main point of bypassing is to avoid lock contention
- * on ->nocb_lock, which only can happen at high call_rcu() rates.
- */
-static int nocb_nobypass_lim_per_jiffy = 16 * 1000 / HZ;
-module_param(nocb_nobypass_lim_per_jiffy, int, 0);
-
-/*
- * Acquire the specified rcu_data structure's ->nocb_bypass_lock.  If the
- * lock isn't immediately available, increment ->nocb_lock_contended to
- * flag the contention.
- */
-static void rcu_nocb_bypass_lock(struct rcu_data *rdp)
-       __acquires(&rdp->nocb_bypass_lock)
-{
-       lockdep_assert_irqs_disabled();
-       if (raw_spin_trylock(&rdp->nocb_bypass_lock))
-               return;
-       atomic_inc(&rdp->nocb_lock_contended);
-       WARN_ON_ONCE(smp_processor_id() != rdp->cpu);
-       smp_mb__after_atomic(); /* atomic_inc() before lock. */
-       raw_spin_lock(&rdp->nocb_bypass_lock);
-       smp_mb__before_atomic(); /* atomic_dec() after lock. */
-       atomic_dec(&rdp->nocb_lock_contended);
-}
-
-/*
- * Spinwait until the specified rcu_data structure's ->nocb_lock is
- * not contended.  Please note that this is extremely special-purpose,
- * relying on the fact that at most two kthreads and one CPU contend for
- * this lock, and also that the two kthreads are guaranteed to have frequent
- * grace-period-duration time intervals between successive acquisitions
- * of the lock.  This allows us to use an extremely simple throttling
- * mechanism, and further to apply it only to the CPU doing floods of
- * call_rcu() invocations.  Don't try this at home!
- */
-static void rcu_nocb_wait_contended(struct rcu_data *rdp)
-{
-       WARN_ON_ONCE(smp_processor_id() != rdp->cpu);
-       while (WARN_ON_ONCE(atomic_read(&rdp->nocb_lock_contended)))
-               cpu_relax();
-}
-
-/*
- * Conditionally acquire the specified rcu_data structure's
- * ->nocb_bypass_lock.
- */
-static bool rcu_nocb_bypass_trylock(struct rcu_data *rdp)
-{
-       lockdep_assert_irqs_disabled();
-       return raw_spin_trylock(&rdp->nocb_bypass_lock);
-}
-
-/*
- * Release the specified rcu_data structure's ->nocb_bypass_lock.
- */
-static void rcu_nocb_bypass_unlock(struct rcu_data *rdp)
-       __releases(&rdp->nocb_bypass_lock)
-{
-       lockdep_assert_irqs_disabled();
-       raw_spin_unlock(&rdp->nocb_bypass_lock);
-}
-
-/*
- * Acquire the specified rcu_data structure's ->nocb_lock, but only
- * if it corresponds to a no-CBs CPU.
- */
-static void rcu_nocb_lock(struct rcu_data *rdp)
-{
-       lockdep_assert_irqs_disabled();
-       if (!rcu_rdp_is_offloaded(rdp))
-               return;
-       raw_spin_lock(&rdp->nocb_lock);
-}
-
-/*
- * Release the specified rcu_data structure's ->nocb_lock, but only
- * if it corresponds to a no-CBs CPU.
- */
-static void rcu_nocb_unlock(struct rcu_data *rdp)
-{
-       if (rcu_rdp_is_offloaded(rdp)) {
-               lockdep_assert_irqs_disabled();
-               raw_spin_unlock(&rdp->nocb_lock);
-       }
-}
-
-/*
- * Release the specified rcu_data structure's ->nocb_lock and restore
- * interrupts, but only if it corresponds to a no-CBs CPU.
- */
-static void rcu_nocb_unlock_irqrestore(struct rcu_data *rdp,
-                                      unsigned long flags)
-{
-       if (rcu_rdp_is_offloaded(rdp)) {
-               lockdep_assert_irqs_disabled();
-               raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
-       } else {
-               local_irq_restore(flags);
-       }
-}
-
-/* Lockdep check that ->cblist may be safely accessed. */
-static void rcu_lockdep_assert_cblist_protected(struct rcu_data *rdp)
-{
-       lockdep_assert_irqs_disabled();
-       if (rcu_rdp_is_offloaded(rdp))
-               lockdep_assert_held(&rdp->nocb_lock);
-}
-
-/*
- * Wake up any no-CBs CPUs' kthreads that were waiting on the just-ended
- * grace period.
- */
-static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq)
-{
-       swake_up_all(sq);
-}
-
-static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp)
-{
-       return &rnp->nocb_gp_wq[rcu_seq_ctr(rnp->gp_seq) & 0x1];
-}
-
-static void rcu_init_one_nocb(struct rcu_node *rnp)
-{
-       init_swait_queue_head(&rnp->nocb_gp_wq[0]);
-       init_swait_queue_head(&rnp->nocb_gp_wq[1]);
-}
-
-/* Is the specified CPU a no-CBs CPU? */
-bool rcu_is_nocb_cpu(int cpu)
-{
-       if (cpumask_available(rcu_nocb_mask))
-               return cpumask_test_cpu(cpu, rcu_nocb_mask);
-       return false;
-}
-
-static bool __wake_nocb_gp(struct rcu_data *rdp_gp,
-                          struct rcu_data *rdp,
-                          bool force, unsigned long flags)
-       __releases(rdp_gp->nocb_gp_lock)
-{
-       bool needwake = false;
-
-       if (!READ_ONCE(rdp_gp->nocb_gp_kthread)) {
-               raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);
-               trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
-                                   TPS("AlreadyAwake"));
-               return false;
-       }
-
-       if (rdp_gp->nocb_defer_wakeup > RCU_NOCB_WAKE_NOT) {
-               WRITE_ONCE(rdp_gp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT);
-               del_timer(&rdp_gp->nocb_timer);
-       }
-
-       if (force || READ_ONCE(rdp_gp->nocb_gp_sleep)) {
-               WRITE_ONCE(rdp_gp->nocb_gp_sleep, false);
-               needwake = true;
-       }
-       raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);
-       if (needwake) {
-               trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DoWake"));
-               wake_up_process(rdp_gp->nocb_gp_kthread);
-       }
-
-       return needwake;
-}
-
-/*
- * Kick the GP kthread for this NOCB group.
- */
-static bool wake_nocb_gp(struct rcu_data *rdp, bool force)
-{
-       unsigned long flags;
-       struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;
-
-       raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
-       return __wake_nocb_gp(rdp_gp, rdp, force, flags);
-}
-
-/*
- * Arrange to wake the GP kthread for this NOCB group at some future
- * time when it is safe to do so.
- */
-static void wake_nocb_gp_defer(struct rcu_data *rdp, int waketype,
-                              const char *reason)
-{
-       unsigned long flags;
-       struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;
-
-       raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
-
-       /*
-        * Bypass wakeup overrides previous deferments. In case
-        * of callback storm, no need to wake up too early.
-        */
-       if (waketype == RCU_NOCB_WAKE_BYPASS) {
-               mod_timer(&rdp_gp->nocb_timer, jiffies + 2);
-               WRITE_ONCE(rdp_gp->nocb_defer_wakeup, waketype);
-       } else {
-               if (rdp_gp->nocb_defer_wakeup < RCU_NOCB_WAKE)
-                       mod_timer(&rdp_gp->nocb_timer, jiffies + 1);
-               if (rdp_gp->nocb_defer_wakeup < waketype)
-                       WRITE_ONCE(rdp_gp->nocb_defer_wakeup, waketype);
-       }
-
-       raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);
-
-       trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, reason);
-}
-
-/*
- * Flush the ->nocb_bypass queue into ->cblist, enqueuing rhp if non-NULL.
- * However, if there is a callback to be enqueued and if ->nocb_bypass
- * proves to be initially empty, just return false because the no-CB GP
- * kthread may need to be awakened in this case.
- *
- * Note that this function always returns true if rhp is NULL.
- */
-static bool rcu_nocb_do_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
-                                    unsigned long j)
-{
-       struct rcu_cblist rcl;
-
-       WARN_ON_ONCE(!rcu_rdp_is_offloaded(rdp));
-       rcu_lockdep_assert_cblist_protected(rdp);
-       lockdep_assert_held(&rdp->nocb_bypass_lock);
-       if (rhp && !rcu_cblist_n_cbs(&rdp->nocb_bypass)) {
-               raw_spin_unlock(&rdp->nocb_bypass_lock);
-               return false;
-       }
-       /* Note: ->cblist.len already accounts for ->nocb_bypass contents. */
-       if (rhp)
-               rcu_segcblist_inc_len(&rdp->cblist); /* Must precede enqueue. */
-       rcu_cblist_flush_enqueue(&rcl, &rdp->nocb_bypass, rhp);
-       rcu_segcblist_insert_pend_cbs(&rdp->cblist, &rcl);
-       WRITE_ONCE(rdp->nocb_bypass_first, j);
-       rcu_nocb_bypass_unlock(rdp);
-       return true;
-}
-
-/*
- * Flush the ->nocb_bypass queue into ->cblist, enqueuing rhp if non-NULL.
- * However, if there is a callback to be enqueued and if ->nocb_bypass
- * proves to be initially empty, just return false because the no-CB GP
- * kthread may need to be awakened in this case.
- *
- * Note that this function always returns true if rhp is NULL.
- */
-static bool rcu_nocb_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
-                                 unsigned long j)
-{
-       if (!rcu_rdp_is_offloaded(rdp))
-               return true;
-       rcu_lockdep_assert_cblist_protected(rdp);
-       rcu_nocb_bypass_lock(rdp);
-       return rcu_nocb_do_flush_bypass(rdp, rhp, j);
-}
-
-/*
- * If the ->nocb_bypass_lock is immediately available, flush the
- * ->nocb_bypass queue into ->cblist.
- */
-static void rcu_nocb_try_flush_bypass(struct rcu_data *rdp, unsigned long j)
-{
-       rcu_lockdep_assert_cblist_protected(rdp);
-       if (!rcu_rdp_is_offloaded(rdp) ||
-           !rcu_nocb_bypass_trylock(rdp))
-               return;
-       WARN_ON_ONCE(!rcu_nocb_do_flush_bypass(rdp, NULL, j));
-}
-
-/*
- * See whether it is appropriate to use the ->nocb_bypass list in order
- * to control contention on ->nocb_lock.  A limited number of direct
- * enqueues are permitted into ->cblist per jiffy.  If ->nocb_bypass
- * is non-empty, further callbacks must be placed into ->nocb_bypass,
- * otherwise rcu_barrier() breaks.  Use rcu_nocb_flush_bypass() to switch
- * back to direct use of ->cblist.  However, ->nocb_bypass should not be
- * used if ->cblist is empty, because otherwise callbacks can be stranded
- * on ->nocb_bypass because we cannot count on the current CPU ever again
- * invoking call_rcu().  The general rule is that if ->nocb_bypass is
- * non-empty, the corresponding no-CBs grace-period kthread must not be
- * in an indefinite sleep state.
- *
- * Finally, it is not permitted to use the bypass during early boot,
- * as doing so would confuse the auto-initialization code.  Besides
- * which, there is no point in worrying about lock contention while
- * there is only one CPU in operation.
- */
-static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
-                               bool *was_alldone, unsigned long flags)
-{
-       unsigned long c;
-       unsigned long cur_gp_seq;
-       unsigned long j = jiffies;
-       long ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
-
-       lockdep_assert_irqs_disabled();
-
-       // Pure softirq/rcuc based processing: no bypassing, no
-       // locking.
-       if (!rcu_rdp_is_offloaded(rdp)) {
-               *was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
-               return false;
-       }
-
-       // In the process of (de-)offloading: no bypassing, but
-       // locking.
-       if (!rcu_segcblist_completely_offloaded(&rdp->cblist)) {
-               rcu_nocb_lock(rdp);
-               *was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
-               return false; /* Not offloaded, no bypassing. */
-       }
-
-       // Don't use ->nocb_bypass during early boot.
-       if (rcu_scheduler_active != RCU_SCHEDULER_RUNNING) {
-               rcu_nocb_lock(rdp);
-               WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
-               *was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
-               return false;
-       }
-
-       // If we have advanced to a new jiffy, reset counts to allow
-       // moving back from ->nocb_bypass to ->cblist.
-       if (j == rdp->nocb_nobypass_last) {
-               c = rdp->nocb_nobypass_count + 1;
-       } else {
-               WRITE_ONCE(rdp->nocb_nobypass_last, j);
-               c = rdp->nocb_nobypass_count - nocb_nobypass_lim_per_jiffy;
-               if (ULONG_CMP_LT(rdp->nocb_nobypass_count,
-                                nocb_nobypass_lim_per_jiffy))
-                       c = 0;
-               else if (c > nocb_nobypass_lim_per_jiffy)
-                       c = nocb_nobypass_lim_per_jiffy;
-       }
-       WRITE_ONCE(rdp->nocb_nobypass_count, c);
-
-       // If there hasn't yet been all that many ->cblist enqueues
-       // this jiffy, tell the caller to enqueue onto ->cblist.  But flush
-       // ->nocb_bypass first.
-       if (rdp->nocb_nobypass_count < nocb_nobypass_lim_per_jiffy) {
-               rcu_nocb_lock(rdp);
-               *was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
-               if (*was_alldone)
-                       trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
-                                           TPS("FirstQ"));
-               WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, j));
-               WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
-               return false; // Caller must enqueue the callback.
-       }
-
-       // If ->nocb_bypass has been used too long or is too full,
-       // flush ->nocb_bypass to ->cblist.
-       if ((ncbs && j != READ_ONCE(rdp->nocb_bypass_first)) ||
-           ncbs >= qhimark) {
-               rcu_nocb_lock(rdp);
-               if (!rcu_nocb_flush_bypass(rdp, rhp, j)) {
-                       *was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
-                       if (*was_alldone)
-                               trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
-                                                   TPS("FirstQ"));
-                       WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
-                       return false; // Caller must enqueue the callback.
-               }
-               if (j != rdp->nocb_gp_adv_time &&
-                   rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) &&
-                   rcu_seq_done(&rdp->mynode->gp_seq, cur_gp_seq)) {
-                       rcu_advance_cbs_nowake(rdp->mynode, rdp);
-                       rdp->nocb_gp_adv_time = j;
-               }
-               rcu_nocb_unlock_irqrestore(rdp, flags);
-               return true; // Callback already enqueued.
-       }
-
-       // We need to use the bypass.
-       rcu_nocb_wait_contended(rdp);
-       rcu_nocb_bypass_lock(rdp);
-       ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
-       rcu_segcblist_inc_len(&rdp->cblist); /* Must precede enqueue. */
-       rcu_cblist_enqueue(&rdp->nocb_bypass, rhp);
-       if (!ncbs) {
-               WRITE_ONCE(rdp->nocb_bypass_first, j);
-               trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("FirstBQ"));
-       }
-       rcu_nocb_bypass_unlock(rdp);
-       smp_mb(); /* Order enqueue before wake. */
-       if (ncbs) {
-               local_irq_restore(flags);
-       } else {
-               // No-CBs GP kthread might be indefinitely asleep, if so, wake.
-               rcu_nocb_lock(rdp); // Rare during call_rcu() flood.
-               if (!rcu_segcblist_pend_cbs(&rdp->cblist)) {
-                       trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
-                                           TPS("FirstBQwake"));
-                       __call_rcu_nocb_wake(rdp, true, flags);
-               } else {
-                       trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
-                                           TPS("FirstBQnoWake"));
-                       rcu_nocb_unlock_irqrestore(rdp, flags);
-               }
-       }
-       return true; // Callback already enqueued.
-}
-
-/*
- * Awaken the no-CBs grace-period kthread if needed, either due to it
- * legitimately being asleep or due to overload conditions.
- *
- * If warranted, also wake up the kthread servicing this CPUs queues.
- */
-static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_alldone,
-                                unsigned long flags)
-                                __releases(rdp->nocb_lock)
-{
-       unsigned long cur_gp_seq;
-       unsigned long j;
-       long len;
-       struct task_struct *t;
-
-       // If we are being polled or there is no kthread, just leave.
-       t = READ_ONCE(rdp->nocb_gp_kthread);
-       if (rcu_nocb_poll || !t) {
-               rcu_nocb_unlock_irqrestore(rdp, flags);
-               trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
-                                   TPS("WakeNotPoll"));
-               return;
-       }
-       // Need to actually to a wakeup.
-       len = rcu_segcblist_n_cbs(&rdp->cblist);
-       if (was_alldone) {
-               rdp->qlen_last_fqs_check = len;
-               if (!irqs_disabled_flags(flags)) {
-                       /* ... if queue was empty ... */
-                       rcu_nocb_unlock_irqrestore(rdp, flags);
-                       wake_nocb_gp(rdp, false);
-                       trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
-                                           TPS("WakeEmpty"));
-               } else {
-                       rcu_nocb_unlock_irqrestore(rdp, flags);
-                       wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE,
-                                          TPS("WakeEmptyIsDeferred"));
-               }
-       } else if (len > rdp->qlen_last_fqs_check + qhimark) {
-               /* ... or if many callbacks queued. */
-               rdp->qlen_last_fqs_check = len;
-               j = jiffies;
-               if (j != rdp->nocb_gp_adv_time &&
-                   rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) &&
-                   rcu_seq_done(&rdp->mynode->gp_seq, cur_gp_seq)) {
-                       rcu_advance_cbs_nowake(rdp->mynode, rdp);
-                       rdp->nocb_gp_adv_time = j;
-               }
-               smp_mb(); /* Enqueue before timer_pending(). */
-               if ((rdp->nocb_cb_sleep ||
-                    !rcu_segcblist_ready_cbs(&rdp->cblist)) &&
-                   !timer_pending(&rdp->nocb_timer)) {
-                       rcu_nocb_unlock_irqrestore(rdp, flags);
-                       wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE_FORCE,
-                                          TPS("WakeOvfIsDeferred"));
-               } else {
-                       rcu_nocb_unlock_irqrestore(rdp, flags);
-                       trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WakeNot"));
-               }
-       } else {
-               rcu_nocb_unlock_irqrestore(rdp, flags);
-               trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WakeNot"));
-       }
-       return;
-}
-
-/*
- * Check if we ignore this rdp.
- *
- * We check that without holding the nocb lock but
- * we make sure not to miss a freshly offloaded rdp
- * with the current ordering:
- *
- *  rdp_offload_toggle()        nocb_gp_enabled_cb()
- * -------------------------   ----------------------------
- *    WRITE flags                 LOCK nocb_gp_lock
- *    LOCK nocb_gp_lock           READ/WRITE nocb_gp_sleep
- *    READ/WRITE nocb_gp_sleep    UNLOCK nocb_gp_lock
- *    UNLOCK nocb_gp_lock         READ flags
- */
-static inline bool nocb_gp_enabled_cb(struct rcu_data *rdp)
-{
-       u8 flags = SEGCBLIST_OFFLOADED | SEGCBLIST_KTHREAD_GP;
-
-       return rcu_segcblist_test_flags(&rdp->cblist, flags);
-}
-
-static inline bool nocb_gp_update_state_deoffloading(struct rcu_data *rdp,
-                                                    bool *needwake_state)
-{
-       struct rcu_segcblist *cblist = &rdp->cblist;
-
-       if (rcu_segcblist_test_flags(cblist, SEGCBLIST_OFFLOADED)) {
-               if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP)) {
-                       rcu_segcblist_set_flags(cblist, SEGCBLIST_KTHREAD_GP);
-                       if (rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB))
-                               *needwake_state = true;
-               }
-               return false;
-       }
-
-       /*
-        * De-offloading. Clear our flag and notify the de-offload worker.
-        * We will ignore this rdp until it ever gets re-offloaded.
-        */
-       WARN_ON_ONCE(!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP));
-       rcu_segcblist_clear_flags(cblist, SEGCBLIST_KTHREAD_GP);
-       if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB))
-               *needwake_state = true;
-       return true;
-}
-
-
-/*
- * No-CBs GP kthreads come here to wait for additional callbacks to show up
- * or for grace periods to end.
- */
-static void nocb_gp_wait(struct rcu_data *my_rdp)
-{
-       bool bypass = false;
-       long bypass_ncbs;
-       int __maybe_unused cpu = my_rdp->cpu;
-       unsigned long cur_gp_seq;
-       unsigned long flags;
-       bool gotcbs = false;
-       unsigned long j = jiffies;
-       bool needwait_gp = false; // This prevents actual uninitialized use.
-       bool needwake;
-       bool needwake_gp;
-       struct rcu_data *rdp;
-       struct rcu_node *rnp;
-       unsigned long wait_gp_seq = 0; // Suppress "use uninitialized" warning.
-       bool wasempty = false;
-
-       /*
-        * Each pass through the following loop checks for CBs and for the
-        * nearest grace period (if any) to wait for next.  The CB kthreads
-        * and the global grace-period kthread are awakened if needed.
-        */
-       WARN_ON_ONCE(my_rdp->nocb_gp_rdp != my_rdp);
-       for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_cb_rdp) {
-               bool needwake_state = false;
-
-               if (!nocb_gp_enabled_cb(rdp))
-                       continue;
-               trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("Check"));
-               rcu_nocb_lock_irqsave(rdp, flags);
-               if (nocb_gp_update_state_deoffloading(rdp, &needwake_state)) {
-                       rcu_nocb_unlock_irqrestore(rdp, flags);
-                       if (needwake_state)
-                               swake_up_one(&rdp->nocb_state_wq);
-                       continue;
-               }
-               bypass_ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
-               if (bypass_ncbs &&
-                   (time_after(j, READ_ONCE(rdp->nocb_bypass_first) + 1) ||
-                    bypass_ncbs > 2 * qhimark)) {
-                       // Bypass full or old, so flush it.
-                       (void)rcu_nocb_try_flush_bypass(rdp, j);
-                       bypass_ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
-               } else if (!bypass_ncbs && rcu_segcblist_empty(&rdp->cblist)) {
-                       rcu_nocb_unlock_irqrestore(rdp, flags);
-                       if (needwake_state)
-                               swake_up_one(&rdp->nocb_state_wq);
-                       continue; /* No callbacks here, try next. */
-               }
-               if (bypass_ncbs) {
-                       trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
-                                           TPS("Bypass"));
-                       bypass = true;
-               }
-               rnp = rdp->mynode;
-
-               // Advance callbacks if helpful and low contention.
-               needwake_gp = false;
-               if (!rcu_segcblist_restempty(&rdp->cblist,
-                                            RCU_NEXT_READY_TAIL) ||
-                   (rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) &&
-                    rcu_seq_done(&rnp->gp_seq, cur_gp_seq))) {
-                       raw_spin_lock_rcu_node(rnp); /* irqs disabled. */
-                       needwake_gp = rcu_advance_cbs(rnp, rdp);
-                       wasempty = rcu_segcblist_restempty(&rdp->cblist,
-                                                          RCU_NEXT_READY_TAIL);
-                       raw_spin_unlock_rcu_node(rnp); /* irqs disabled. */
-               }
-               // Need to wait on some grace period?
-               WARN_ON_ONCE(wasempty &&
-                            !rcu_segcblist_restempty(&rdp->cblist,
-                                                     RCU_NEXT_READY_TAIL));
-               if (rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq)) {
-                       if (!needwait_gp ||
-                           ULONG_CMP_LT(cur_gp_seq, wait_gp_seq))
-                               wait_gp_seq = cur_gp_seq;
-                       needwait_gp = true;
-                       trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
-                                           TPS("NeedWaitGP"));
-               }
-               if (rcu_segcblist_ready_cbs(&rdp->cblist)) {
-                       needwake = rdp->nocb_cb_sleep;
-                       WRITE_ONCE(rdp->nocb_cb_sleep, false);
-                       smp_mb(); /* CB invocation -after- GP end. */
-               } else {
-                       needwake = false;
-               }
-               rcu_nocb_unlock_irqrestore(rdp, flags);
-               if (needwake) {
-                       swake_up_one(&rdp->nocb_cb_wq);
-                       gotcbs = true;
-               }
-               if (needwake_gp)
-                       rcu_gp_kthread_wake();
-               if (needwake_state)
-                       swake_up_one(&rdp->nocb_state_wq);
-       }
-
-       my_rdp->nocb_gp_bypass = bypass;
-       my_rdp->nocb_gp_gp = needwait_gp;
-       my_rdp->nocb_gp_seq = needwait_gp ? wait_gp_seq : 0;
-
-       if (bypass && !rcu_nocb_poll) {
-               // At least one child with non-empty ->nocb_bypass, so set
-               // timer in order to avoid stranding its callbacks.
-               wake_nocb_gp_defer(my_rdp, RCU_NOCB_WAKE_BYPASS,
-                                  TPS("WakeBypassIsDeferred"));
-       }
-       if (rcu_nocb_poll) {
-               /* Polling, so trace if first poll in the series. */
-               if (gotcbs)
-                       trace_rcu_nocb_wake(rcu_state.name, cpu, TPS("Poll"));
-               schedule_timeout_idle(1);
-       } else if (!needwait_gp) {
-               /* Wait for callbacks to appear. */
-               trace_rcu_nocb_wake(rcu_state.name, cpu, TPS("Sleep"));
-               swait_event_interruptible_exclusive(my_rdp->nocb_gp_wq,
-                               !READ_ONCE(my_rdp->nocb_gp_sleep));
-               trace_rcu_nocb_wake(rcu_state.name, cpu, TPS("EndSleep"));
-       } else {
-               rnp = my_rdp->mynode;
-               trace_rcu_this_gp(rnp, my_rdp, wait_gp_seq, TPS("StartWait"));
-               swait_event_interruptible_exclusive(
-                       rnp->nocb_gp_wq[rcu_seq_ctr(wait_gp_seq) & 0x1],
-                       rcu_seq_done(&rnp->gp_seq, wait_gp_seq) ||
-                       !READ_ONCE(my_rdp->nocb_gp_sleep));
-               trace_rcu_this_gp(rnp, my_rdp, wait_gp_seq, TPS("EndWait"));
-       }
-       if (!rcu_nocb_poll) {
-               raw_spin_lock_irqsave(&my_rdp->nocb_gp_lock, flags);
-               if (my_rdp->nocb_defer_wakeup > RCU_NOCB_WAKE_NOT) {
-                       WRITE_ONCE(my_rdp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT);
-                       del_timer(&my_rdp->nocb_timer);
-               }
-               WRITE_ONCE(my_rdp->nocb_gp_sleep, true);
-               raw_spin_unlock_irqrestore(&my_rdp->nocb_gp_lock, flags);
-       }
-       my_rdp->nocb_gp_seq = -1;
-       WARN_ON(signal_pending(current));
-}
-
-/*
- * No-CBs grace-period-wait kthread.  There is one of these per group
- * of CPUs, but only once at least one CPU in that group has come online
- * at least once since boot.  This kthread checks for newly posted
- * callbacks from any of the CPUs it is responsible for, waits for a
- * grace period, then awakens all of the rcu_nocb_cb_kthread() instances
- * that then have callback-invocation work to do.
- */
-static int rcu_nocb_gp_kthread(void *arg)
-{
-       struct rcu_data *rdp = arg;
-
-       for (;;) {
-               WRITE_ONCE(rdp->nocb_gp_loops, rdp->nocb_gp_loops + 1);
-               nocb_gp_wait(rdp);
-               cond_resched_tasks_rcu_qs();
-       }
-       return 0;
-}
-
-static inline bool nocb_cb_can_run(struct rcu_data *rdp)
-{
-       u8 flags = SEGCBLIST_OFFLOADED | SEGCBLIST_KTHREAD_CB;
-       return rcu_segcblist_test_flags(&rdp->cblist, flags);
-}
-
-static inline bool nocb_cb_wait_cond(struct rcu_data *rdp)
-{
-       return nocb_cb_can_run(rdp) && !READ_ONCE(rdp->nocb_cb_sleep);
-}
-
-/*
- * Invoke any ready callbacks from the corresponding no-CBs CPU,
- * then, if there are no more, wait for more to appear.
- */
-static void nocb_cb_wait(struct rcu_data *rdp)
-{
-       struct rcu_segcblist *cblist = &rdp->cblist;
-       unsigned long cur_gp_seq;
-       unsigned long flags;
-       bool needwake_state = false;
-       bool needwake_gp = false;
-       bool can_sleep = true;
-       struct rcu_node *rnp = rdp->mynode;
-
-       local_irq_save(flags);
-       rcu_momentary_dyntick_idle();
-       local_irq_restore(flags);
-       /*
-        * Disable BH to provide the expected environment.  Also, when
-        * transitioning to/from NOCB mode, a self-requeuing callback might
-        * be invoked from softirq.  A short grace period could cause both
-        * instances of this callback would execute concurrently.
-        */
-       local_bh_disable();
-       rcu_do_batch(rdp);
-       local_bh_enable();
-       lockdep_assert_irqs_enabled();
-       rcu_nocb_lock_irqsave(rdp, flags);
-       if (rcu_segcblist_nextgp(cblist, &cur_gp_seq) &&
-           rcu_seq_done(&rnp->gp_seq, cur_gp_seq) &&
-           raw_spin_trylock_rcu_node(rnp)) { /* irqs already disabled. */
-               needwake_gp = rcu_advance_cbs(rdp->mynode, rdp);
-               raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
-       }
-
-       if (rcu_segcblist_test_flags(cblist, SEGCBLIST_OFFLOADED)) {
-               if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB)) {
-                       rcu_segcblist_set_flags(cblist, SEGCBLIST_KTHREAD_CB);
-                       if (rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP))
-                               needwake_state = true;
-               }
-               if (rcu_segcblist_ready_cbs(cblist))
-                       can_sleep = false;
-       } else {
-               /*
-                * De-offloading. Clear our flag and notify the de-offload worker.
-                * We won't touch the callbacks and keep sleeping until we ever
-                * get re-offloaded.
-                */
-               WARN_ON_ONCE(!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB));
-               rcu_segcblist_clear_flags(cblist, SEGCBLIST_KTHREAD_CB);
-               if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP))
-                       needwake_state = true;
-       }
-
-       WRITE_ONCE(rdp->nocb_cb_sleep, can_sleep);
-
-       if (rdp->nocb_cb_sleep)
-               trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("CBSleep"));
-
-       rcu_nocb_unlock_irqrestore(rdp, flags);
-       if (needwake_gp)
-               rcu_gp_kthread_wake();
-
-       if (needwake_state)
-               swake_up_one(&rdp->nocb_state_wq);
-
-       do {
-               swait_event_interruptible_exclusive(rdp->nocb_cb_wq,
-                                                   nocb_cb_wait_cond(rdp));
-
-               // VVV Ensure CB invocation follows _sleep test.
-               if (smp_load_acquire(&rdp->nocb_cb_sleep)) { // ^^^
-                       WARN_ON(signal_pending(current));
-                       trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WokeEmpty"));
-               }
-       } while (!nocb_cb_can_run(rdp));
-}
-
-/*
- * Per-rcu_data kthread, but only for no-CBs CPUs.  Repeatedly invoke
- * nocb_cb_wait() to do the dirty work.
- */
-static int rcu_nocb_cb_kthread(void *arg)
-{
-       struct rcu_data *rdp = arg;
-
-       // Each pass through this loop does one callback batch, and,
-       // if there are no more ready callbacks, waits for them.
-       for (;;) {
-               nocb_cb_wait(rdp);
-               cond_resched_tasks_rcu_qs();
-       }
-       return 0;
-}
-
-/* Is a deferred wakeup of rcu_nocb_kthread() required? */
-static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp, int level)
-{
-       return READ_ONCE(rdp->nocb_defer_wakeup) >= level;
-}
-
-/* Do a deferred wakeup of rcu_nocb_kthread(). */
-static bool do_nocb_deferred_wakeup_common(struct rcu_data *rdp_gp,
-                                          struct rcu_data *rdp, int level,
-                                          unsigned long flags)
-       __releases(rdp_gp->nocb_gp_lock)
-{
-       int ndw;
-       int ret;
-
-       if (!rcu_nocb_need_deferred_wakeup(rdp_gp, level)) {
-               raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);
-               return false;
-       }
-
-       ndw = rdp_gp->nocb_defer_wakeup;
-       ret = __wake_nocb_gp(rdp_gp, rdp, ndw == RCU_NOCB_WAKE_FORCE, flags);
-       trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DeferredWake"));
-
-       return ret;
-}
-
-/* Do a deferred wakeup of rcu_nocb_kthread() from a timer handler. */
-static void do_nocb_deferred_wakeup_timer(struct timer_list *t)
-{
-       unsigned long flags;
-       struct rcu_data *rdp = from_timer(rdp, t, nocb_timer);
-
-       WARN_ON_ONCE(rdp->nocb_gp_rdp != rdp);
-       trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("Timer"));
-
-       raw_spin_lock_irqsave(&rdp->nocb_gp_lock, flags);
-       smp_mb__after_spinlock(); /* Timer expire before wakeup. */
-       do_nocb_deferred_wakeup_common(rdp, rdp, RCU_NOCB_WAKE_BYPASS, flags);
-}
-
-/*
- * Do a deferred wakeup of rcu_nocb_kthread() from fastpath.
- * This means we do an inexact common-case check.  Note that if
- * we miss, ->nocb_timer will eventually clean things up.
- */
-static bool do_nocb_deferred_wakeup(struct rcu_data *rdp)
-{
-       unsigned long flags;
-       struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;
-
-       if (!rdp_gp || !rcu_nocb_need_deferred_wakeup(rdp_gp, RCU_NOCB_WAKE))
-               return false;
-
-       raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
-       return do_nocb_deferred_wakeup_common(rdp_gp, rdp, RCU_NOCB_WAKE, flags);
-}
-
-void rcu_nocb_flush_deferred_wakeup(void)
-{
-       do_nocb_deferred_wakeup(this_cpu_ptr(&rcu_data));
-}
-EXPORT_SYMBOL_GPL(rcu_nocb_flush_deferred_wakeup);
-
-static int rdp_offload_toggle(struct rcu_data *rdp,
-                              bool offload, unsigned long flags)
-       __releases(rdp->nocb_lock)
-{
-       struct rcu_segcblist *cblist = &rdp->cblist;
-       struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;
-       bool wake_gp = false;
-
-       rcu_segcblist_offload(cblist, offload);
-
-       if (rdp->nocb_cb_sleep)
-               rdp->nocb_cb_sleep = false;
-       rcu_nocb_unlock_irqrestore(rdp, flags);
-
-       /*
-        * Ignore former value of nocb_cb_sleep and force wake up as it could
-        * have been spuriously set to false already.
-        */
-       swake_up_one(&rdp->nocb_cb_wq);
-
-       raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
-       if (rdp_gp->nocb_gp_sleep) {
-               rdp_gp->nocb_gp_sleep = false;
-               wake_gp = true;
-       }
-       raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);
-
-       if (wake_gp)
-               wake_up_process(rdp_gp->nocb_gp_kthread);
-
-       return 0;
-}
-
-static long rcu_nocb_rdp_deoffload(void *arg)
-{
-       struct rcu_data *rdp = arg;
-       struct rcu_segcblist *cblist = &rdp->cblist;
-       unsigned long flags;
-       int ret;
-
-       WARN_ON_ONCE(rdp->cpu != raw_smp_processor_id());
-
-       pr_info("De-offloading %d\n", rdp->cpu);
-
-       rcu_nocb_lock_irqsave(rdp, flags);
-       /*
-        * Flush once and for all now. This suffices because we are
-        * running on the target CPU holding ->nocb_lock (thus having
-        * interrupts disabled), and because rdp_offload_toggle()
-        * invokes rcu_segcblist_offload(), which clears SEGCBLIST_OFFLOADED.
-        * Thus future calls to rcu_segcblist_completely_offloaded() will
-        * return false, which means that future calls to rcu_nocb_try_bypass()
-        * will refuse to put anything into the bypass.
-        */
-       WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, jiffies));
-       ret = rdp_offload_toggle(rdp, false, flags);
-       swait_event_exclusive(rdp->nocb_state_wq,
-                             !rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB |
-                                                       SEGCBLIST_KTHREAD_GP));
-       /*
-        * Lock one last time to acquire latest callback updates from kthreads
-        * so we can later handle callbacks locally without locking.
-        */
-       rcu_nocb_lock_irqsave(rdp, flags);
-       /*
-        * Theoretically we could set SEGCBLIST_SOFTIRQ_ONLY after the nocb
-        * lock is released but how about being paranoid for once?
-        */
-       rcu_segcblist_set_flags(cblist, SEGCBLIST_SOFTIRQ_ONLY);
-       /*
-        * With SEGCBLIST_SOFTIRQ_ONLY, we can't use
-        * rcu_nocb_unlock_irqrestore() anymore.
-        */
-       raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
-
-       /* Sanity check */
-       WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
-
-
-       return ret;
-}
-
-int rcu_nocb_cpu_deoffload(int cpu)
-{
-       struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
-       int ret = 0;
-
-       mutex_lock(&rcu_state.barrier_mutex);
-       cpus_read_lock();
-       if (rcu_rdp_is_offloaded(rdp)) {
-               if (cpu_online(cpu)) {
-                       ret = work_on_cpu(cpu, rcu_nocb_rdp_deoffload, rdp);
-                       if (!ret)
-                               cpumask_clear_cpu(cpu, rcu_nocb_mask);
-               } else {
-                       pr_info("NOCB: Can't CB-deoffload an offline CPU\n");
-                       ret = -EINVAL;
-               }
-       }
-       cpus_read_unlock();
-       mutex_unlock(&rcu_state.barrier_mutex);
-
-       return ret;
-}
-EXPORT_SYMBOL_GPL(rcu_nocb_cpu_deoffload);
-
-static long rcu_nocb_rdp_offload(void *arg)
-{
-       struct rcu_data *rdp = arg;
-       struct rcu_segcblist *cblist = &rdp->cblist;
-       unsigned long flags;
-       int ret;
-
-       WARN_ON_ONCE(rdp->cpu != raw_smp_processor_id());
-       /*
-        * For now we only support re-offload, ie: the rdp must have been
-        * offloaded on boot first.
-        */
-       if (!rdp->nocb_gp_rdp)
-               return -EINVAL;
-
-       pr_info("Offloading %d\n", rdp->cpu);
-       /*
-        * Can't use rcu_nocb_lock_irqsave() while we are in
-        * SEGCBLIST_SOFTIRQ_ONLY mode.
-        */
-       raw_spin_lock_irqsave(&rdp->nocb_lock, flags);
-
-       /*
-        * We didn't take the nocb lock while working on the
-        * rdp->cblist in SEGCBLIST_SOFTIRQ_ONLY mode.
-        * Every modifications that have been done previously on
-        * rdp->cblist must be visible remotely by the nocb kthreads
-        * upon wake up after reading the cblist flags.
-        *
-        * The layout against nocb_lock enforces that ordering:
-        *
-        *  __rcu_nocb_rdp_offload()   nocb_cb_wait()/nocb_gp_wait()
-        * -------------------------   ----------------------------
-        *      WRITE callbacks           rcu_nocb_lock()
-        *      rcu_nocb_lock()           READ flags
-        *      WRITE flags               READ callbacks
-        *      rcu_nocb_unlock()         rcu_nocb_unlock()
-        */
-       ret = rdp_offload_toggle(rdp, true, flags);
-       swait_event_exclusive(rdp->nocb_state_wq,
-                             rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB) &&
-                             rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP));
-
-       return ret;
-}
-
-int rcu_nocb_cpu_offload(int cpu)
-{
-       struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
-       int ret = 0;
-
-       mutex_lock(&rcu_state.barrier_mutex);
-       cpus_read_lock();
-       if (!rcu_rdp_is_offloaded(rdp)) {
-               if (cpu_online(cpu)) {
-                       ret = work_on_cpu(cpu, rcu_nocb_rdp_offload, rdp);
-                       if (!ret)
-                               cpumask_set_cpu(cpu, rcu_nocb_mask);
-               } else {
-                       pr_info("NOCB: Can't CB-offload an offline CPU\n");
-                       ret = -EINVAL;
-               }
-       }
-       cpus_read_unlock();
-       mutex_unlock(&rcu_state.barrier_mutex);
-
-       return ret;
-}
-EXPORT_SYMBOL_GPL(rcu_nocb_cpu_offload);
-
-void __init rcu_init_nohz(void)
-{
-       int cpu;
-       bool need_rcu_nocb_mask = false;
-       struct rcu_data *rdp;
-
-#if defined(CONFIG_NO_HZ_FULL)
-       if (tick_nohz_full_running && cpumask_weight(tick_nohz_full_mask))
-               need_rcu_nocb_mask = true;
-#endif /* #if defined(CONFIG_NO_HZ_FULL) */
-
-       if (!cpumask_available(rcu_nocb_mask) && need_rcu_nocb_mask) {
-               if (!zalloc_cpumask_var(&rcu_nocb_mask, GFP_KERNEL)) {
-                       pr_info("rcu_nocb_mask allocation failed, callback offloading disabled.\n");
-                       return;
-               }
-       }
-       if (!cpumask_available(rcu_nocb_mask))
-               return;
-
-#if defined(CONFIG_NO_HZ_FULL)
-       if (tick_nohz_full_running)
-               cpumask_or(rcu_nocb_mask, rcu_nocb_mask, tick_nohz_full_mask);
-#endif /* #if defined(CONFIG_NO_HZ_FULL) */
-
-       if (!cpumask_subset(rcu_nocb_mask, cpu_possible_mask)) {
-               pr_info("\tNote: kernel parameter 'rcu_nocbs=', 'nohz_full', or 'isolcpus=' contains nonexistent CPUs.\n");
-               cpumask_and(rcu_nocb_mask, cpu_possible_mask,
-                           rcu_nocb_mask);
-       }
-       if (cpumask_empty(rcu_nocb_mask))
-               pr_info("\tOffload RCU callbacks from CPUs: (none).\n");
-       else
-               pr_info("\tOffload RCU callbacks from CPUs: %*pbl.\n",
-                       cpumask_pr_args(rcu_nocb_mask));
-       if (rcu_nocb_poll)
-               pr_info("\tPoll for callbacks from no-CBs CPUs.\n");
-
-       for_each_cpu(cpu, rcu_nocb_mask) {
-               rdp = per_cpu_ptr(&rcu_data, cpu);
-               if (rcu_segcblist_empty(&rdp->cblist))
-                       rcu_segcblist_init(&rdp->cblist);
-               rcu_segcblist_offload(&rdp->cblist, true);
-               rcu_segcblist_set_flags(&rdp->cblist, SEGCBLIST_KTHREAD_CB);
-               rcu_segcblist_set_flags(&rdp->cblist, SEGCBLIST_KTHREAD_GP);
-       }
-       rcu_organize_nocb_kthreads();
-}
-
-/* Initialize per-rcu_data variables for no-CBs CPUs. */
-static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
-{
-       init_swait_queue_head(&rdp->nocb_cb_wq);
-       init_swait_queue_head(&rdp->nocb_gp_wq);
-       init_swait_queue_head(&rdp->nocb_state_wq);
-       raw_spin_lock_init(&rdp->nocb_lock);
-       raw_spin_lock_init(&rdp->nocb_bypass_lock);
-       raw_spin_lock_init(&rdp->nocb_gp_lock);
-       timer_setup(&rdp->nocb_timer, do_nocb_deferred_wakeup_timer, 0);
-       rcu_cblist_init(&rdp->nocb_bypass);
-}
-
-/*
- * If the specified CPU is a no-CBs CPU that does not already have its
- * rcuo CB kthread, spawn it.  Additionally, if the rcuo GP kthread
- * for this CPU's group has not yet been created, spawn it as well.
- */
-static void rcu_spawn_one_nocb_kthread(int cpu)
-{
-       struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
-       struct rcu_data *rdp_gp;
-       struct task_struct *t;
-
-       /*
-        * If this isn't a no-CBs CPU or if it already has an rcuo kthread,
-        * then nothing to do.
-        */
-       if (!rcu_is_nocb_cpu(cpu) || rdp->nocb_cb_kthread)
-               return;
-
-       /* If we didn't spawn the GP kthread first, reorganize! */
-       rdp_gp = rdp->nocb_gp_rdp;
-       if (!rdp_gp->nocb_gp_kthread) {
-               t = kthread_run(rcu_nocb_gp_kthread, rdp_gp,
-                               "rcuog/%d", rdp_gp->cpu);
-               if (WARN_ONCE(IS_ERR(t), "%s: Could not start rcuo GP kthread, OOM is now expected behavior\n", __func__))
-                       return;
-               WRITE_ONCE(rdp_gp->nocb_gp_kthread, t);
-       }
-
-       /* Spawn the kthread for this CPU. */
-       t = kthread_run(rcu_nocb_cb_kthread, rdp,
-                       "rcuo%c/%d", rcu_state.abbr, cpu);
-       if (WARN_ONCE(IS_ERR(t), "%s: Could not start rcuo CB kthread, OOM is now expected behavior\n", __func__))
-               return;
-       WRITE_ONCE(rdp->nocb_cb_kthread, t);
-       WRITE_ONCE(rdp->nocb_gp_kthread, rdp_gp->nocb_gp_kthread);
-}
-
-/*
- * If the specified CPU is a no-CBs CPU that does not already have its
- * rcuo kthread, spawn it.
- */
-static void rcu_spawn_cpu_nocb_kthread(int cpu)
-{
-       if (rcu_scheduler_fully_active)
-               rcu_spawn_one_nocb_kthread(cpu);
-}
-
-/*
- * Once the scheduler is running, spawn rcuo kthreads for all online
- * no-CBs CPUs.  This assumes that the early_initcall()s happen before
- * non-boot CPUs come online -- if this changes, we will need to add
- * some mutual exclusion.
- */
-static void __init rcu_spawn_nocb_kthreads(void)
-{
-       int cpu;
-
-       for_each_online_cpu(cpu)
-               rcu_spawn_cpu_nocb_kthread(cpu);
-}
-
-/* How many CB CPU IDs per GP kthread?  Default of -1 for sqrt(nr_cpu_ids). */
-static int rcu_nocb_gp_stride = -1;
-module_param(rcu_nocb_gp_stride, int, 0444);
-
-/*
- * Initialize GP-CB relationships for all no-CBs CPU.
- */
-static void __init rcu_organize_nocb_kthreads(void)
-{
-       int cpu;
-       bool firsttime = true;
-       bool gotnocbs = false;
-       bool gotnocbscbs = true;
-       int ls = rcu_nocb_gp_stride;
-       int nl = 0;  /* Next GP kthread. */
-       struct rcu_data *rdp;
-       struct rcu_data *rdp_gp = NULL;  /* Suppress misguided gcc warn. */
-       struct rcu_data *rdp_prev = NULL;
-
-       if (!cpumask_available(rcu_nocb_mask))
-               return;
-       if (ls == -1) {
-               ls = nr_cpu_ids / int_sqrt(nr_cpu_ids);
-               rcu_nocb_gp_stride = ls;
-       }
-
-       /*
-        * Each pass through this loop sets up one rcu_data structure.
-        * Should the corresponding CPU come online in the future, then
-        * we will spawn the needed set of rcu_nocb_kthread() kthreads.
-        */
-       for_each_cpu(cpu, rcu_nocb_mask) {
-               rdp = per_cpu_ptr(&rcu_data, cpu);
-               if (rdp->cpu >= nl) {
-                       /* New GP kthread, set up for CBs & next GP. */
-                       gotnocbs = true;
-                       nl = DIV_ROUND_UP(rdp->cpu + 1, ls) * ls;
-                       rdp->nocb_gp_rdp = rdp;
-                       rdp_gp = rdp;
-                       if (dump_tree) {
-                               if (!firsttime)
-                                       pr_cont("%s\n", gotnocbscbs
-                                                       ? "" : " (self only)");
-                               gotnocbscbs = false;
-                               firsttime = false;
-                               pr_alert("%s: No-CB GP kthread CPU %d:",
-                                        __func__, cpu);
-                       }
-               } else {
-                       /* Another CB kthread, link to previous GP kthread. */
-                       gotnocbscbs = true;
-                       rdp->nocb_gp_rdp = rdp_gp;
-                       rdp_prev->nocb_next_cb_rdp = rdp;
-                       if (dump_tree)
-                               pr_cont(" %d", cpu);
-               }
-               rdp_prev = rdp;
-       }
-       if (gotnocbs && dump_tree)
-               pr_cont("%s\n", gotnocbscbs ? "" : " (self only)");
-}
-
-/*
- * Bind the current task to the offloaded CPUs.  If there are no offloaded
- * CPUs, leave the task unbound.  Splat if the bind attempt fails.
- */
-void rcu_bind_current_to_nocb(void)
-{
-       if (cpumask_available(rcu_nocb_mask) && cpumask_weight(rcu_nocb_mask))
-               WARN_ON(sched_setaffinity(current->pid, rcu_nocb_mask));
-}
-EXPORT_SYMBOL_GPL(rcu_bind_current_to_nocb);
-
-// The ->on_cpu field is available only in CONFIG_SMP=y, so...
-#ifdef CONFIG_SMP
-static char *show_rcu_should_be_on_cpu(struct task_struct *tsp)
-{
-       return tsp && task_is_running(tsp) && !tsp->on_cpu ? "!" : "";
-}
-#else // #ifdef CONFIG_SMP
-static char *show_rcu_should_be_on_cpu(struct task_struct *tsp)
-{
-       return "";
-}
-#endif // #else #ifdef CONFIG_SMP
-
-/*
- * Dump out nocb grace-period kthread state for the specified rcu_data
- * structure.
- */
-static void show_rcu_nocb_gp_state(struct rcu_data *rdp)
-{
-       struct rcu_node *rnp = rdp->mynode;
-
-       pr_info("nocb GP %d %c%c%c%c%c %c[%c%c] %c%c:%ld rnp %d:%d %lu %c CPU %d%s\n",
-               rdp->cpu,
-               "kK"[!!rdp->nocb_gp_kthread],
-               "lL"[raw_spin_is_locked(&rdp->nocb_gp_lock)],
-               "dD"[!!rdp->nocb_defer_wakeup],
-               "tT"[timer_pending(&rdp->nocb_timer)],
-               "sS"[!!rdp->nocb_gp_sleep],
-               ".W"[swait_active(&rdp->nocb_gp_wq)],
-               ".W"[swait_active(&rnp->nocb_gp_wq[0])],
-               ".W"[swait_active(&rnp->nocb_gp_wq[1])],
-               ".B"[!!rdp->nocb_gp_bypass],
-               ".G"[!!rdp->nocb_gp_gp],
-               (long)rdp->nocb_gp_seq,
-               rnp->grplo, rnp->grphi, READ_ONCE(rdp->nocb_gp_loops),
-               rdp->nocb_gp_kthread ? task_state_to_char(rdp->nocb_gp_kthread) : '.',
-               rdp->nocb_cb_kthread ? (int)task_cpu(rdp->nocb_gp_kthread) : -1,
-               show_rcu_should_be_on_cpu(rdp->nocb_cb_kthread));
-}
-
-/* Dump out nocb kthread state for the specified rcu_data structure. */
-static void show_rcu_nocb_state(struct rcu_data *rdp)
-{
-       char bufw[20];
-       char bufr[20];
-       struct rcu_segcblist *rsclp = &rdp->cblist;
-       bool waslocked;
-       bool wassleep;
-
-       if (rdp->nocb_gp_rdp == rdp)
-               show_rcu_nocb_gp_state(rdp);
-
-       sprintf(bufw, "%ld", rsclp->gp_seq[RCU_WAIT_TAIL]);
-       sprintf(bufr, "%ld", rsclp->gp_seq[RCU_NEXT_READY_TAIL]);
-       pr_info("   CB %d^%d->%d %c%c%c%c%c%c F%ld L%ld C%d %c%c%s%c%s%c%c q%ld %c CPU %d%s\n",
-               rdp->cpu, rdp->nocb_gp_rdp->cpu,
-               rdp->nocb_next_cb_rdp ? rdp->nocb_next_cb_rdp->cpu : -1,
-               "kK"[!!rdp->nocb_cb_kthread],
-               "bB"[raw_spin_is_locked(&rdp->nocb_bypass_lock)],
-               "cC"[!!atomic_read(&rdp->nocb_lock_contended)],
-               "lL"[raw_spin_is_locked(&rdp->nocb_lock)],
-               "sS"[!!rdp->nocb_cb_sleep],
-               ".W"[swait_active(&rdp->nocb_cb_wq)],
-               jiffies - rdp->nocb_bypass_first,
-               jiffies - rdp->nocb_nobypass_last,
-               rdp->nocb_nobypass_count,
-               ".D"[rcu_segcblist_ready_cbs(rsclp)],
-               ".W"[!rcu_segcblist_segempty(rsclp, RCU_WAIT_TAIL)],
-               rcu_segcblist_segempty(rsclp, RCU_WAIT_TAIL) ? "" : bufw,
-               ".R"[!rcu_segcblist_segempty(rsclp, RCU_NEXT_READY_TAIL)],
-               rcu_segcblist_segempty(rsclp, RCU_NEXT_READY_TAIL) ? "" : bufr,
-               ".N"[!rcu_segcblist_segempty(rsclp, RCU_NEXT_TAIL)],
-               ".B"[!!rcu_cblist_n_cbs(&rdp->nocb_bypass)],
-               rcu_segcblist_n_cbs(&rdp->cblist),
-               rdp->nocb_cb_kthread ? task_state_to_char(rdp->nocb_cb_kthread) : '.',
-               rdp->nocb_cb_kthread ? (int)task_cpu(rdp->nocb_gp_kthread) : -1,
-               show_rcu_should_be_on_cpu(rdp->nocb_cb_kthread));
-
-       /* It is OK for GP kthreads to have GP state. */
-       if (rdp->nocb_gp_rdp == rdp)
-               return;
-
-       waslocked = raw_spin_is_locked(&rdp->nocb_gp_lock);
-       wassleep = swait_active(&rdp->nocb_gp_wq);
-       if (!rdp->nocb_gp_sleep && !waslocked && !wassleep)
-               return;  /* Nothing untoward. */
-
-       pr_info("   nocb GP activity on CB-only CPU!!! %c%c%c %c\n",
-               "lL"[waslocked],
-               "dD"[!!rdp->nocb_defer_wakeup],
-               "sS"[!!rdp->nocb_gp_sleep],
-               ".W"[wassleep]);
-}
-
-#else /* #ifdef CONFIG_RCU_NOCB_CPU */
-
-/* No ->nocb_lock to acquire.  */
-static void rcu_nocb_lock(struct rcu_data *rdp)
-{
-}
-
-/* No ->nocb_lock to release.  */
-static void rcu_nocb_unlock(struct rcu_data *rdp)
-{
-}
-
-/* No ->nocb_lock to release.  */
-static void rcu_nocb_unlock_irqrestore(struct rcu_data *rdp,
-                                      unsigned long flags)
-{
-       local_irq_restore(flags);
-}
-
-/* Lockdep check that ->cblist may be safely accessed. */
-static void rcu_lockdep_assert_cblist_protected(struct rcu_data *rdp)
-{
-       lockdep_assert_irqs_disabled();
-}
-
-static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq)
-{
-}
-
-static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp)
-{
-       return NULL;
-}
-
-static void rcu_init_one_nocb(struct rcu_node *rnp)
-{
-}
-
-static bool rcu_nocb_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
-                                 unsigned long j)
-{
-       return true;
-}
-
-static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
-                               bool *was_alldone, unsigned long flags)
-{
-       return false;
-}
-
-static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_empty,
-                                unsigned long flags)
-{
-       WARN_ON_ONCE(1);  /* Should be dead code! */
-}
-
-static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
-{
-}
-
-static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp, int level)
-{
-       return false;
-}
-
-static bool do_nocb_deferred_wakeup(struct rcu_data *rdp)
-{
-       return false;
-}
-
-static void rcu_spawn_cpu_nocb_kthread(int cpu)
-{
-}
-
-static void __init rcu_spawn_nocb_kthreads(void)
-{
-}
-
-static void show_rcu_nocb_state(struct rcu_data *rdp)
-{
-}
-
-#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
-
 /*
  * Is this CPU a NO_HZ_FULL CPU that should ignore RCU so that the
  * grace-period kthread will do force_quiescent_state() processing?
@@ -2982,17 +1498,17 @@ static void noinstr rcu_dynticks_task_exit(void)
 /* Turn on heavyweight RCU tasks trace readers on idle/user entry. */
 static void rcu_dynticks_task_trace_enter(void)
 {
-#ifdef CONFIG_TASKS_RCU_TRACE
+#ifdef CONFIG_TASKS_TRACE_RCU
        if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB))
                current->trc_reader_special.b.need_mb = true;
-#endif /* #ifdef CONFIG_TASKS_RCU_TRACE */
+#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
 }
 
 /* Turn off heavyweight RCU tasks trace readers on idle/user exit. */
 static void rcu_dynticks_task_trace_exit(void)
 {
-#ifdef CONFIG_TASKS_RCU_TRACE
+#ifdef CONFIG_TASKS_TRACE_RCU
        if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB))
                current->trc_reader_special.b.need_mb = false;
-#endif /* #ifdef CONFIG_TASKS_RCU_TRACE */
+#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
 }
index 6c76988..677ee3d 100644 (file)
@@ -7,6 +7,8 @@
  * Author: Paul E. McKenney <paulmck@linux.ibm.com>
  */
 
+#include <linux/kvm_para.h>
+
 //////////////////////////////////////////////////////////////////////////////
 //
 // Controlling CPU stall warnings, including delay calculation.
@@ -117,17 +119,14 @@ static void panic_on_rcu_stall(void)
 }
 
 /**
- * rcu_cpu_stall_reset - prevent further stall warnings in current grace period
- *
- * Set the stall-warning timeout way off into the future, thus preventing
- * any RCU CPU stall-warning messages from appearing in the current set of
- * RCU grace periods.
+ * rcu_cpu_stall_reset - restart stall-warning timeout for current grace period
  *
  * The caller must disable hard irqs.
  */
 void rcu_cpu_stall_reset(void)
 {
-       WRITE_ONCE(rcu_state.jiffies_stall, jiffies + ULONG_MAX / 2);
+       WRITE_ONCE(rcu_state.jiffies_stall,
+                  jiffies + rcu_jiffies_till_stall_check());
 }
 
 //////////////////////////////////////////////////////////////////////////////
@@ -267,8 +266,10 @@ static int rcu_print_task_stall(struct rcu_node *rnp, unsigned long flags)
        struct task_struct *ts[8];
 
        lockdep_assert_irqs_disabled();
-       if (!rcu_preempt_blocked_readers_cgp(rnp))
+       if (!rcu_preempt_blocked_readers_cgp(rnp)) {
+               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                return 0;
+       }
        pr_err("\tTasks blocked on level-%d rcu_node (CPUs %d-%d):",
               rnp->level, rnp->grplo, rnp->grphi);
        t = list_entry(rnp->gp_tasks->prev,
@@ -280,8 +281,8 @@ static int rcu_print_task_stall(struct rcu_node *rnp, unsigned long flags)
                        break;
        }
        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
-       for (i--; i; i--) {
-               t = ts[i];
+       while (i) {
+               t = ts[--i];
                if (!try_invoke_on_locked_down_task(t, check_slow_task, &rscr))
                        pr_cont(" P%d", t->pid);
                else
@@ -350,7 +351,7 @@ static void rcu_dump_cpu_stacks(void)
 
 static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
 {
-       struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
+       struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
 
        sprintf(cp, "last_accelerate: %04lx/%04lx dyntick_enabled: %d",
                rdp->last_accelerate & 0xffff, jiffies & 0xffff,
@@ -464,9 +465,10 @@ static void rcu_check_gp_kthread_starvation(void)
                pr_err("%s kthread starved for %ld jiffies! g%ld f%#x %s(%d) ->state=%#x ->cpu=%d\n",
                       rcu_state.name, j,
                       (long)rcu_seq_current(&rcu_state.gp_seq),
-                      data_race(rcu_state.gp_flags),
-                      gp_state_getname(rcu_state.gp_state), rcu_state.gp_state,
-                      gpk ? gpk->__state : ~0, cpu);
+                      data_race(READ_ONCE(rcu_state.gp_flags)),
+                      gp_state_getname(rcu_state.gp_state),
+                      data_race(READ_ONCE(rcu_state.gp_state)),
+                      gpk ? data_race(READ_ONCE(gpk->__state)) : ~0, cpu);
                if (gpk) {
                        pr_err("\tUnless %s kthread gets sufficient CPU time, OOM is now expected behavior.\n", rcu_state.name);
                        pr_err("RCU grace-period kthread stack dump:\n");
@@ -509,7 +511,7 @@ static void rcu_check_gp_kthread_expired_fqs_timer(void)
                       (long)rcu_seq_current(&rcu_state.gp_seq),
                       data_race(rcu_state.gp_flags),
                       gp_state_getname(RCU_GP_WAIT_FQS), RCU_GP_WAIT_FQS,
-                      gpk->__state);
+                      data_race(READ_ONCE(gpk->__state)));
                pr_err("\tPossible timer handling issue on cpu=%d timer-softirq=%u\n",
                       cpu, kstat_softirqs_cpu(TIMER_SOFTIRQ, cpu));
        }
@@ -568,11 +570,11 @@ static void print_other_cpu_stall(unsigned long gp_seq, unsigned long gps)
                        pr_err("INFO: Stall ended before state dump start\n");
                } else {
                        j = jiffies;
-                       gpa = data_race(rcu_state.gp_activity);
+                       gpa = data_race(READ_ONCE(rcu_state.gp_activity));
                        pr_err("All QSes seen, last %s kthread activity %ld (%ld-%ld), jiffies_till_next_fqs=%ld, root ->qsmask %#lx\n",
                               rcu_state.name, j - gpa, j, gpa,
-                              data_race(jiffies_till_next_fqs),
-                              rcu_get_root()->qsmask);
+                              data_race(READ_ONCE(jiffies_till_next_fqs)),
+                              data_race(READ_ONCE(rcu_get_root()->qsmask)));
                }
        }
        /* Rewrite if needed in case of slow consoles. */
@@ -646,6 +648,7 @@ static void print_cpu_stall(unsigned long gps)
 
 static void check_cpu_stall(struct rcu_data *rdp)
 {
+       bool didstall = false;
        unsigned long gs1;
        unsigned long gs2;
        unsigned long gps;
@@ -691,24 +694,46 @@ static void check_cpu_stall(struct rcu_data *rdp)
            ULONG_CMP_GE(gps, js))
                return; /* No stall or GP completed since entering function. */
        rnp = rdp->mynode;
-       jn = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
+       jn = jiffies + ULONG_MAX / 2;
        if (rcu_gp_in_progress() &&
            (READ_ONCE(rnp->qsmask) & rdp->grpmask) &&
            cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) {
 
+               /*
+                * If a virtual machine is stopped by the host it can look to
+                * the watchdog like an RCU stall. Check to see if the host
+                * stopped the vm.
+                */
+               if (kvm_check_and_clear_guest_paused())
+                       return;
+
                /* We haven't checked in, so go dump stack. */
                print_cpu_stall(gps);
                if (READ_ONCE(rcu_cpu_stall_ftrace_dump))
                        rcu_ftrace_dump(DUMP_ALL);
+               didstall = true;
 
        } else if (rcu_gp_in_progress() &&
                   ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY) &&
                   cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) {
 
+               /*
+                * If a virtual machine is stopped by the host it can look to
+                * the watchdog like an RCU stall. Check to see if the host
+                * stopped the vm.
+                */
+               if (kvm_check_and_clear_guest_paused())
+                       return;
+
                /* They had a few time units to dump stack, so complain. */
                print_other_cpu_stall(gs2, gps);
                if (READ_ONCE(rcu_cpu_stall_ftrace_dump))
                        rcu_ftrace_dump(DUMP_ALL);
+               didstall = true;
+       }
+       if (didstall && READ_ONCE(rcu_state.jiffies_stall) == jn) {
+               jn = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
+               WRITE_ONCE(rcu_state.jiffies_stall, jn);
        }
 }
 
@@ -742,7 +767,7 @@ bool rcu_check_boost_fail(unsigned long gp_state, int *cpup)
 
        rcu_for_each_leaf_node(rnp) {
                if (!cpup) {
-                       if (READ_ONCE(rnp->qsmask)) {
+                       if (data_race(READ_ONCE(rnp->qsmask))) {
                                return false;
                        } else {
                                if (READ_ONCE(rnp->gp_tasks))
@@ -791,32 +816,34 @@ void show_rcu_gp_kthreads(void)
        struct task_struct *t = READ_ONCE(rcu_state.gp_kthread);
 
        j = jiffies;
-       ja = j - data_race(rcu_state.gp_activity);
-       jr = j - data_race(rcu_state.gp_req_activity);
-       js = j - data_race(rcu_state.gp_start);
-       jw = j - data_race(rcu_state.gp_wake_time);
+       ja = j - data_race(READ_ONCE(rcu_state.gp_activity));
+       jr = j - data_race(READ_ONCE(rcu_state.gp_req_activity));
+       js = j - data_race(READ_ONCE(rcu_state.gp_start));
+       jw = j - data_race(READ_ONCE(rcu_state.gp_wake_time));
        pr_info("%s: wait state: %s(%d) ->state: %#x ->rt_priority %u delta ->gp_start %lu ->gp_activity %lu ->gp_req_activity %lu ->gp_wake_time %lu ->gp_wake_seq %ld ->gp_seq %ld ->gp_seq_needed %ld ->gp_max %lu ->gp_flags %#x\n",
                rcu_state.name, gp_state_getname(rcu_state.gp_state),
-               rcu_state.gp_state, t ? t->__state : 0x1ffff, t ? t->rt_priority : 0xffU,
-               js, ja, jr, jw, (long)data_race(rcu_state.gp_wake_seq),
-               (long)data_race(rcu_state.gp_seq),
-               (long)data_race(rcu_get_root()->gp_seq_needed),
-               data_race(rcu_state.gp_max),
-               data_race(rcu_state.gp_flags));
+               data_race(READ_ONCE(rcu_state.gp_state)),
+               t ? data_race(READ_ONCE(t->__state)) : 0x1ffff, t ? t->rt_priority : 0xffU,
+               js, ja, jr, jw, (long)data_race(READ_ONCE(rcu_state.gp_wake_seq)),
+               (long)data_race(READ_ONCE(rcu_state.gp_seq)),
+               (long)data_race(READ_ONCE(rcu_get_root()->gp_seq_needed)),
+               data_race(READ_ONCE(rcu_state.gp_max)),
+               data_race(READ_ONCE(rcu_state.gp_flags)));
        rcu_for_each_node_breadth_first(rnp) {
                if (ULONG_CMP_GE(READ_ONCE(rcu_state.gp_seq), READ_ONCE(rnp->gp_seq_needed)) &&
-                   !data_race(rnp->qsmask) && !data_race(rnp->boost_tasks) &&
-                   !data_race(rnp->exp_tasks) && !data_race(rnp->gp_tasks))
+                   !data_race(READ_ONCE(rnp->qsmask)) && !data_race(READ_ONCE(rnp->boost_tasks)) &&
+                   !data_race(READ_ONCE(rnp->exp_tasks)) && !data_race(READ_ONCE(rnp->gp_tasks)))
                        continue;
                pr_info("\trcu_node %d:%d ->gp_seq %ld ->gp_seq_needed %ld ->qsmask %#lx %c%c%c%c ->n_boosts %ld\n",
                        rnp->grplo, rnp->grphi,
-                       (long)data_race(rnp->gp_seq), (long)data_race(rnp->gp_seq_needed),
-                       data_race(rnp->qsmask),
-                       ".b"[!!data_race(rnp->boost_kthread_task)],
-                       ".B"[!!data_race(rnp->boost_tasks)],
-                       ".E"[!!data_race(rnp->exp_tasks)],
-                       ".G"[!!data_race(rnp->gp_tasks)],
-                       data_race(rnp->n_boosts));
+                       (long)data_race(READ_ONCE(rnp->gp_seq)),
+                       (long)data_race(READ_ONCE(rnp->gp_seq_needed)),
+                       data_race(READ_ONCE(rnp->qsmask)),
+                       ".b"[!!data_race(READ_ONCE(rnp->boost_kthread_task))],
+                       ".B"[!!data_race(READ_ONCE(rnp->boost_tasks))],
+                       ".E"[!!data_race(READ_ONCE(rnp->exp_tasks))],
+                       ".G"[!!data_race(READ_ONCE(rnp->gp_tasks))],
+                       data_race(READ_ONCE(rnp->n_boosts)));
                if (!rcu_is_leaf_node(rnp))
                        continue;
                for_each_leaf_node_possible_cpu(rnp, cpu) {
@@ -826,12 +853,12 @@ void show_rcu_gp_kthreads(void)
                                         READ_ONCE(rdp->gp_seq_needed)))
                                continue;
                        pr_info("\tcpu %d ->gp_seq_needed %ld\n",
-                               cpu, (long)data_race(rdp->gp_seq_needed));
+                               cpu, (long)data_race(READ_ONCE(rdp->gp_seq_needed)));
                }
        }
        for_each_possible_cpu(cpu) {
                rdp = per_cpu_ptr(&rcu_data, cpu);
-               cbs += data_race(rdp->n_cbs_invoked);
+               cbs += data_race(READ_ONCE(rdp->n_cbs_invoked));
                if (rcu_segcblist_is_offloaded(&rdp->cblist))
                        show_rcu_nocb_state(rdp);
        }
@@ -913,11 +940,11 @@ void rcu_fwd_progress_check(unsigned long j)
 
        if (rcu_gp_in_progress()) {
                pr_info("%s: GP age %lu jiffies\n",
-                       __func__, jiffies - rcu_state.gp_start);
+                       __func__, jiffies - data_race(READ_ONCE(rcu_state.gp_start)));
                show_rcu_gp_kthreads();
        } else {
                pr_info("%s: Last GP end %lu jiffies ago\n",
-                       __func__, jiffies - rcu_state.gp_end);
+                       __func__, jiffies - data_race(READ_ONCE(rcu_state.gp_end)));
                preempt_disable();
                rdp = this_cpu_ptr(&rcu_data);
                rcu_check_gp_start_stall(rdp->mynode, rdp, j);
index 29e8fc5..64a0828 100644 (file)
@@ -64,6 +64,7 @@ torture_param(bool, use_cpus_read_lock, 0, "Use cpus_read_lock() to exclude CPU
 torture_param(int, verbose, 0, "Enable verbose debugging printk()s");
 torture_param(int, weight_resched, -1, "Testing weight for resched_cpu() operations.");
 torture_param(int, weight_single, -1, "Testing weight for single-CPU no-wait operations.");
+torture_param(int, weight_single_rpc, -1, "Testing weight for single-CPU RPC operations.");
 torture_param(int, weight_single_wait, -1, "Testing weight for single-CPU operations.");
 torture_param(int, weight_many, -1, "Testing weight for multi-CPU no-wait operations.");
 torture_param(int, weight_many_wait, -1, "Testing weight for multi-CPU operations.");
@@ -86,6 +87,8 @@ struct scf_statistics {
        long long n_resched;
        long long n_single;
        long long n_single_ofl;
+       long long n_single_rpc;
+       long long n_single_rpc_ofl;
        long long n_single_wait;
        long long n_single_wait_ofl;
        long long n_many;
@@ -101,14 +104,17 @@ static DEFINE_PER_CPU(long long, scf_invoked_count);
 // Data for random primitive selection
 #define SCF_PRIM_RESCHED       0
 #define SCF_PRIM_SINGLE                1
-#define SCF_PRIM_MANY          2
-#define SCF_PRIM_ALL           3
-#define SCF_NPRIMS             7 // Need wait and no-wait versions of each,
-                                 //  except for SCF_PRIM_RESCHED.
+#define SCF_PRIM_SINGLE_RPC    2
+#define SCF_PRIM_MANY          3
+#define SCF_PRIM_ALL           4
+#define SCF_NPRIMS             8 // Need wait and no-wait versions of each,
+                                 //  except for SCF_PRIM_RESCHED and
+                                 //  SCF_PRIM_SINGLE_RPC.
 
 static char *scf_prim_name[] = {
        "resched_cpu",
        "smp_call_function_single",
+       "smp_call_function_single_rpc",
        "smp_call_function_many",
        "smp_call_function",
 };
@@ -128,6 +134,8 @@ struct scf_check {
        bool scfc_out;
        int scfc_cpu; // -1 for not _single().
        bool scfc_wait;
+       bool scfc_rpc;
+       struct completion scfc_completion;
 };
 
 // Use to wait for all threads to start.
@@ -158,6 +166,7 @@ static void scf_torture_stats_print(void)
                scfs.n_resched += scf_stats_p[i].n_resched;
                scfs.n_single += scf_stats_p[i].n_single;
                scfs.n_single_ofl += scf_stats_p[i].n_single_ofl;
+               scfs.n_single_rpc += scf_stats_p[i].n_single_rpc;
                scfs.n_single_wait += scf_stats_p[i].n_single_wait;
                scfs.n_single_wait_ofl += scf_stats_p[i].n_single_wait_ofl;
                scfs.n_many += scf_stats_p[i].n_many;
@@ -168,9 +177,10 @@ static void scf_torture_stats_print(void)
        if (atomic_read(&n_errs) || atomic_read(&n_mb_in_errs) ||
            atomic_read(&n_mb_out_errs) || atomic_read(&n_alloc_errs))
                bangstr = "!!! ";
-       pr_alert("%s %sscf_invoked_count %s: %lld resched: %lld single: %lld/%lld single_ofl: %lld/%lld many: %lld/%lld all: %lld/%lld ",
+       pr_alert("%s %sscf_invoked_count %s: %lld resched: %lld single: %lld/%lld single_ofl: %lld/%lld single_rpc: %lld single_rpc_ofl: %lld many: %lld/%lld all: %lld/%lld ",
                 SCFTORT_FLAG, bangstr, isdone ? "VER" : "ver", invoked_count, scfs.n_resched,
                 scfs.n_single, scfs.n_single_wait, scfs.n_single_ofl, scfs.n_single_wait_ofl,
+                scfs.n_single_rpc, scfs.n_single_rpc_ofl,
                 scfs.n_many, scfs.n_many_wait, scfs.n_all, scfs.n_all_wait);
        torture_onoff_stats();
        pr_cont("ste: %d stnmie: %d stnmoe: %d staf: %d\n", atomic_read(&n_errs),
@@ -282,10 +292,13 @@ static void scf_handler(void *scfc_in)
 out:
        if (unlikely(!scfcp))
                return;
-       if (scfcp->scfc_wait)
+       if (scfcp->scfc_wait) {
                WRITE_ONCE(scfcp->scfc_out, true);
-       else
+               if (scfcp->scfc_rpc)
+                       complete(&scfcp->scfc_completion);
+       } else {
                kfree(scfcp);
+       }
 }
 
 // As above, but check for correct CPU.
@@ -319,6 +332,7 @@ static void scftorture_invoke_one(struct scf_statistics *scfp, struct torture_ra
                        scfcp->scfc_cpu = -1;
                        scfcp->scfc_wait = scfsp->scfs_wait;
                        scfcp->scfc_out = false;
+                       scfcp->scfc_rpc = false;
                }
        }
        switch (scfsp->scfs_prim) {
@@ -350,6 +364,34 @@ static void scftorture_invoke_one(struct scf_statistics *scfp, struct torture_ra
                        scfcp = NULL;
                }
                break;
+       case SCF_PRIM_SINGLE_RPC:
+               if (!scfcp)
+                       break;
+               cpu = torture_random(trsp) % nr_cpu_ids;
+               scfp->n_single_rpc++;
+               scfcp->scfc_cpu = cpu;
+               scfcp->scfc_wait = true;
+               init_completion(&scfcp->scfc_completion);
+               scfcp->scfc_rpc = true;
+               barrier(); // Prevent race-reduction compiler optimizations.
+               scfcp->scfc_in = true;
+               ret = smp_call_function_single(cpu, scf_handler_1, (void *)scfcp, 0);
+               if (!ret) {
+                       if (use_cpus_read_lock)
+                               cpus_read_unlock();
+                       else
+                               preempt_enable();
+                       wait_for_completion(&scfcp->scfc_completion);
+                       if (use_cpus_read_lock)
+                               cpus_read_lock();
+                       else
+                               preempt_disable();
+               } else {
+                       scfp->n_single_rpc_ofl++;
+                       kfree(scfcp);
+                       scfcp = NULL;
+               }
+               break;
        case SCF_PRIM_MANY:
                if (scfsp->scfs_wait)
                        scfp->n_many_wait++;
@@ -379,10 +421,12 @@ static void scftorture_invoke_one(struct scf_statistics *scfp, struct torture_ra
        }
        if (scfcp && scfsp->scfs_wait) {
                if (WARN_ON_ONCE((num_online_cpus() > 1 || scfsp->scfs_prim == SCF_PRIM_SINGLE) &&
-                                !scfcp->scfc_out))
+                                !scfcp->scfc_out)) {
+                       pr_warn("%s: Memory-ordering failure, scfs_prim: %d.\n", __func__, scfsp->scfs_prim);
                        atomic_inc(&n_mb_out_errs); // Leak rather than trash!
-               else
+               } else {
                        kfree(scfcp);
+               }
                barrier(); // Prevent race-reduction compiler optimizations.
        }
        if (use_cpus_read_lock)
@@ -453,8 +497,8 @@ static void
 scftorture_print_module_parms(const char *tag)
 {
        pr_alert(SCFTORT_FLAG
-                "--- %s:  verbose=%d holdoff=%d longwait=%d nthreads=%d onoff_holdoff=%d onoff_interval=%d shutdown_secs=%d stat_interval=%d stutter=%d use_cpus_read_lock=%d, weight_resched=%d, weight_single=%d, weight_single_wait=%d, weight_many=%d, weight_many_wait=%d, weight_all=%d, weight_all_wait=%d\n", tag,
-                verbose, holdoff, longwait, nthreads, onoff_holdoff, onoff_interval, shutdown, stat_interval, stutter, use_cpus_read_lock, weight_resched, weight_single, weight_single_wait, weight_many, weight_many_wait, weight_all, weight_all_wait);
+                "--- %s:  verbose=%d holdoff=%d longwait=%d nthreads=%d onoff_holdoff=%d onoff_interval=%d shutdown_secs=%d stat_interval=%d stutter=%d use_cpus_read_lock=%d, weight_resched=%d, weight_single=%d, weight_single_rpc=%d, weight_single_wait=%d, weight_many=%d, weight_many_wait=%d, weight_all=%d, weight_all_wait=%d\n", tag,
+                verbose, holdoff, longwait, nthreads, onoff_holdoff, onoff_interval, shutdown, stat_interval, stutter, use_cpus_read_lock, weight_resched, weight_single, weight_single_rpc, weight_single_wait, weight_many, weight_many_wait, weight_all, weight_all_wait);
 }
 
 static void scf_cleanup_handler(void *unused)
@@ -469,7 +513,7 @@ static void scf_torture_cleanup(void)
                return;
 
        WRITE_ONCE(scfdone, true);
-       if (nthreads)
+       if (nthreads && scf_stats_p)
                for (i = 0; i < nthreads; i++)
                        torture_stop_kthread("scftorture_invoker", scf_stats_p[i].task);
        else
@@ -497,6 +541,7 @@ static int __init scf_torture_init(void)
        int firsterr = 0;
        unsigned long weight_resched1 = weight_resched;
        unsigned long weight_single1 = weight_single;
+       unsigned long weight_single_rpc1 = weight_single_rpc;
        unsigned long weight_single_wait1 = weight_single_wait;
        unsigned long weight_many1 = weight_many;
        unsigned long weight_many_wait1 = weight_many_wait;
@@ -508,11 +553,13 @@ static int __init scf_torture_init(void)
 
        scftorture_print_module_parms("Start of test");
 
-       if (weight_resched == -1 && weight_single == -1 && weight_single_wait == -1 &&
+       if (weight_resched == -1 &&
+           weight_single == -1 && weight_single_rpc == -1 && weight_single_wait == -1 &&
            weight_many == -1 && weight_many_wait == -1 &&
            weight_all == -1 && weight_all_wait == -1) {
                weight_resched1 = 2 * nr_cpu_ids;
                weight_single1 = 2 * nr_cpu_ids;
+               weight_single_rpc1 = 2 * nr_cpu_ids;
                weight_single_wait1 = 2 * nr_cpu_ids;
                weight_many1 = 2;
                weight_many_wait1 = 2;
@@ -523,6 +570,8 @@ static int __init scf_torture_init(void)
                        weight_resched1 = 0;
                if (weight_single == -1)
                        weight_single1 = 0;
+               if (weight_single_rpc == -1)
+                       weight_single_rpc1 = 0;
                if (weight_single_wait == -1)
                        weight_single_wait1 = 0;
                if (weight_many == -1)
@@ -534,7 +583,7 @@ static int __init scf_torture_init(void)
                if (weight_all_wait == -1)
                        weight_all_wait1 = 0;
        }
-       if (weight_single1 == 0 && weight_single_wait1 == 0 &&
+       if (weight_single1 == 0 && weight_single_rpc1 == 0 && weight_single_wait1 == 0 &&
            weight_many1 == 0 && weight_many_wait1 == 0 &&
            weight_all1 == 0 && weight_all_wait1 == 0) {
                VERBOSE_SCFTORTOUT_ERRSTRING("all zero weights makes no sense");
@@ -546,6 +595,7 @@ static int __init scf_torture_init(void)
        else if (weight_resched1)
                VERBOSE_SCFTORTOUT_ERRSTRING("built as module, weight_resched ignored");
        scf_sel_add(weight_single1, SCF_PRIM_SINGLE, false);
+       scf_sel_add(weight_single_rpc1, SCF_PRIM_SINGLE_RPC, true);
        scf_sel_add(weight_single_wait1, SCF_PRIM_SINGLE, true);
        scf_sel_add(weight_many1, SCF_PRIM_MANY, false);
        scf_sel_add(weight_many_wait1, SCF_PRIM_MANY, true);
index 2d9ff40..c4462c4 100644 (file)
@@ -237,9 +237,30 @@ static DEFINE_MUTEX(sched_core_mutex);
 static atomic_t sched_core_count;
 static struct cpumask sched_core_mask;
 
+static void sched_core_lock(int cpu, unsigned long *flags)
+{
+       const struct cpumask *smt_mask = cpu_smt_mask(cpu);
+       int t, i = 0;
+
+       local_irq_save(*flags);
+       for_each_cpu(t, smt_mask)
+               raw_spin_lock_nested(&cpu_rq(t)->__lock, i++);
+}
+
+static void sched_core_unlock(int cpu, unsigned long *flags)
+{
+       const struct cpumask *smt_mask = cpu_smt_mask(cpu);
+       int t;
+
+       for_each_cpu(t, smt_mask)
+               raw_spin_unlock(&cpu_rq(t)->__lock);
+       local_irq_restore(*flags);
+}
+
 static void __sched_core_flip(bool enabled)
 {
-       int cpu, t, i;
+       unsigned long flags;
+       int cpu, t;
 
        cpus_read_lock();
 
@@ -250,19 +271,12 @@ static void __sched_core_flip(bool enabled)
        for_each_cpu(cpu, &sched_core_mask) {
                const struct cpumask *smt_mask = cpu_smt_mask(cpu);
 
-               i = 0;
-               local_irq_disable();
-               for_each_cpu(t, smt_mask) {
-                       /* supports up to SMT8 */
-                       raw_spin_lock_nested(&cpu_rq(t)->__lock, i++);
-               }
+               sched_core_lock(cpu, &flags);
 
                for_each_cpu(t, smt_mask)
                        cpu_rq(t)->core_enabled = enabled;
 
-               for_each_cpu(t, smt_mask)
-                       raw_spin_unlock(&cpu_rq(t)->__lock);
-               local_irq_enable();
+               sched_core_unlock(cpu, &flags);
 
                cpumask_andnot(&sched_core_mask, &sched_core_mask, smt_mask);
        }
@@ -993,6 +1007,7 @@ int get_nohz_timer_target(void)
 {
        int i, cpu = smp_processor_id(), default_cpu = -1;
        struct sched_domain *sd;
+       const struct cpumask *hk_mask;
 
        if (housekeeping_cpu(cpu, HK_FLAG_TIMER)) {
                if (!idle_cpu(cpu))
@@ -1000,10 +1015,11 @@ int get_nohz_timer_target(void)
                default_cpu = cpu;
        }
 
+       hk_mask = housekeeping_cpumask(HK_FLAG_TIMER);
+
        rcu_read_lock();
        for_each_domain(cpu, sd) {
-               for_each_cpu_and(i, sched_domain_span(sd),
-                       housekeeping_cpumask(HK_FLAG_TIMER)) {
+               for_each_cpu_and(i, sched_domain_span(sd), hk_mask) {
                        if (cpu == i)
                                continue;
 
@@ -1619,6 +1635,23 @@ static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p)
                uclamp_rq_dec_id(rq, p, clamp_id);
 }
 
+static inline void uclamp_rq_reinc_id(struct rq *rq, struct task_struct *p,
+                                     enum uclamp_id clamp_id)
+{
+       if (!p->uclamp[clamp_id].active)
+               return;
+
+       uclamp_rq_dec_id(rq, p, clamp_id);
+       uclamp_rq_inc_id(rq, p, clamp_id);
+
+       /*
+        * Make sure to clear the idle flag if we've transiently reached 0
+        * active tasks on rq.
+        */
+       if (clamp_id == UCLAMP_MAX && (rq->uclamp_flags & UCLAMP_FLAG_IDLE))
+               rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE;
+}
+
 static inline void
 uclamp_update_active(struct task_struct *p)
 {
@@ -1642,12 +1675,8 @@ uclamp_update_active(struct task_struct *p)
         * affecting a valid clamp bucket, the next time it's enqueued,
         * it will already see the updated clamp bucket value.
         */
-       for_each_clamp_id(clamp_id) {
-               if (p->uclamp[clamp_id].active) {
-                       uclamp_rq_dec_id(rq, p, clamp_id);
-                       uclamp_rq_inc_id(rq, p, clamp_id);
-               }
-       }
+       for_each_clamp_id(clamp_id)
+               uclamp_rq_reinc_id(rq, p, clamp_id);
 
        task_rq_unlock(rq, p, &rf);
 }
@@ -1981,12 +2010,18 @@ void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
        dequeue_task(rq, p, flags);
 }
 
-/*
- * __normal_prio - return the priority that is based on the static prio
- */
-static inline int __normal_prio(struct task_struct *p)
+static inline int __normal_prio(int policy, int rt_prio, int nice)
 {
-       return p->static_prio;
+       int prio;
+
+       if (dl_policy(policy))
+               prio = MAX_DL_PRIO - 1;
+       else if (rt_policy(policy))
+               prio = MAX_RT_PRIO - 1 - rt_prio;
+       else
+               prio = NICE_TO_PRIO(nice);
+
+       return prio;
 }
 
 /*
@@ -1998,15 +2033,7 @@ static inline int __normal_prio(struct task_struct *p)
  */
 static inline int normal_prio(struct task_struct *p)
 {
-       int prio;
-
-       if (task_has_dl_policy(p))
-               prio = MAX_DL_PRIO-1;
-       else if (task_has_rt_policy(p))
-               prio = MAX_RT_PRIO-1 - p->rt_priority;
-       else
-               prio = __normal_prio(p);
-       return prio;
+       return __normal_prio(p->policy, p->rt_priority, PRIO_TO_NICE(p->static_prio));
 }
 
 /*
@@ -2163,7 +2190,7 @@ static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
 
        /* Non kernel threads are not allowed during either online or offline. */
        if (!(p->flags & PF_KTHREAD))
-               return cpu_active(cpu);
+               return cpu_active(cpu) && task_cpu_possible(cpu, p);
 
        /* KTHREAD_IS_PER_CPU is always allowed. */
        if (kthread_is_per_cpu(p))
@@ -2470,6 +2497,34 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
        __do_set_cpus_allowed(p, new_mask, 0);
 }
 
+int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src,
+                     int node)
+{
+       if (!src->user_cpus_ptr)
+               return 0;
+
+       dst->user_cpus_ptr = kmalloc_node(cpumask_size(), GFP_KERNEL, node);
+       if (!dst->user_cpus_ptr)
+               return -ENOMEM;
+
+       cpumask_copy(dst->user_cpus_ptr, src->user_cpus_ptr);
+       return 0;
+}
+
+static inline struct cpumask *clear_user_cpus_ptr(struct task_struct *p)
+{
+       struct cpumask *user_mask = NULL;
+
+       swap(p->user_cpus_ptr, user_mask);
+
+       return user_mask;
+}
+
+void release_user_cpus_ptr(struct task_struct *p)
+{
+       kfree(clear_user_cpus_ptr(p));
+}
+
 /*
  * This function is wildly self concurrent; here be dragons.
  *
@@ -2687,28 +2742,26 @@ static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flag
 }
 
 /*
- * Change a given task's CPU affinity. Migrate the thread to a
- * proper CPU and schedule it away if the CPU it's executing on
- * is removed from the allowed bitmask.
- *
- * NOTE: the caller must have a valid reference to the task, the
- * task must not exit() & deallocate itself prematurely. The
- * call is not atomic; no spinlocks may be held.
+ * Called with both p->pi_lock and rq->lock held; drops both before returning.
  */
-static int __set_cpus_allowed_ptr(struct task_struct *p,
-                                 const struct cpumask *new_mask,
-                                 u32 flags)
+static int __set_cpus_allowed_ptr_locked(struct task_struct *p,
+                                        const struct cpumask *new_mask,
+                                        u32 flags,
+                                        struct rq *rq,
+                                        struct rq_flags *rf)
+       __releases(rq->lock)
+       __releases(p->pi_lock)
 {
+       const struct cpumask *cpu_allowed_mask = task_cpu_possible_mask(p);
        const struct cpumask *cpu_valid_mask = cpu_active_mask;
+       bool kthread = p->flags & PF_KTHREAD;
+       struct cpumask *user_mask = NULL;
        unsigned int dest_cpu;
-       struct rq_flags rf;
-       struct rq *rq;
        int ret = 0;
 
-       rq = task_rq_lock(p, &rf);
        update_rq_clock(rq);
 
-       if (p->flags & PF_KTHREAD || is_migration_disabled(p)) {
+       if (kthread || is_migration_disabled(p)) {
                /*
                 * Kernel threads are allowed on online && !active CPUs,
                 * however, during cpu-hot-unplug, even these might get pushed
@@ -2722,6 +2775,11 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
                cpu_valid_mask = cpu_online_mask;
        }
 
+       if (!kthread && !cpumask_subset(new_mask, cpu_allowed_mask)) {
+               ret = -EINVAL;
+               goto out;
+       }
+
        /*
         * Must re-check here, to close a race against __kthread_bind(),
         * sched_setaffinity() is not guaranteed to observe the flag.
@@ -2756,20 +2814,178 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
 
        __do_set_cpus_allowed(p, new_mask, flags);
 
-       return affine_move_task(rq, p, &rf, dest_cpu, flags);
+       if (flags & SCA_USER)
+               user_mask = clear_user_cpus_ptr(p);
+
+       ret = affine_move_task(rq, p, rf, dest_cpu, flags);
+
+       kfree(user_mask);
+
+       return ret;
 
 out:
-       task_rq_unlock(rq, p, &rf);
+       task_rq_unlock(rq, p, rf);
 
        return ret;
 }
 
+/*
+ * Change a given task's CPU affinity. Migrate the thread to a
+ * proper CPU and schedule it away if the CPU it's executing on
+ * is removed from the allowed bitmask.
+ *
+ * NOTE: the caller must have a valid reference to the task, the
+ * task must not exit() & deallocate itself prematurely. The
+ * call is not atomic; no spinlocks may be held.
+ */
+static int __set_cpus_allowed_ptr(struct task_struct *p,
+                                 const struct cpumask *new_mask, u32 flags)
+{
+       struct rq_flags rf;
+       struct rq *rq;
+
+       rq = task_rq_lock(p, &rf);
+       return __set_cpus_allowed_ptr_locked(p, new_mask, flags, rq, &rf);
+}
+
 int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
 {
        return __set_cpus_allowed_ptr(p, new_mask, 0);
 }
 EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
 
+/*
+ * Change a given task's CPU affinity to the intersection of its current
+ * affinity mask and @subset_mask, writing the resulting mask to @new_mask
+ * and pointing @p->user_cpus_ptr to a copy of the old mask.
+ * If the resulting mask is empty, leave the affinity unchanged and return
+ * -EINVAL.
+ */
+static int restrict_cpus_allowed_ptr(struct task_struct *p,
+                                    struct cpumask *new_mask,
+                                    const struct cpumask *subset_mask)
+{
+       struct cpumask *user_mask = NULL;
+       struct rq_flags rf;
+       struct rq *rq;
+       int err;
+
+       if (!p->user_cpus_ptr) {
+               user_mask = kmalloc(cpumask_size(), GFP_KERNEL);
+               if (!user_mask)
+                       return -ENOMEM;
+       }
+
+       rq = task_rq_lock(p, &rf);
+
+       /*
+        * Forcefully restricting the affinity of a deadline task is
+        * likely to cause problems, so fail and noisily override the
+        * mask entirely.
+        */
+       if (task_has_dl_policy(p) && dl_bandwidth_enabled()) {
+               err = -EPERM;
+               goto err_unlock;
+       }
+
+       if (!cpumask_and(new_mask, &p->cpus_mask, subset_mask)) {
+               err = -EINVAL;
+               goto err_unlock;
+       }
+
+       /*
+        * We're about to butcher the task affinity, so keep track of what
+        * the user asked for in case we're able to restore it later on.
+        */
+       if (user_mask) {
+               cpumask_copy(user_mask, p->cpus_ptr);
+               p->user_cpus_ptr = user_mask;
+       }
+
+       return __set_cpus_allowed_ptr_locked(p, new_mask, 0, rq, &rf);
+
+err_unlock:
+       task_rq_unlock(rq, p, &rf);
+       kfree(user_mask);
+       return err;
+}
+
+/*
+ * Restrict the CPU affinity of task @p so that it is a subset of
+ * task_cpu_possible_mask() and point @p->user_cpu_ptr to a copy of the
+ * old affinity mask. If the resulting mask is empty, we warn and walk
+ * up the cpuset hierarchy until we find a suitable mask.
+ */
+void force_compatible_cpus_allowed_ptr(struct task_struct *p)
+{
+       cpumask_var_t new_mask;
+       const struct cpumask *override_mask = task_cpu_possible_mask(p);
+
+       alloc_cpumask_var(&new_mask, GFP_KERNEL);
+
+       /*
+        * __migrate_task() can fail silently in the face of concurrent
+        * offlining of the chosen destination CPU, so take the hotplug
+        * lock to ensure that the migration succeeds.
+        */
+       cpus_read_lock();
+       if (!cpumask_available(new_mask))
+               goto out_set_mask;
+
+       if (!restrict_cpus_allowed_ptr(p, new_mask, override_mask))
+               goto out_free_mask;
+
+       /*
+        * We failed to find a valid subset of the affinity mask for the
+        * task, so override it based on its cpuset hierarchy.
+        */
+       cpuset_cpus_allowed(p, new_mask);
+       override_mask = new_mask;
+
+out_set_mask:
+       if (printk_ratelimit()) {
+               printk_deferred("Overriding affinity for process %d (%s) to CPUs %*pbl\n",
+                               task_pid_nr(p), p->comm,
+                               cpumask_pr_args(override_mask));
+       }
+
+       WARN_ON(set_cpus_allowed_ptr(p, override_mask));
+out_free_mask:
+       cpus_read_unlock();
+       free_cpumask_var(new_mask);
+}
+
+static int
+__sched_setaffinity(struct task_struct *p, const struct cpumask *mask);
+
+/*
+ * Restore the affinity of a task @p which was previously restricted by a
+ * call to force_compatible_cpus_allowed_ptr(). This will clear (and free)
+ * @p->user_cpus_ptr.
+ *
+ * It is the caller's responsibility to serialise this with any calls to
+ * force_compatible_cpus_allowed_ptr(@p).
+ */
+void relax_compatible_cpus_allowed_ptr(struct task_struct *p)
+{
+       struct cpumask *user_mask = p->user_cpus_ptr;
+       unsigned long flags;
+
+       /*
+        * Try to restore the old affinity mask. If this fails, then
+        * we free the mask explicitly to avoid it being inherited across
+        * a subsequent fork().
+        */
+       if (!user_mask || !__sched_setaffinity(p, user_mask))
+               return;
+
+       raw_spin_lock_irqsave(&p->pi_lock, flags);
+       user_mask = clear_user_cpus_ptr(p);
+       raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+
+       kfree(user_mask);
+}
+
 void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 {
 #ifdef CONFIG_SCHED_DEBUG
@@ -3114,9 +3330,7 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
 
                /* Look for allowed, online CPU in same node. */
                for_each_cpu(dest_cpu, nodemask) {
-                       if (!cpu_active(dest_cpu))
-                               continue;
-                       if (cpumask_test_cpu(dest_cpu, p->cpus_ptr))
+                       if (is_cpu_allowed(p, dest_cpu))
                                return dest_cpu;
                }
        }
@@ -3133,8 +3347,7 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
                /* No more Mr. Nice Guy. */
                switch (state) {
                case cpuset:
-                       if (IS_ENABLED(CONFIG_CPUSETS)) {
-                               cpuset_cpus_allowed_fallback(p);
+                       if (cpuset_cpus_allowed_fallback(p)) {
                                state = possible;
                                break;
                        }
@@ -3146,10 +3359,9 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
                         *
                         * More yuck to audit.
                         */
-                       do_set_cpus_allowed(p, cpu_possible_mask);
+                       do_set_cpus_allowed(p, task_cpu_possible_mask(p));
                        state = fail;
                        break;
-
                case fail:
                        BUG();
                        break;
@@ -3563,6 +3775,55 @@ static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
        rq_unlock(rq, &rf);
 }
 
+/*
+ * Invoked from try_to_wake_up() to check whether the task can be woken up.
+ *
+ * The caller holds p::pi_lock if p != current or has preemption
+ * disabled when p == current.
+ *
+ * The rules of PREEMPT_RT saved_state:
+ *
+ *   The related locking code always holds p::pi_lock when updating
+ *   p::saved_state, which means the code is fully serialized in both cases.
+ *
+ *   The lock wait and lock wakeups happen via TASK_RTLOCK_WAIT. No other
+ *   bits set. This allows to distinguish all wakeup scenarios.
+ */
+static __always_inline
+bool ttwu_state_match(struct task_struct *p, unsigned int state, int *success)
+{
+       if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)) {
+               WARN_ON_ONCE((state & TASK_RTLOCK_WAIT) &&
+                            state != TASK_RTLOCK_WAIT);
+       }
+
+       if (READ_ONCE(p->__state) & state) {
+               *success = 1;
+               return true;
+       }
+
+#ifdef CONFIG_PREEMPT_RT
+       /*
+        * Saved state preserves the task state across blocking on
+        * an RT lock.  If the state matches, set p::saved_state to
+        * TASK_RUNNING, but do not wake the task because it waits
+        * for a lock wakeup. Also indicate success because from
+        * the regular waker's point of view this has succeeded.
+        *
+        * After acquiring the lock the task will restore p::__state
+        * from p::saved_state which ensures that the regular
+        * wakeup is not lost. The restore will also set
+        * p::saved_state to TASK_RUNNING so any further tests will
+        * not result in false positives vs. @success
+        */
+       if (p->saved_state & state) {
+               p->saved_state = TASK_RUNNING;
+               *success = 1;
+       }
+#endif
+       return false;
+}
+
 /*
  * Notes on Program-Order guarantees on SMP systems.
  *
@@ -3702,10 +3963,9 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
                 *  - we're serialized against set_special_state() by virtue of
                 *    it disabling IRQs (this allows not taking ->pi_lock).
                 */
-               if (!(READ_ONCE(p->__state) & state))
+               if (!ttwu_state_match(p, state, &success))
                        goto out;
 
-               success = 1;
                trace_sched_waking(p);
                WRITE_ONCE(p->__state, TASK_RUNNING);
                trace_sched_wakeup(p);
@@ -3720,14 +3980,11 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
         */
        raw_spin_lock_irqsave(&p->pi_lock, flags);
        smp_mb__after_spinlock();
-       if (!(READ_ONCE(p->__state) & state))
+       if (!ttwu_state_match(p, state, &success))
                goto unlock;
 
        trace_sched_waking(p);
 
-       /* We're going to change ->state: */
-       success = 1;
-
        /*
         * Ensure we load p->on_rq _after_ p->state, otherwise it would
         * be possible to, falsely, observe p->on_rq == 0 and get stuck
@@ -4099,7 +4356,7 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
                } else if (PRIO_TO_NICE(p->static_prio) < 0)
                        p->static_prio = NICE_TO_PRIO(0);
 
-               p->prio = p->normal_prio = __normal_prio(p);
+               p->prio = p->normal_prio = p->static_prio;
                set_load_weight(p, false);
 
                /*
@@ -5662,11 +5919,9 @@ static bool try_steal_cookie(int this, int that)
                if (p->core_occupation > dst->idle->core_occupation)
                        goto next;
 
-               p->on_rq = TASK_ON_RQ_MIGRATING;
                deactivate_task(src, p, 0);
                set_task_cpu(p, this);
                activate_task(dst, p, 0);
-               p->on_rq = TASK_ON_RQ_QUEUED;
 
                resched_curr(dst);
 
@@ -5738,35 +5993,109 @@ void queue_core_balance(struct rq *rq)
        queue_balance_callback(rq, &per_cpu(core_balance_head, rq->cpu), sched_core_balance);
 }
 
-static inline void sched_core_cpu_starting(unsigned int cpu)
+static void sched_core_cpu_starting(unsigned int cpu)
 {
        const struct cpumask *smt_mask = cpu_smt_mask(cpu);
-       struct rq *rq, *core_rq = NULL;
-       int i;
+       struct rq *rq = cpu_rq(cpu), *core_rq = NULL;
+       unsigned long flags;
+       int t;
 
-       core_rq = cpu_rq(cpu)->core;
+       sched_core_lock(cpu, &flags);
+
+       WARN_ON_ONCE(rq->core != rq);
+
+       /* if we're the first, we'll be our own leader */
+       if (cpumask_weight(smt_mask) == 1)
+               goto unlock;
 
-       if (!core_rq) {
-               for_each_cpu(i, smt_mask) {
-                       rq = cpu_rq(i);
-                       if (rq->core && rq->core == rq)
-                               core_rq = rq;
+       /* find the leader */
+       for_each_cpu(t, smt_mask) {
+               if (t == cpu)
+                       continue;
+               rq = cpu_rq(t);
+               if (rq->core == rq) {
+                       core_rq = rq;
+                       break;
                }
+       }
 
-               if (!core_rq)
-                       core_rq = cpu_rq(cpu);
+       if (WARN_ON_ONCE(!core_rq)) /* whoopsie */
+               goto unlock;
 
-               for_each_cpu(i, smt_mask) {
-                       rq = cpu_rq(i);
+       /* install and validate core_rq */
+       for_each_cpu(t, smt_mask) {
+               rq = cpu_rq(t);
 
-                       WARN_ON_ONCE(rq->core && rq->core != core_rq);
+               if (t == cpu)
                        rq->core = core_rq;
-               }
+
+               WARN_ON_ONCE(rq->core != core_rq);
        }
+
+unlock:
+       sched_core_unlock(cpu, &flags);
 }
+
+static void sched_core_cpu_deactivate(unsigned int cpu)
+{
+       const struct cpumask *smt_mask = cpu_smt_mask(cpu);
+       struct rq *rq = cpu_rq(cpu), *core_rq = NULL;
+       unsigned long flags;
+       int t;
+
+       sched_core_lock(cpu, &flags);
+
+       /* if we're the last man standing, nothing to do */
+       if (cpumask_weight(smt_mask) == 1) {
+               WARN_ON_ONCE(rq->core != rq);
+               goto unlock;
+       }
+
+       /* if we're not the leader, nothing to do */
+       if (rq->core != rq)
+               goto unlock;
+
+       /* find a new leader */
+       for_each_cpu(t, smt_mask) {
+               if (t == cpu)
+                       continue;
+               core_rq = cpu_rq(t);
+               break;
+       }
+
+       if (WARN_ON_ONCE(!core_rq)) /* impossible */
+               goto unlock;
+
+       /* copy the shared state to the new leader */
+       core_rq->core_task_seq      = rq->core_task_seq;
+       core_rq->core_pick_seq      = rq->core_pick_seq;
+       core_rq->core_cookie        = rq->core_cookie;
+       core_rq->core_forceidle     = rq->core_forceidle;
+       core_rq->core_forceidle_seq = rq->core_forceidle_seq;
+
+       /* install new leader */
+       for_each_cpu(t, smt_mask) {
+               rq = cpu_rq(t);
+               rq->core = core_rq;
+       }
+
+unlock:
+       sched_core_unlock(cpu, &flags);
+}
+
+static inline void sched_core_cpu_dying(unsigned int cpu)
+{
+       struct rq *rq = cpu_rq(cpu);
+
+       if (rq->core != rq)
+               rq->core = rq;
+}
+
 #else /* !CONFIG_SCHED_CORE */
 
 static inline void sched_core_cpu_starting(unsigned int cpu) {}
+static inline void sched_core_cpu_deactivate(unsigned int cpu) {}
+static inline void sched_core_cpu_dying(unsigned int cpu) {}
 
 static struct task_struct *
 pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
@@ -5776,6 +6105,24 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 
 #endif /* CONFIG_SCHED_CORE */
 
+/*
+ * Constants for the sched_mode argument of __schedule().
+ *
+ * The mode argument allows RT enabled kernels to differentiate a
+ * preemption from blocking on an 'sleeping' spin/rwlock. Note that
+ * SM_MASK_PREEMPT for !RT has all bits set, which allows the compiler to
+ * optimize the AND operation out and just check for zero.
+ */
+#define SM_NONE                        0x0
+#define SM_PREEMPT             0x1
+#define SM_RTLOCK_WAIT         0x2
+
+#ifndef CONFIG_PREEMPT_RT
+# define SM_MASK_PREEMPT       (~0U)
+#else
+# define SM_MASK_PREEMPT       SM_PREEMPT
+#endif
+
 /*
  * __schedule() is the main scheduler function.
  *
@@ -5815,7 +6162,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
  *
  * WARNING: must be called with preemption disabled!
  */
-static void __sched notrace __schedule(bool preempt)
+static void __sched notrace __schedule(unsigned int sched_mode)
 {
        struct task_struct *prev, *next;
        unsigned long *switch_count;
@@ -5828,13 +6175,13 @@ static void __sched notrace __schedule(bool preempt)
        rq = cpu_rq(cpu);
        prev = rq->curr;
 
-       schedule_debug(prev, preempt);
+       schedule_debug(prev, !!sched_mode);
 
        if (sched_feat(HRTICK) || sched_feat(HRTICK_DL))
                hrtick_clear(rq);
 
        local_irq_disable();
-       rcu_note_context_switch(preempt);
+       rcu_note_context_switch(!!sched_mode);
 
        /*
         * Make sure that signal_pending_state()->signal_pending() below
@@ -5868,7 +6215,7 @@ static void __sched notrace __schedule(bool preempt)
         *  - ptrace_{,un}freeze_traced() can change ->state underneath us.
         */
        prev_state = READ_ONCE(prev->__state);
-       if (!preempt && prev_state) {
+       if (!(sched_mode & SM_MASK_PREEMPT) && prev_state) {
                if (signal_pending_state(prev_state, prev)) {
                        WRITE_ONCE(prev->__state, TASK_RUNNING);
                } else {
@@ -5934,7 +6281,7 @@ static void __sched notrace __schedule(bool preempt)
                migrate_disable_switch(rq, prev);
                psi_sched_switch(prev, next, !task_on_rq_queued(prev));
 
-               trace_sched_switch(preempt, prev, next);
+               trace_sched_switch(sched_mode & SM_MASK_PREEMPT, prev, next);
 
                /* Also unlocks the rq: */
                rq = context_switch(rq, prev, next, &rf);
@@ -5955,7 +6302,7 @@ void __noreturn do_task_dead(void)
        /* Tell freezer to ignore us: */
        current->flags |= PF_NOFREEZE;
 
-       __schedule(false);
+       __schedule(SM_NONE);
        BUG();
 
        /* Avoid "noreturn function does return" - but don't continue if BUG() is a NOP: */
@@ -6016,7 +6363,7 @@ asmlinkage __visible void __sched schedule(void)
        sched_submit_work(tsk);
        do {
                preempt_disable();
-               __schedule(false);
+               __schedule(SM_NONE);
                sched_preempt_enable_no_resched();
        } while (need_resched());
        sched_update_worker(tsk);
@@ -6044,7 +6391,7 @@ void __sched schedule_idle(void)
         */
        WARN_ON_ONCE(current->__state);
        do {
-               __schedule(false);
+               __schedule(SM_NONE);
        } while (need_resched());
 }
 
@@ -6079,6 +6426,18 @@ void __sched schedule_preempt_disabled(void)
        preempt_disable();
 }
 
+#ifdef CONFIG_PREEMPT_RT
+void __sched notrace schedule_rtlock(void)
+{
+       do {
+               preempt_disable();
+               __schedule(SM_RTLOCK_WAIT);
+               sched_preempt_enable_no_resched();
+       } while (need_resched());
+}
+NOKPROBE_SYMBOL(schedule_rtlock);
+#endif
+
 static void __sched notrace preempt_schedule_common(void)
 {
        do {
@@ -6097,7 +6456,7 @@ static void __sched notrace preempt_schedule_common(void)
                 */
                preempt_disable_notrace();
                preempt_latency_start(1);
-               __schedule(true);
+               __schedule(SM_PREEMPT);
                preempt_latency_stop(1);
                preempt_enable_no_resched_notrace();
 
@@ -6176,7 +6535,7 @@ asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
                 * an infinite recursion.
                 */
                prev_ctx = exception_enter();
-               __schedule(true);
+               __schedule(SM_PREEMPT);
                exception_exit(prev_ctx);
 
                preempt_latency_stop(1);
@@ -6325,7 +6684,7 @@ asmlinkage __visible void __sched preempt_schedule_irq(void)
        do {
                preempt_disable();
                local_irq_enable();
-               __schedule(true);
+               __schedule(SM_PREEMPT);
                local_irq_disable();
                sched_preempt_enable_no_resched();
        } while (need_resched());
@@ -6341,6 +6700,18 @@ int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flag
 }
 EXPORT_SYMBOL(default_wake_function);
 
+static void __setscheduler_prio(struct task_struct *p, int prio)
+{
+       if (dl_prio(prio))
+               p->sched_class = &dl_sched_class;
+       else if (rt_prio(prio))
+               p->sched_class = &rt_sched_class;
+       else
+               p->sched_class = &fair_sched_class;
+
+       p->prio = prio;
+}
+
 #ifdef CONFIG_RT_MUTEXES
 
 static inline int __rt_effective_prio(struct task_struct *pi_task, int prio)
@@ -6456,22 +6827,19 @@ void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
                } else {
                        p->dl.pi_se = &p->dl;
                }
-               p->sched_class = &dl_sched_class;
        } else if (rt_prio(prio)) {
                if (dl_prio(oldprio))
                        p->dl.pi_se = &p->dl;
                if (oldprio < prio)
                        queue_flag |= ENQUEUE_HEAD;
-               p->sched_class = &rt_sched_class;
        } else {
                if (dl_prio(oldprio))
                        p->dl.pi_se = &p->dl;
                if (rt_prio(oldprio))
                        p->rt.timeout = 0;
-               p->sched_class = &fair_sched_class;
        }
 
-       p->prio = prio;
+       __setscheduler_prio(p, prio);
 
        if (queued)
                enqueue_task(rq, p, queue_flag);
@@ -6824,35 +7192,6 @@ static void __setscheduler_params(struct task_struct *p,
        set_load_weight(p, true);
 }
 
-/* Actually do priority change: must hold pi & rq lock. */
-static void __setscheduler(struct rq *rq, struct task_struct *p,
-                          const struct sched_attr *attr, bool keep_boost)
-{
-       /*
-        * If params can't change scheduling class changes aren't allowed
-        * either.
-        */
-       if (attr->sched_flags & SCHED_FLAG_KEEP_PARAMS)
-               return;
-
-       __setscheduler_params(p, attr);
-
-       /*
-        * Keep a potential priority boosting if called from
-        * sched_setscheduler().
-        */
-       p->prio = normal_prio(p);
-       if (keep_boost)
-               p->prio = rt_effective_prio(p, p->prio);
-
-       if (dl_prio(p->prio))
-               p->sched_class = &dl_sched_class;
-       else if (rt_prio(p->prio))
-               p->sched_class = &rt_sched_class;
-       else
-               p->sched_class = &fair_sched_class;
-}
-
 /*
  * Check the target process has a UID that matches the current process's:
  */
@@ -6873,10 +7212,8 @@ static int __sched_setscheduler(struct task_struct *p,
                                const struct sched_attr *attr,
                                bool user, bool pi)
 {
-       int newprio = dl_policy(attr->sched_policy) ? MAX_DL_PRIO - 1 :
-                     MAX_RT_PRIO - 1 - attr->sched_priority;
-       int retval, oldprio, oldpolicy = -1, queued, running;
-       int new_effective_prio, policy = attr->sched_policy;
+       int oldpolicy = -1, policy = attr->sched_policy;
+       int retval, oldprio, newprio, queued, running;
        const struct sched_class *prev_class;
        struct callback_head *head;
        struct rq_flags rf;
@@ -7074,6 +7411,7 @@ change:
        p->sched_reset_on_fork = reset_on_fork;
        oldprio = p->prio;
 
+       newprio = __normal_prio(policy, attr->sched_priority, attr->sched_nice);
        if (pi) {
                /*
                 * Take priority boosted tasks into account. If the new
@@ -7082,8 +7420,8 @@ change:
                 * the runqueue. This will be done when the task deboost
                 * itself.
                 */
-               new_effective_prio = rt_effective_prio(p, newprio);
-               if (new_effective_prio == oldprio)
+               newprio = rt_effective_prio(p, newprio);
+               if (newprio == oldprio)
                        queue_flags &= ~DEQUEUE_MOVE;
        }
 
@@ -7096,7 +7434,10 @@ change:
 
        prev_class = p->sched_class;
 
-       __setscheduler(rq, p, attr, pi);
+       if (!(attr->sched_flags & SCHED_FLAG_KEEP_PARAMS)) {
+               __setscheduler_params(p, attr);
+               __setscheduler_prio(p, newprio);
+       }
        __setscheduler_uclamp(p, attr);
 
        if (queued) {
@@ -7320,6 +7661,16 @@ err_size:
        return -E2BIG;
 }
 
+static void get_params(struct task_struct *p, struct sched_attr *attr)
+{
+       if (task_has_dl_policy(p))
+               __getparam_dl(p, attr);
+       else if (task_has_rt_policy(p))
+               attr->sched_priority = p->rt_priority;
+       else
+               attr->sched_nice = task_nice(p);
+}
+
 /**
  * sys_sched_setscheduler - set/change the scheduler policy and RT priority
  * @pid: the pid in question.
@@ -7381,6 +7732,8 @@ SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
        rcu_read_unlock();
 
        if (likely(p)) {
+               if (attr.sched_flags & SCHED_FLAG_KEEP_PARAMS)
+                       get_params(p, &attr);
                retval = sched_setattr(p, &attr);
                put_task_struct(p);
        }
@@ -7529,12 +7882,8 @@ SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
        kattr.sched_policy = p->policy;
        if (p->sched_reset_on_fork)
                kattr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
-       if (task_has_dl_policy(p))
-               __getparam_dl(p, &kattr);
-       else if (task_has_rt_policy(p))
-               kattr.sched_priority = p->rt_priority;
-       else
-               kattr.sched_nice = task_nice(p);
+       get_params(p, &kattr);
+       kattr.sched_flags &= SCHED_FLAG_ALL;
 
 #ifdef CONFIG_UCLAMP_TASK
        /*
@@ -7555,9 +7904,76 @@ out_unlock:
        return retval;
 }
 
-long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
+#ifdef CONFIG_SMP
+int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask)
+{
+       int ret = 0;
+
+       /*
+        * If the task isn't a deadline task or admission control is
+        * disabled then we don't care about affinity changes.
+        */
+       if (!task_has_dl_policy(p) || !dl_bandwidth_enabled())
+               return 0;
+
+       /*
+        * Since bandwidth control happens on root_domain basis,
+        * if admission test is enabled, we only admit -deadline
+        * tasks allowed to run on all the CPUs in the task's
+        * root_domain.
+        */
+       rcu_read_lock();
+       if (!cpumask_subset(task_rq(p)->rd->span, mask))
+               ret = -EBUSY;
+       rcu_read_unlock();
+       return ret;
+}
+#endif
+
+static int
+__sched_setaffinity(struct task_struct *p, const struct cpumask *mask)
 {
+       int retval;
        cpumask_var_t cpus_allowed, new_mask;
+
+       if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL))
+               return -ENOMEM;
+
+       if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
+               retval = -ENOMEM;
+               goto out_free_cpus_allowed;
+       }
+
+       cpuset_cpus_allowed(p, cpus_allowed);
+       cpumask_and(new_mask, mask, cpus_allowed);
+
+       retval = dl_task_check_affinity(p, new_mask);
+       if (retval)
+               goto out_free_new_mask;
+again:
+       retval = __set_cpus_allowed_ptr(p, new_mask, SCA_CHECK | SCA_USER);
+       if (retval)
+               goto out_free_new_mask;
+
+       cpuset_cpus_allowed(p, cpus_allowed);
+       if (!cpumask_subset(new_mask, cpus_allowed)) {
+               /*
+                * We must have raced with a concurrent cpuset update.
+                * Just reset the cpumask to the cpuset's cpus_allowed.
+                */
+               cpumask_copy(new_mask, cpus_allowed);
+               goto again;
+       }
+
+out_free_new_mask:
+       free_cpumask_var(new_mask);
+out_free_cpus_allowed:
+       free_cpumask_var(cpus_allowed);
+       return retval;
+}
+
+long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
+{
        struct task_struct *p;
        int retval;
 
@@ -7577,68 +7993,22 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
                retval = -EINVAL;
                goto out_put_task;
        }
-       if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
-               retval = -ENOMEM;
-               goto out_put_task;
-       }
-       if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
-               retval = -ENOMEM;
-               goto out_free_cpus_allowed;
-       }
-       retval = -EPERM;
+
        if (!check_same_owner(p)) {
                rcu_read_lock();
                if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) {
                        rcu_read_unlock();
-                       goto out_free_new_mask;
+                       retval = -EPERM;
+                       goto out_put_task;
                }
                rcu_read_unlock();
        }
 
        retval = security_task_setscheduler(p);
        if (retval)
-               goto out_free_new_mask;
-
-
-       cpuset_cpus_allowed(p, cpus_allowed);
-       cpumask_and(new_mask, in_mask, cpus_allowed);
-
-       /*
-        * Since bandwidth control happens on root_domain basis,
-        * if admission test is enabled, we only admit -deadline
-        * tasks allowed to run on all the CPUs in the task's
-        * root_domain.
-        */
-#ifdef CONFIG_SMP
-       if (task_has_dl_policy(p) && dl_bandwidth_enabled()) {
-               rcu_read_lock();
-               if (!cpumask_subset(task_rq(p)->rd->span, new_mask)) {
-                       retval = -EBUSY;
-                       rcu_read_unlock();
-                       goto out_free_new_mask;
-               }
-               rcu_read_unlock();
-       }
-#endif
-again:
-       retval = __set_cpus_allowed_ptr(p, new_mask, SCA_CHECK);
+               goto out_put_task;
 
-       if (!retval) {
-               cpuset_cpus_allowed(p, cpus_allowed);
-               if (!cpumask_subset(new_mask, cpus_allowed)) {
-                       /*
-                        * We must have raced with a concurrent cpuset
-                        * update. Just reset the cpus_allowed to the
-                        * cpuset's cpus_allowed
-                        */
-                       cpumask_copy(new_mask, cpus_allowed);
-                       goto again;
-               }
-       }
-out_free_new_mask:
-       free_cpumask_var(new_mask);
-out_free_cpus_allowed:
-       free_cpumask_var(cpus_allowed);
+       retval = __sched_setaffinity(p, in_mask);
 out_put_task:
        put_task_struct(p);
        return retval;
@@ -7781,6 +8151,17 @@ int __sched __cond_resched(void)
                preempt_schedule_common();
                return 1;
        }
+       /*
+        * In preemptible kernels, ->rcu_read_lock_nesting tells the tick
+        * whether the current CPU is in an RCU read-side critical section,
+        * so the tick can report quiescent states even for CPUs looping
+        * in kernel context.  In contrast, in non-preemptible kernels,
+        * RCU readers leave no in-memory hints, which means that CPU-bound
+        * processes executing in kernel context might never report an
+        * RCU quiescent state.  Therefore, the following code causes
+        * cond_resched() to report a quiescent state, but only when RCU
+        * is in urgent need of one.
+        */
 #ifndef CONFIG_PREEMPT_RCU
        rcu_all_qs();
 #endif
@@ -8727,6 +9108,8 @@ int sched_cpu_deactivate(unsigned int cpu)
         */
        if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
                static_branch_dec_cpuslocked(&sched_smt_present);
+
+       sched_core_cpu_deactivate(cpu);
 #endif
 
        if (!sched_smp_initialized)
@@ -8831,6 +9214,7 @@ int sched_cpu_dying(unsigned int cpu)
        calc_load_migrate(rq);
        update_max_interval();
        hrtick_clear(rq);
+       sched_core_cpu_dying(cpu);
        return 0;
 }
 #endif
@@ -9042,7 +9426,7 @@ void __init sched_init(void)
                atomic_set(&rq->nr_iowait, 0);
 
 #ifdef CONFIG_SCHED_CORE
-               rq->core = NULL;
+               rq->core = rq;
                rq->core_pick = NULL;
                rq->core_enabled = 0;
                rq->core_tree = RB_ROOT;
@@ -9824,7 +10208,7 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota,
         * Prevent race between setting of cfs_rq->runtime_enabled and
         * unthrottle_offline_cfs_rqs().
         */
-       get_online_cpus();
+       cpus_read_lock();
        mutex_lock(&cfs_constraints_mutex);
        ret = __cfs_schedulable(tg, period, quota);
        if (ret)
@@ -9868,7 +10252,7 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota,
                cfs_bandwidth_usage_dec();
 out_unlock:
        mutex_unlock(&cfs_constraints_mutex);
-       put_online_cpus();
+       cpus_read_unlock();
 
        return ret;
 }
@@ -10119,6 +10503,20 @@ static u64 cpu_rt_period_read_uint(struct cgroup_subsys_state *css,
 }
 #endif /* CONFIG_RT_GROUP_SCHED */
 
+#ifdef CONFIG_FAIR_GROUP_SCHED
+static s64 cpu_idle_read_s64(struct cgroup_subsys_state *css,
+                              struct cftype *cft)
+{
+       return css_tg(css)->idle;
+}
+
+static int cpu_idle_write_s64(struct cgroup_subsys_state *css,
+                               struct cftype *cft, s64 idle)
+{
+       return sched_group_set_idle(css_tg(css), idle);
+}
+#endif
+
 static struct cftype cpu_legacy_files[] = {
 #ifdef CONFIG_FAIR_GROUP_SCHED
        {
@@ -10126,6 +10524,11 @@ static struct cftype cpu_legacy_files[] = {
                .read_u64 = cpu_shares_read_u64,
                .write_u64 = cpu_shares_write_u64,
        },
+       {
+               .name = "idle",
+               .read_s64 = cpu_idle_read_s64,
+               .write_s64 = cpu_idle_write_s64,
+       },
 #endif
 #ifdef CONFIG_CFS_BANDWIDTH
        {
@@ -10333,6 +10736,12 @@ static struct cftype cpu_files[] = {
                .read_s64 = cpu_weight_nice_read_s64,
                .write_s64 = cpu_weight_nice_write_s64,
        },
+       {
+               .name = "idle",
+               .flags = CFTYPE_NOT_ON_ROOT,
+               .read_s64 = cpu_idle_read_s64,
+               .write_s64 = cpu_idle_write_s64,
+       },
 #endif
 #ifdef CONFIG_CFS_BANDWIDTH
        {
index aaacd6c..e943146 100644 (file)
@@ -1733,6 +1733,7 @@ static void migrate_task_rq_dl(struct task_struct *p, int new_cpu __maybe_unused
         */
        raw_spin_rq_lock(rq);
        if (p->dl.dl_non_contending) {
+               update_rq_clock(rq);
                sub_running_bw(&p->dl, &rq->dl);
                p->dl.dl_non_contending = 0;
                /*
@@ -2741,7 +2742,7 @@ void __setparam_dl(struct task_struct *p, const struct sched_attr *attr)
        dl_se->dl_runtime = attr->sched_runtime;
        dl_se->dl_deadline = attr->sched_deadline;
        dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline;
-       dl_se->flags = attr->sched_flags;
+       dl_se->flags = attr->sched_flags & SCHED_DL_FLAGS;
        dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
        dl_se->dl_density = to_ratio(dl_se->dl_deadline, dl_se->dl_runtime);
 }
@@ -2754,7 +2755,8 @@ void __getparam_dl(struct task_struct *p, struct sched_attr *attr)
        attr->sched_runtime = dl_se->dl_runtime;
        attr->sched_deadline = dl_se->dl_deadline;
        attr->sched_period = dl_se->dl_period;
-       attr->sched_flags = dl_se->flags;
+       attr->sched_flags &= ~SCHED_DL_FLAGS;
+       attr->sched_flags |= dl_se->flags;
 }
 
 /*
@@ -2851,7 +2853,7 @@ bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr)
        if (dl_se->dl_runtime != attr->sched_runtime ||
            dl_se->dl_deadline != attr->sched_deadline ||
            dl_se->dl_period != attr->sched_period ||
-           dl_se->flags != attr->sched_flags)
+           dl_se->flags != (attr->sched_flags & SCHED_DL_FLAGS))
                return true;
 
        return false;
index 0c5ec27..4971622 100644 (file)
@@ -388,6 +388,13 @@ void update_sched_domain_debugfs(void)
 {
        int cpu, i;
 
+       /*
+        * This can unfortunately be invoked before sched_debug_init() creates
+        * the debug directory. Don't touch sd_sysctl_cpus until then.
+        */
+       if (!debugfs_sched)
+               return;
+
        if (!cpumask_available(sd_sysctl_cpus)) {
                if (!alloc_cpumask_var(&sd_sysctl_cpus, GFP_KERNEL))
                        return;
@@ -600,6 +607,9 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
        SEQ_printf(m, "  .%-30s: %d\n", "nr_spread_over",
                        cfs_rq->nr_spread_over);
        SEQ_printf(m, "  .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
+       SEQ_printf(m, "  .%-30s: %d\n", "h_nr_running", cfs_rq->h_nr_running);
+       SEQ_printf(m, "  .%-30s: %d\n", "idle_h_nr_running",
+                       cfs_rq->idle_h_nr_running);
        SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
 #ifdef CONFIG_SMP
        SEQ_printf(m, "  .%-30s: %lu\n", "load_avg",
index 44c4520..ff69f24 100644 (file)
@@ -431,6 +431,23 @@ find_matching_se(struct sched_entity **se, struct sched_entity **pse)
        }
 }
 
+static int tg_is_idle(struct task_group *tg)
+{
+       return tg->idle > 0;
+}
+
+static int cfs_rq_is_idle(struct cfs_rq *cfs_rq)
+{
+       return cfs_rq->idle > 0;
+}
+
+static int se_is_idle(struct sched_entity *se)
+{
+       if (entity_is_task(se))
+               return task_has_idle_policy(task_of(se));
+       return cfs_rq_is_idle(group_cfs_rq(se));
+}
+
 #else  /* !CONFIG_FAIR_GROUP_SCHED */
 
 #define for_each_sched_entity(se) \
@@ -468,6 +485,21 @@ find_matching_se(struct sched_entity **se, struct sched_entity **pse)
 {
 }
 
+static inline int tg_is_idle(struct task_group *tg)
+{
+       return 0;
+}
+
+static int cfs_rq_is_idle(struct cfs_rq *cfs_rq)
+{
+       return 0;
+}
+
+static int se_is_idle(struct sched_entity *se)
+{
+       return 0;
+}
+
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 
 static __always_inline
@@ -1486,7 +1518,7 @@ static inline bool is_core_idle(int cpu)
                if (cpu == sibling)
                        continue;
 
-               if (!idle_cpu(cpu))
+               if (!idle_cpu(sibling))
                        return false;
        }
 #endif
@@ -4841,6 +4873,9 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
 
                dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP);
 
+               if (cfs_rq_is_idle(group_cfs_rq(se)))
+                       idle_task_delta = cfs_rq->h_nr_running;
+
                qcfs_rq->h_nr_running -= task_delta;
                qcfs_rq->idle_h_nr_running -= idle_task_delta;
 
@@ -4860,6 +4895,9 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
                update_load_avg(qcfs_rq, se, 0);
                se_update_runnable(se);
 
+               if (cfs_rq_is_idle(group_cfs_rq(se)))
+                       idle_task_delta = cfs_rq->h_nr_running;
+
                qcfs_rq->h_nr_running -= task_delta;
                qcfs_rq->idle_h_nr_running -= idle_task_delta;
        }
@@ -4904,39 +4942,45 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
        task_delta = cfs_rq->h_nr_running;
        idle_task_delta = cfs_rq->idle_h_nr_running;
        for_each_sched_entity(se) {
+               struct cfs_rq *qcfs_rq = cfs_rq_of(se);
+
                if (se->on_rq)
                        break;
-               cfs_rq = cfs_rq_of(se);
-               enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
+               enqueue_entity(qcfs_rq, se, ENQUEUE_WAKEUP);
+
+               if (cfs_rq_is_idle(group_cfs_rq(se)))
+                       idle_task_delta = cfs_rq->h_nr_running;
 
-               cfs_rq->h_nr_running += task_delta;
-               cfs_rq->idle_h_nr_running += idle_task_delta;
+               qcfs_rq->h_nr_running += task_delta;
+               qcfs_rq->idle_h_nr_running += idle_task_delta;
 
                /* end evaluation on encountering a throttled cfs_rq */
-               if (cfs_rq_throttled(cfs_rq))
+               if (cfs_rq_throttled(qcfs_rq))
                        goto unthrottle_throttle;
        }
 
        for_each_sched_entity(se) {
-               cfs_rq = cfs_rq_of(se);
+               struct cfs_rq *qcfs_rq = cfs_rq_of(se);
 
-               update_load_avg(cfs_rq, se, UPDATE_TG);
+               update_load_avg(qcfs_rq, se, UPDATE_TG);
                se_update_runnable(se);
 
-               cfs_rq->h_nr_running += task_delta;
-               cfs_rq->idle_h_nr_running += idle_task_delta;
+               if (cfs_rq_is_idle(group_cfs_rq(se)))
+                       idle_task_delta = cfs_rq->h_nr_running;
 
+               qcfs_rq->h_nr_running += task_delta;
+               qcfs_rq->idle_h_nr_running += idle_task_delta;
 
                /* end evaluation on encountering a throttled cfs_rq */
-               if (cfs_rq_throttled(cfs_rq))
+               if (cfs_rq_throttled(qcfs_rq))
                        goto unthrottle_throttle;
 
                /*
                 * One parent has been throttled and cfs_rq removed from the
                 * list. Add it back to not break the leaf list.
                 */
-               if (throttled_hierarchy(cfs_rq))
-                       list_add_leaf_cfs_rq(cfs_rq);
+               if (throttled_hierarchy(qcfs_rq))
+                       list_add_leaf_cfs_rq(qcfs_rq);
        }
 
        /* At this point se is NULL and we are at root level*/
@@ -4949,9 +4993,9 @@ unthrottle_throttle:
         * assertion below.
         */
        for_each_sched_entity(se) {
-               cfs_rq = cfs_rq_of(se);
+               struct cfs_rq *qcfs_rq = cfs_rq_of(se);
 
-               if (list_add_leaf_cfs_rq(cfs_rq))
+               if (list_add_leaf_cfs_rq(qcfs_rq))
                        break;
        }
 
@@ -5574,6 +5618,9 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
                cfs_rq->h_nr_running++;
                cfs_rq->idle_h_nr_running += idle_h_nr_running;
 
+               if (cfs_rq_is_idle(cfs_rq))
+                       idle_h_nr_running = 1;
+
                /* end evaluation on encountering a throttled cfs_rq */
                if (cfs_rq_throttled(cfs_rq))
                        goto enqueue_throttle;
@@ -5591,6 +5638,9 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
                cfs_rq->h_nr_running++;
                cfs_rq->idle_h_nr_running += idle_h_nr_running;
 
+               if (cfs_rq_is_idle(cfs_rq))
+                       idle_h_nr_running = 1;
+
                /* end evaluation on encountering a throttled cfs_rq */
                if (cfs_rq_throttled(cfs_rq))
                        goto enqueue_throttle;
@@ -5668,6 +5718,9 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
                cfs_rq->h_nr_running--;
                cfs_rq->idle_h_nr_running -= idle_h_nr_running;
 
+               if (cfs_rq_is_idle(cfs_rq))
+                       idle_h_nr_running = 1;
+
                /* end evaluation on encountering a throttled cfs_rq */
                if (cfs_rq_throttled(cfs_rq))
                        goto dequeue_throttle;
@@ -5697,6 +5750,9 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
                cfs_rq->h_nr_running--;
                cfs_rq->idle_h_nr_running -= idle_h_nr_running;
 
+               if (cfs_rq_is_idle(cfs_rq))
+                       idle_h_nr_running = 1;
+
                /* end evaluation on encountering a throttled cfs_rq */
                if (cfs_rq_throttled(cfs_rq))
                        goto dequeue_throttle;
@@ -6249,7 +6305,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, bool
                time = cpu_clock(this);
        }
 
-       for_each_cpu_wrap(cpu, cpus, target) {
+       for_each_cpu_wrap(cpu, cpus, target + 1) {
                if (has_idle_core) {
                        i = select_idle_core(p, cpu, cpus, &idle_cpu);
                        if ((unsigned int)i < nr_cpumask_bits)
@@ -6376,6 +6432,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
 
        /* Check a recently used CPU as a potential idle candidate: */
        recent_used_cpu = p->recent_used_cpu;
+       p->recent_used_cpu = prev;
        if (recent_used_cpu != prev &&
            recent_used_cpu != target &&
            cpus_share_cache(recent_used_cpu, target) &&
@@ -6902,9 +6959,6 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int wake_flags)
        } else if (wake_flags & WF_TTWU) { /* XXX always ? */
                /* Fast path */
                new_cpu = select_idle_sibling(p, prev_cpu, new_cpu);
-
-               if (want_affine)
-                       current->recent_used_cpu = cpu;
        }
        rcu_read_unlock();
 
@@ -7041,24 +7095,22 @@ wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
 
 static void set_last_buddy(struct sched_entity *se)
 {
-       if (entity_is_task(se) && unlikely(task_has_idle_policy(task_of(se))))
-               return;
-
        for_each_sched_entity(se) {
                if (SCHED_WARN_ON(!se->on_rq))
                        return;
+               if (se_is_idle(se))
+                       return;
                cfs_rq_of(se)->last = se;
        }
 }
 
 static void set_next_buddy(struct sched_entity *se)
 {
-       if (entity_is_task(se) && unlikely(task_has_idle_policy(task_of(se))))
-               return;
-
        for_each_sched_entity(se) {
                if (SCHED_WARN_ON(!se->on_rq))
                        return;
+               if (se_is_idle(se))
+                       return;
                cfs_rq_of(se)->next = se;
        }
 }
@@ -7079,6 +7131,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
        struct cfs_rq *cfs_rq = task_cfs_rq(curr);
        int scale = cfs_rq->nr_running >= sched_nr_latency;
        int next_buddy_marked = 0;
+       int cse_is_idle, pse_is_idle;
 
        if (unlikely(se == pse))
                return;
@@ -7123,8 +7176,21 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
                return;
 
        find_matching_se(&se, &pse);
-       update_curr(cfs_rq_of(se));
        BUG_ON(!pse);
+
+       cse_is_idle = se_is_idle(se);
+       pse_is_idle = se_is_idle(pse);
+
+       /*
+        * Preempt an idle group in favor of a non-idle group (and don't preempt
+        * in the inverse case).
+        */
+       if (cse_is_idle && !pse_is_idle)
+               goto preempt;
+       if (cse_is_idle != pse_is_idle)
+               return;
+
+       update_curr(cfs_rq_of(se));
        if (wakeup_preempt_entity(se, pse) == 1) {
                /*
                 * Bias pick_next to pick the sched entity that is
@@ -10217,9 +10283,11 @@ static inline int on_null_domain(struct rq *rq)
 static inline int find_new_ilb(void)
 {
        int ilb;
+       const struct cpumask *hk_mask;
+
+       hk_mask = housekeeping_cpumask(HK_FLAG_MISC);
 
-       for_each_cpu_and(ilb, nohz.idle_cpus_mask,
-                             housekeeping_cpumask(HK_FLAG_MISC)) {
+       for_each_cpu_and(ilb, nohz.idle_cpus_mask, hk_mask) {
 
                if (ilb == smp_processor_id())
                        continue;
@@ -11416,10 +11484,12 @@ void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
 
 static DEFINE_MUTEX(shares_mutex);
 
-int sched_group_set_shares(struct task_group *tg, unsigned long shares)
+static int __sched_group_set_shares(struct task_group *tg, unsigned long shares)
 {
        int i;
 
+       lockdep_assert_held(&shares_mutex);
+
        /*
         * We can't change the weight of the root cgroup.
         */
@@ -11428,9 +11498,8 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares)
 
        shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));
 
-       mutex_lock(&shares_mutex);
        if (tg->shares == shares)
-               goto done;
+               return 0;
 
        tg->shares = shares;
        for_each_possible_cpu(i) {
@@ -11448,10 +11517,88 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares)
                rq_unlock_irqrestore(rq, &rf);
        }
 
-done:
+       return 0;
+}
+
+int sched_group_set_shares(struct task_group *tg, unsigned long shares)
+{
+       int ret;
+
+       mutex_lock(&shares_mutex);
+       if (tg_is_idle(tg))
+               ret = -EINVAL;
+       else
+               ret = __sched_group_set_shares(tg, shares);
+       mutex_unlock(&shares_mutex);
+
+       return ret;
+}
+
+int sched_group_set_idle(struct task_group *tg, long idle)
+{
+       int i;
+
+       if (tg == &root_task_group)
+               return -EINVAL;
+
+       if (idle < 0 || idle > 1)
+               return -EINVAL;
+
+       mutex_lock(&shares_mutex);
+
+       if (tg->idle == idle) {
+               mutex_unlock(&shares_mutex);
+               return 0;
+       }
+
+       tg->idle = idle;
+
+       for_each_possible_cpu(i) {
+               struct rq *rq = cpu_rq(i);
+               struct sched_entity *se = tg->se[i];
+               struct cfs_rq *grp_cfs_rq = tg->cfs_rq[i];
+               bool was_idle = cfs_rq_is_idle(grp_cfs_rq);
+               long idle_task_delta;
+               struct rq_flags rf;
+
+               rq_lock_irqsave(rq, &rf);
+
+               grp_cfs_rq->idle = idle;
+               if (WARN_ON_ONCE(was_idle == cfs_rq_is_idle(grp_cfs_rq)))
+                       goto next_cpu;
+
+               idle_task_delta = grp_cfs_rq->h_nr_running -
+                                 grp_cfs_rq->idle_h_nr_running;
+               if (!cfs_rq_is_idle(grp_cfs_rq))
+                       idle_task_delta *= -1;
+
+               for_each_sched_entity(se) {
+                       struct cfs_rq *cfs_rq = cfs_rq_of(se);
+
+                       if (!se->on_rq)
+                               break;
+
+                       cfs_rq->idle_h_nr_running += idle_task_delta;
+
+                       /* Already accounted at parent level and above. */
+                       if (cfs_rq_is_idle(cfs_rq))
+                               break;
+               }
+
+next_cpu:
+               rq_unlock_irqrestore(rq, &rf);
+       }
+
+       /* Idle groups have minimum weight. */
+       if (tg_is_idle(tg))
+               __sched_group_set_shares(tg, scale_load(WEIGHT_IDLEPRIO));
+       else
+               __sched_group_set_shares(tg, NICE_0_LOAD);
+
        mutex_unlock(&shares_mutex);
        return 0;
 }
+
 #else /* CONFIG_FAIR_GROUP_SCHED */
 
 void free_fair_sched_group(struct task_group *tg) { }
index 14a41a2..3d3e579 100644 (file)
@@ -227,6 +227,8 @@ static inline void update_avg(u64 *avg, u64 sample)
  */
 #define SCHED_FLAG_SUGOV       0x10000000
 
+#define SCHED_DL_FLAGS (SCHED_FLAG_RECLAIM | SCHED_FLAG_DL_OVERRUN | SCHED_FLAG_SUGOV)
+
 static inline bool dl_entity_is_special(struct sched_dl_entity *dl_se)
 {
 #ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL
@@ -394,6 +396,9 @@ struct task_group {
        struct cfs_rq           **cfs_rq;
        unsigned long           shares;
 
+       /* A positive value indicates that this is a SCHED_IDLE group. */
+       int                     idle;
+
 #ifdef CONFIG_SMP
        /*
         * load_avg can be heavily contended at clock tick time, so put
@@ -503,6 +508,8 @@ extern void sched_move_task(struct task_struct *tsk);
 #ifdef CONFIG_FAIR_GROUP_SCHED
 extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
 
+extern int sched_group_set_idle(struct task_group *tg, long idle);
+
 #ifdef CONFIG_SMP
 extern void set_task_rq_fair(struct sched_entity *se,
                             struct cfs_rq *prev, struct cfs_rq *next);
@@ -599,6 +606,9 @@ struct cfs_rq {
        struct list_head        leaf_cfs_rq_list;
        struct task_group       *tg;    /* group that "owns" this runqueue */
 
+       /* Locally cached copy of our task_group's idle value */
+       int                     idle;
+
 #ifdef CONFIG_CFS_BANDWIDTH
        int                     runtime_enabled;
        s64                     runtime_remaining;
@@ -1093,7 +1103,7 @@ struct rq {
        unsigned int            core_sched_seq;
        struct rb_root          core_tree;
 
-       /* shared state */
+       /* shared state -- careful with sched_core_cpu_deactivate() */
        unsigned int            core_task_seq;
        unsigned int            core_pick_seq;
        unsigned long           core_cookie;
@@ -2234,6 +2244,7 @@ extern struct task_struct *pick_next_task_idle(struct rq *rq);
 #define SCA_CHECK              0x01
 #define SCA_MIGRATE_DISABLE    0x02
 #define SCA_MIGRATE_ENABLE     0x04
+#define SCA_USER               0x08
 
 #ifdef CONFIG_SMP
 
@@ -2255,6 +2266,9 @@ static inline struct task_struct *get_push_task(struct rq *rq)
        if (p->nr_cpus_allowed == 1)
                return NULL;
 
+       if (p->migration_disabled)
+               return NULL;
+
        rq->push_busy = true;
        return get_task_struct(p);
 }
@@ -2385,6 +2399,21 @@ extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);
 extern const_debug unsigned int sysctl_sched_nr_migrate;
 extern const_debug unsigned int sysctl_sched_migration_cost;
 
+#ifdef CONFIG_SCHED_DEBUG
+extern unsigned int sysctl_sched_latency;
+extern unsigned int sysctl_sched_min_granularity;
+extern unsigned int sysctl_sched_wakeup_granularity;
+extern int sysctl_resched_latency_warn_ms;
+extern int sysctl_resched_latency_warn_once;
+
+extern unsigned int sysctl_sched_tunable_scaling;
+
+extern unsigned int sysctl_numa_balancing_scan_delay;
+extern unsigned int sysctl_numa_balancing_scan_period_min;
+extern unsigned int sysctl_numa_balancing_scan_period_max;
+extern unsigned int sysctl_numa_balancing_scan_size;
+#endif
+
 #ifdef CONFIG_SCHED_HRTICK
 
 /*
index b77ad49..4e8698e 100644 (file)
@@ -1482,6 +1482,8 @@ int                               sched_max_numa_distance;
 static int                     *sched_domains_numa_distance;
 static struct cpumask          ***sched_domains_numa_masks;
 int __read_mostly              node_reclaim_distance = RECLAIM_DISTANCE;
+
+static unsigned long __read_mostly *sched_numa_onlined_nodes;
 #endif
 
 /*
@@ -1833,6 +1835,16 @@ void sched_init_numa(void)
                        sched_domains_numa_masks[i][j] = mask;
 
                        for_each_node(k) {
+                               /*
+                                * Distance information can be unreliable for
+                                * offline nodes, defer building the node
+                                * masks to its bringup.
+                                * This relies on all unique distance values
+                                * still being visible at init time.
+                                */
+                               if (!node_online(j))
+                                       continue;
+
                                if (sched_debug() && (node_distance(j, k) != node_distance(k, j)))
                                        sched_numa_warn("Node-distance not symmetric");
 
@@ -1886,6 +1898,53 @@ void sched_init_numa(void)
        sched_max_numa_distance = sched_domains_numa_distance[nr_levels - 1];
 
        init_numa_topology_type();
+
+       sched_numa_onlined_nodes = bitmap_alloc(nr_node_ids, GFP_KERNEL);
+       if (!sched_numa_onlined_nodes)
+               return;
+
+       bitmap_zero(sched_numa_onlined_nodes, nr_node_ids);
+       for_each_online_node(i)
+               bitmap_set(sched_numa_onlined_nodes, i, 1);
+}
+
+static void __sched_domains_numa_masks_set(unsigned int node)
+{
+       int i, j;
+
+       /*
+        * NUMA masks are not built for offline nodes in sched_init_numa().
+        * Thus, when a CPU of a never-onlined-before node gets plugged in,
+        * adding that new CPU to the right NUMA masks is not sufficient: the
+        * masks of that CPU's node must also be updated.
+        */
+       if (test_bit(node, sched_numa_onlined_nodes))
+               return;
+
+       bitmap_set(sched_numa_onlined_nodes, node, 1);
+
+       for (i = 0; i < sched_domains_numa_levels; i++) {
+               for (j = 0; j < nr_node_ids; j++) {
+                       if (!node_online(j) || node == j)
+                               continue;
+
+                       if (node_distance(j, node) > sched_domains_numa_distance[i])
+                               continue;
+
+                       /* Add remote nodes in our masks */
+                       cpumask_or(sched_domains_numa_masks[i][node],
+                                  sched_domains_numa_masks[i][node],
+                                  sched_domains_numa_masks[0][j]);
+               }
+       }
+
+       /*
+        * A new node has been brought up, potentially changing the topology
+        * classification.
+        *
+        * Note that this is racy vs any use of sched_numa_topology_type :/
+        */
+       init_numa_topology_type();
 }
 
 void sched_domains_numa_masks_set(unsigned int cpu)
@@ -1893,8 +1952,14 @@ void sched_domains_numa_masks_set(unsigned int cpu)
        int node = cpu_to_node(cpu);
        int i, j;
 
+       __sched_domains_numa_masks_set(node);
+
        for (i = 0; i < sched_domains_numa_levels; i++) {
                for (j = 0; j < nr_node_ids; j++) {
+                       if (!node_online(j))
+                               continue;
+
+                       /* Set ourselves in the remote node's masks */
                        if (node_distance(j, node) <= sched_domains_numa_distance[i])
                                cpumask_set_cpu(cpu, sched_domains_numa_masks[i][j]);
                }
index 057e17f..6469eca 100644 (file)
@@ -602,7 +602,7 @@ static inline void seccomp_sync_threads(unsigned long flags)
                smp_store_release(&thread->seccomp.filter,
                                  caller->seccomp.filter);
                atomic_set(&thread->seccomp.filter_count,
-                          atomic_read(&thread->seccomp.filter_count));
+                          atomic_read(&caller->seccomp.filter_count));
 
                /*
                 * Don't let an unprivileged task work around
index a3229ad..52b6abe 100644 (file)
@@ -1413,6 +1413,21 @@ struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
        return sighand;
 }
 
+#ifdef CONFIG_LOCKDEP
+void lockdep_assert_task_sighand_held(struct task_struct *task)
+{
+       struct sighand_struct *sighand;
+
+       rcu_read_lock();
+       sighand = rcu_dereference(task->sighand);
+       if (sighand)
+               lockdep_assert_held(&sighand->siglock);
+       else
+               WARN_ON_ONCE(1);
+       rcu_read_unlock();
+}
+#endif
+
 /*
  * send signal info to all the members of a group
  */
index 52bf159..f43ede0 100644 (file)
@@ -764,7 +764,7 @@ int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
 EXPORT_SYMBOL(smp_call_function_single);
 
 /**
- * smp_call_function_single_async(): Run an asynchronous function on a
+ * smp_call_function_single_async() - Run an asynchronous function on a
  *                              specific CPU.
  * @cpu: The CPU to run on.
  * @csd: Pre-allocated and setup data structure
@@ -783,6 +783,8 @@ EXPORT_SYMBOL(smp_call_function_single);
  *
  * NOTE: Be careful, there is unfortunately no current debugging facility to
  * validate the correctness of this serialization.
+ *
+ * Return: %0 on success or negative errno value on error
  */
 int smp_call_function_single_async(int cpu, struct __call_single_data *csd)
 {
@@ -974,7 +976,7 @@ static void smp_call_function_many_cond(const struct cpumask *mask,
  * @mask: The set of cpus to run on (only runs on online subset).
  * @func: The function to run. This must be fast and non-blocking.
  * @info: An arbitrary pointer to pass to the function.
- * @flags: Bitmask that controls the operation. If %SCF_WAIT is set, wait
+ * @wait: Bitmask that controls the operation. If %SCF_WAIT is set, wait
  *        (atomically) until function has completed on other CPUs. If
  *        %SCF_RUN_LOCAL is set, the function will also be run locally
  *        if the local CPU is set in the @cpumask.
@@ -1180,7 +1182,13 @@ void wake_up_all_idle_cpus(void)
 EXPORT_SYMBOL_GPL(wake_up_all_idle_cpus);
 
 /**
- * smp_call_on_cpu - Call a function on a specific cpu
+ * struct smp_call_on_cpu_struct - Call a function on a specific CPU
+ * @work: &work_struct
+ * @done: &completion to signal
+ * @func: function to call
+ * @data: function's data argument
+ * @ret: return value from @func
+ * @cpu: target CPU (%-1 for any CPU)
  *
  * Used to call a function on a specific cpu and wait for it to return.
  * Optionally make sure the call is done on a specified physical cpu via vcpu
index cf6acab..f6bc0bc 100644 (file)
@@ -291,7 +291,7 @@ int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
        unsigned int cpu;
        int ret = 0;
 
-       get_online_cpus();
+       cpus_read_lock();
        mutex_lock(&smpboot_threads_lock);
        for_each_online_cpu(cpu) {
                ret = __smpboot_create_thread(plug_thread, cpu);
@@ -304,7 +304,7 @@ int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
        list_add(&plug_thread->list, &hotplug_threads);
 out:
        mutex_unlock(&smpboot_threads_lock);
-       put_online_cpus();
+       cpus_read_unlock();
        return ret;
 }
 EXPORT_SYMBOL_GPL(smpboot_register_percpu_thread);
@@ -317,12 +317,12 @@ EXPORT_SYMBOL_GPL(smpboot_register_percpu_thread);
  */
 void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread)
 {
-       get_online_cpus();
+       cpus_read_lock();
        mutex_lock(&smpboot_threads_lock);
        list_del(&plug_thread->list);
        smpboot_destroy_threads(plug_thread);
        mutex_unlock(&smpboot_threads_lock);
-       put_online_cpus();
+       cpus_read_unlock();
 }
 EXPORT_SYMBOL_GPL(smpboot_unregister_percpu_thread);
 
index f3a0121..322b65d 100644 (file)
@@ -422,7 +422,7 @@ static inline void invoke_softirq(void)
        if (ksoftirqd_running(local_softirq_pending()))
                return;
 
-       if (!force_irqthreads || !__this_cpu_read(ksoftirqd)) {
+       if (!force_irqthreads() || !__this_cpu_read(ksoftirqd)) {
 #ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
                /*
                 * We can safely execute softirq on the current stack if
index 01df123..df922f4 100644 (file)
@@ -19,6 +19,8 @@
 #include <linux/prandom.h>
 #include <linux/cpu.h>
 
+#include "tick-internal.h"
+
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Paul E. McKenney <paulmck@kernel.org>");
 
@@ -34,9 +36,6 @@ static u64 wdtest_jiffies_read(struct clocksource *cs)
        return (u64)jiffies;
 }
 
-/* Assume HZ > 100. */
-#define JIFFIES_SHIFT  8
-
 static struct clocksource clocksource_wdtest_jiffies = {
        .name                   = "wdtest-jiffies",
        .rating                 = 1, /* lowest valid rating*/
index b89c76e..b8a14d2 100644 (file)
@@ -306,12 +306,12 @@ void clocksource_verify_percpu(struct clocksource *cs)
                return;
        cpumask_clear(&cpus_ahead);
        cpumask_clear(&cpus_behind);
-       get_online_cpus();
+       cpus_read_lock();
        preempt_disable();
        clocksource_verify_choose_cpus();
        if (cpumask_weight(&cpus_chosen) == 0) {
                preempt_enable();
-               put_online_cpus();
+               cpus_read_unlock();
                pr_warn("Not enough CPUs to check clocksource '%s'.\n", cs->name);
                return;
        }
@@ -337,7 +337,7 @@ void clocksource_verify_percpu(struct clocksource *cs)
                        cs_nsec_min = cs_nsec;
        }
        preempt_enable();
-       put_online_cpus();
+       cpus_read_unlock();
        if (!cpumask_empty(&cpus_ahead))
                pr_warn("        CPUs %*pbl ahead of CPU %d for clocksource %s.\n",
                        cpumask_pr_args(&cpus_ahead), testcpu, cs->name);
index 4a66725..0ea8702 100644 (file)
@@ -652,21 +652,10 @@ static inline int hrtimer_hres_active(void)
        return __hrtimer_hres_active(this_cpu_ptr(&hrtimer_bases));
 }
 
-/*
- * Reprogram the event source with checking both queues for the
- * next event
- * Called with interrupts disabled and base->lock held
- */
-static void
-hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
+static void __hrtimer_reprogram(struct hrtimer_cpu_base *cpu_base,
+                               struct hrtimer *next_timer,
+                               ktime_t expires_next)
 {
-       ktime_t expires_next;
-
-       expires_next = hrtimer_update_next_event(cpu_base);
-
-       if (skip_equal && expires_next == cpu_base->expires_next)
-               return;
-
        cpu_base->expires_next = expires_next;
 
        /*
@@ -689,7 +678,25 @@ hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
        if (!__hrtimer_hres_active(cpu_base) || cpu_base->hang_detected)
                return;
 
-       tick_program_event(cpu_base->expires_next, 1);
+       tick_program_event(expires_next, 1);
+}
+
+/*
+ * Reprogram the event source with checking both queues for the
+ * next event
+ * Called with interrupts disabled and base->lock held
+ */
+static void
+hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
+{
+       ktime_t expires_next;
+
+       expires_next = hrtimer_update_next_event(cpu_base);
+
+       if (skip_equal && expires_next == cpu_base->expires_next)
+               return;
+
+       __hrtimer_reprogram(cpu_base, cpu_base->next_timer, expires_next);
 }
 
 /* High resolution timer related functions */
@@ -720,23 +727,7 @@ static inline int hrtimer_is_hres_enabled(void)
        return hrtimer_hres_enabled;
 }
 
-/*
- * Retrigger next event is called after clock was set
- *
- * Called with interrupts disabled via on_each_cpu()
- */
-static void retrigger_next_event(void *arg)
-{
-       struct hrtimer_cpu_base *base = this_cpu_ptr(&hrtimer_bases);
-
-       if (!__hrtimer_hres_active(base))
-               return;
-
-       raw_spin_lock(&base->lock);
-       hrtimer_update_base(base);
-       hrtimer_force_reprogram(base, 0);
-       raw_spin_unlock(&base->lock);
-}
+static void retrigger_next_event(void *arg);
 
 /*
  * Switch to high resolution mode
@@ -758,29 +749,54 @@ static void hrtimer_switch_to_hres(void)
        retrigger_next_event(NULL);
 }
 
-static void clock_was_set_work(struct work_struct *work)
-{
-       clock_was_set();
-}
+#else
 
-static DECLARE_WORK(hrtimer_work, clock_was_set_work);
+static inline int hrtimer_is_hres_enabled(void) { return 0; }
+static inline void hrtimer_switch_to_hres(void) { }
 
+#endif /* CONFIG_HIGH_RES_TIMERS */
 /*
- * Called from timekeeping and resume code to reprogram the hrtimer
- * interrupt device on all cpus.
+ * Retrigger next event is called after clock was set with interrupts
+ * disabled through an SMP function call or directly from low level
+ * resume code.
+ *
+ * This is only invoked when:
+ *     - CONFIG_HIGH_RES_TIMERS is enabled.
+ *     - CONFIG_NOHZ_COMMON is enabled
+ *
+ * For the other cases this function is empty and because the call sites
+ * are optimized out it vanishes as well, i.e. no need for lots of
+ * #ifdeffery.
  */
-void clock_was_set_delayed(void)
+static void retrigger_next_event(void *arg)
 {
-       schedule_work(&hrtimer_work);
-}
-
-#else
+       struct hrtimer_cpu_base *base = this_cpu_ptr(&hrtimer_bases);
 
-static inline int hrtimer_is_hres_enabled(void) { return 0; }
-static inline void hrtimer_switch_to_hres(void) { }
-static inline void retrigger_next_event(void *arg) { }
+       /*
+        * When high resolution mode or nohz is active, then the offsets of
+        * CLOCK_REALTIME/TAI/BOOTTIME have to be updated. Otherwise the
+        * next tick will take care of that.
+        *
+        * If high resolution mode is active then the next expiring timer
+        * must be reevaluated and the clock event device reprogrammed if
+        * necessary.
+        *
+        * In the NOHZ case the update of the offset and the reevaluation
+        * of the next expiring timer is enough. The return from the SMP
+        * function call will take care of the reprogramming in case the
+        * CPU was in a NOHZ idle sleep.
+        */
+       if (!__hrtimer_hres_active(base) && !tick_nohz_active)
+               return;
 
-#endif /* CONFIG_HIGH_RES_TIMERS */
+       raw_spin_lock(&base->lock);
+       hrtimer_update_base(base);
+       if (__hrtimer_hres_active(base))
+               hrtimer_force_reprogram(base, 0);
+       else
+               hrtimer_update_next_event(base);
+       raw_spin_unlock(&base->lock);
+}
 
 /*
  * When a timer is enqueued and expires earlier than the already enqueued
@@ -835,75 +851,161 @@ static void hrtimer_reprogram(struct hrtimer *timer, bool reprogram)
        if (base->cpu_base != cpu_base)
                return;
 
+       if (expires >= cpu_base->expires_next)
+               return;
+
        /*
-        * If the hrtimer interrupt is running, then it will
-        * reevaluate the clock bases and reprogram the clock event
-        * device. The callbacks are always executed in hard interrupt
-        * context so we don't need an extra check for a running
-        * callback.
+        * If the hrtimer interrupt is running, then it will reevaluate the
+        * clock bases and reprogram the clock event device.
         */
        if (cpu_base->in_hrtirq)
                return;
 
-       if (expires >= cpu_base->expires_next)
-               return;
-
-       /* Update the pointer to the next expiring timer */
        cpu_base->next_timer = timer;
-       cpu_base->expires_next = expires;
+
+       __hrtimer_reprogram(cpu_base, timer, expires);
+}
+
+static bool update_needs_ipi(struct hrtimer_cpu_base *cpu_base,
+                            unsigned int active)
+{
+       struct hrtimer_clock_base *base;
+       unsigned int seq;
+       ktime_t expires;
 
        /*
-        * If hres is not active, hardware does not have to be
-        * programmed yet.
+        * Update the base offsets unconditionally so the following
+        * checks whether the SMP function call is required works.
         *
-        * If a hang was detected in the last timer interrupt then we
-        * do not schedule a timer which is earlier than the expiry
-        * which we enforced in the hang detection. We want the system
-        * to make progress.
+        * The update is safe even when the remote CPU is in the hrtimer
+        * interrupt or the hrtimer soft interrupt and expiring affected
+        * bases. Either it will see the update before handling a base or
+        * it will see it when it finishes the processing and reevaluates
+        * the next expiring timer.
         */
-       if (!__hrtimer_hres_active(cpu_base) || cpu_base->hang_detected)
-               return;
+       seq = cpu_base->clock_was_set_seq;
+       hrtimer_update_base(cpu_base);
+
+       /*
+        * If the sequence did not change over the update then the
+        * remote CPU already handled it.
+        */
+       if (seq == cpu_base->clock_was_set_seq)
+               return false;
+
+       /*
+        * If the remote CPU is currently handling an hrtimer interrupt, it
+        * will reevaluate the first expiring timer of all clock bases
+        * before reprogramming. Nothing to do here.
+        */
+       if (cpu_base->in_hrtirq)
+               return false;
 
        /*
-        * Program the timer hardware. We enforce the expiry for
-        * events which are already in the past.
+        * Walk the affected clock bases and check whether the first expiring
+        * timer in a clock base is moving ahead of the first expiring timer of
+        * @cpu_base. If so, the IPI must be invoked because per CPU clock
+        * event devices cannot be remotely reprogrammed.
         */
-       tick_program_event(expires, 1);
+       active &= cpu_base->active_bases;
+
+       for_each_active_base(base, cpu_base, active) {
+               struct timerqueue_node *next;
+
+               next = timerqueue_getnext(&base->active);
+               expires = ktime_sub(next->expires, base->offset);
+               if (expires < cpu_base->expires_next)
+                       return true;
+
+               /* Extra check for softirq clock bases */
+               if (base->clockid < HRTIMER_BASE_MONOTONIC_SOFT)
+                       continue;
+               if (cpu_base->softirq_activated)
+                       continue;
+               if (expires < cpu_base->softirq_expires_next)
+                       return true;
+       }
+       return false;
 }
 
 /*
- * Clock realtime was set
- *
- * Change the offset of the realtime clock vs. the monotonic
- * clock.
+ * Clock was set. This might affect CLOCK_REALTIME, CLOCK_TAI and
+ * CLOCK_BOOTTIME (for late sleep time injection).
  *
- * We might have to reprogram the high resolution timer interrupt. On
- * SMP we call the architecture specific code to retrigger _all_ high
- * resolution timer interrupts. On UP we just disable interrupts and
- * call the high resolution interrupt code.
+ * This requires to update the offsets for these clocks
+ * vs. CLOCK_MONOTONIC. When high resolution timers are enabled, then this
+ * also requires to eventually reprogram the per CPU clock event devices
+ * when the change moves an affected timer ahead of the first expiring
+ * timer on that CPU. Obviously remote per CPU clock event devices cannot
+ * be reprogrammed. The other reason why an IPI has to be sent is when the
+ * system is in !HIGH_RES and NOHZ mode. The NOHZ mode updates the offsets
+ * in the tick, which obviously might be stopped, so this has to bring out
+ * the remote CPU which might sleep in idle to get this sorted.
  */
-void clock_was_set(void)
+void clock_was_set(unsigned int bases)
 {
-#ifdef CONFIG_HIGH_RES_TIMERS
-       /* Retrigger the CPU local events everywhere */
-       on_each_cpu(retrigger_next_event, NULL, 1);
-#endif
+       struct hrtimer_cpu_base *cpu_base = raw_cpu_ptr(&hrtimer_bases);
+       cpumask_var_t mask;
+       int cpu;
+
+       if (!__hrtimer_hres_active(cpu_base) && !tick_nohz_active)
+               goto out_timerfd;
+
+       if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
+               on_each_cpu(retrigger_next_event, NULL, 1);
+               goto out_timerfd;
+       }
+
+       /* Avoid interrupting CPUs if possible */
+       cpus_read_lock();
+       for_each_online_cpu(cpu) {
+               unsigned long flags;
+
+               cpu_base = &per_cpu(hrtimer_bases, cpu);
+               raw_spin_lock_irqsave(&cpu_base->lock, flags);
+
+               if (update_needs_ipi(cpu_base, bases))
+                       cpumask_set_cpu(cpu, mask);
+
+               raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
+       }
+
+       preempt_disable();
+       smp_call_function_many(mask, retrigger_next_event, NULL, 1);
+       preempt_enable();
+       cpus_read_unlock();
+       free_cpumask_var(mask);
+
+out_timerfd:
        timerfd_clock_was_set();
 }
 
+static void clock_was_set_work(struct work_struct *work)
+{
+       clock_was_set(CLOCK_SET_WALL);
+}
+
+static DECLARE_WORK(hrtimer_work, clock_was_set_work);
+
+/*
+ * Called from timekeeping code to reprogram the hrtimer interrupt device
+ * on all cpus and to notify timerfd.
+ */
+void clock_was_set_delayed(void)
+{
+       schedule_work(&hrtimer_work);
+}
+
 /*
- * During resume we might have to reprogram the high resolution timer
- * interrupt on all online CPUs.  However, all other CPUs will be
- * stopped with IRQs interrupts disabled so the clock_was_set() call
- * must be deferred.
+ * Called during resume either directly from via timekeeping_resume()
+ * or in the case of s2idle from tick_unfreeze() to ensure that the
+ * hrtimers are up to date.
  */
-void hrtimers_resume(void)
+void hrtimers_resume_local(void)
 {
        lockdep_assert_irqs_disabled();
        /* Retrigger on the local CPU */
        retrigger_next_event(NULL);
-       /* And schedule a retrigger for all others */
-       clock_was_set_delayed();
 }
 
 /*
@@ -1030,12 +1132,13 @@ static void __remove_hrtimer(struct hrtimer *timer,
  * remove hrtimer, called with base lock held
  */
 static inline int
-remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base, bool restart)
+remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base,
+              bool restart, bool keep_local)
 {
        u8 state = timer->state;
 
        if (state & HRTIMER_STATE_ENQUEUED) {
-               int reprogram;
+               bool reprogram;
 
                /*
                 * Remove the timer and force reprogramming when high
@@ -1048,8 +1151,16 @@ remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base, bool rest
                debug_deactivate(timer);
                reprogram = base->cpu_base == this_cpu_ptr(&hrtimer_bases);
 
+               /*
+                * If the timer is not restarted then reprogramming is
+                * required if the timer is local. If it is local and about
+                * to be restarted, avoid programming it twice (on removal
+                * and a moment later when it's requeued).
+                */
                if (!restart)
                        state = HRTIMER_STATE_INACTIVE;
+               else
+                       reprogram &= !keep_local;
 
                __remove_hrtimer(timer, base, state, reprogram);
                return 1;
@@ -1103,9 +1214,31 @@ static int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
                                    struct hrtimer_clock_base *base)
 {
        struct hrtimer_clock_base *new_base;
+       bool force_local, first;
 
-       /* Remove an active timer from the queue: */
-       remove_hrtimer(timer, base, true);
+       /*
+        * If the timer is on the local cpu base and is the first expiring
+        * timer then this might end up reprogramming the hardware twice
+        * (on removal and on enqueue). To avoid that by prevent the
+        * reprogram on removal, keep the timer local to the current CPU
+        * and enforce reprogramming after it is queued no matter whether
+        * it is the new first expiring timer again or not.
+        */
+       force_local = base->cpu_base == this_cpu_ptr(&hrtimer_bases);
+       force_local &= base->cpu_base->next_timer == timer;
+
+       /*
+        * Remove an active timer from the queue. In case it is not queued
+        * on the current CPU, make sure that remove_hrtimer() updates the
+        * remote data correctly.
+        *
+        * If it's on the current CPU and the first expiring timer, then
+        * skip reprogramming, keep the timer local and enforce
+        * reprogramming later if it was the first expiring timer.  This
+        * avoids programming the underlying clock event twice (once at
+        * removal and once after enqueue).
+        */
+       remove_hrtimer(timer, base, true, force_local);
 
        if (mode & HRTIMER_MODE_REL)
                tim = ktime_add_safe(tim, base->get_time());
@@ -1115,9 +1248,24 @@ static int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
        hrtimer_set_expires_range_ns(timer, tim, delta_ns);
 
        /* Switch the timer base, if necessary: */
-       new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);
+       if (!force_local) {
+               new_base = switch_hrtimer_base(timer, base,
+                                              mode & HRTIMER_MODE_PINNED);
+       } else {
+               new_base = base;
+       }
 
-       return enqueue_hrtimer(timer, new_base, mode);
+       first = enqueue_hrtimer(timer, new_base, mode);
+       if (!force_local)
+               return first;
+
+       /*
+        * Timer was forced to stay on the current CPU to avoid
+        * reprogramming on removal and enqueue. Force reprogram the
+        * hardware by evaluating the new first expiring timer.
+        */
+       hrtimer_force_reprogram(new_base->cpu_base, 1);
+       return 0;
 }
 
 /**
@@ -1183,7 +1331,7 @@ int hrtimer_try_to_cancel(struct hrtimer *timer)
        base = lock_hrtimer_base(timer, &flags);
 
        if (!hrtimer_callback_running(timer))
-               ret = remove_hrtimer(timer, base, false);
+               ret = remove_hrtimer(timer, base, false, false);
 
        unlock_hrtimer_base(timer, &flags);
 
index 01935aa..bc4db9e 100644 (file)
 #include <linux/init.h>
 
 #include "timekeeping.h"
+#include "tick-internal.h"
 
 
-/* Since jiffies uses a simple TICK_NSEC multiplier
- * conversion, the .shift value could be zero. However
- * this would make NTP adjustments impossible as they are
- * in units of 1/2^.shift. Thus we use JIFFIES_SHIFT to
- * shift both the nominator and denominator the same
- * amount, and give ntp adjustments in units of 1/2^8
- *
- * The value 8 is somewhat carefully chosen, as anything
- * larger can result in overflows. TICK_NSEC grows as HZ
- * shrinks, so values greater than 8 overflow 32bits when
- * HZ=100.
- */
-#if HZ < 34
-#define JIFFIES_SHIFT  6
-#elif HZ < 67
-#define JIFFIES_SHIFT  7
-#else
-#define JIFFIES_SHIFT  8
-#endif
-
 static u64 jiffies_read(struct clocksource *cs)
 {
        return (u64) jiffies;
index 517be7f..ee73686 100644 (file)
@@ -291,6 +291,8 @@ static void thread_group_start_cputime(struct task_struct *tsk, u64 *samples)
        struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
        struct posix_cputimers *pct = &tsk->signal->posix_cputimers;
 
+       lockdep_assert_task_sighand_held(tsk);
+
        /* Check if cputimer isn't running. This is accessed without locking. */
        if (!READ_ONCE(pct->timers_active)) {
                struct task_cputime sum;
@@ -405,6 +407,55 @@ static int posix_cpu_timer_create(struct k_itimer *new_timer)
        return 0;
 }
 
+static struct posix_cputimer_base *timer_base(struct k_itimer *timer,
+                                             struct task_struct *tsk)
+{
+       int clkidx = CPUCLOCK_WHICH(timer->it_clock);
+
+       if (CPUCLOCK_PERTHREAD(timer->it_clock))
+               return tsk->posix_cputimers.bases + clkidx;
+       else
+               return tsk->signal->posix_cputimers.bases + clkidx;
+}
+
+/*
+ * Force recalculating the base earliest expiration on the next tick.
+ * This will also re-evaluate the need to keep around the process wide
+ * cputime counter and tick dependency and eventually shut these down
+ * if necessary.
+ */
+static void trigger_base_recalc_expires(struct k_itimer *timer,
+                                       struct task_struct *tsk)
+{
+       struct posix_cputimer_base *base = timer_base(timer, tsk);
+
+       base->nextevt = 0;
+}
+
+/*
+ * Dequeue the timer and reset the base if it was its earliest expiration.
+ * It makes sure the next tick recalculates the base next expiration so we
+ * don't keep the costly process wide cputime counter around for a random
+ * amount of time, along with the tick dependency.
+ *
+ * If another timer gets queued between this and the next tick, its
+ * expiration will update the base next event if necessary on the next
+ * tick.
+ */
+static void disarm_timer(struct k_itimer *timer, struct task_struct *p)
+{
+       struct cpu_timer *ctmr = &timer->it.cpu;
+       struct posix_cputimer_base *base;
+
+       if (!cpu_timer_dequeue(ctmr))
+               return;
+
+       base = timer_base(timer, p);
+       if (cpu_timer_getexpires(ctmr) == base->nextevt)
+               trigger_base_recalc_expires(timer, p);
+}
+
+
 /*
  * Clean up a CPU-clock timer that is about to be destroyed.
  * This is called from timer deletion with the timer already locked.
@@ -439,7 +490,7 @@ static int posix_cpu_timer_del(struct k_itimer *timer)
                if (timer->it.cpu.firing)
                        ret = TIMER_RETRY;
                else
-                       cpu_timer_dequeue(ctmr);
+                       disarm_timer(timer, p);
 
                unlock_task_sighand(p, &flags);
        }
@@ -498,15 +549,9 @@ void posix_cpu_timers_exit_group(struct task_struct *tsk)
  */
 static void arm_timer(struct k_itimer *timer, struct task_struct *p)
 {
-       int clkidx = CPUCLOCK_WHICH(timer->it_clock);
+       struct posix_cputimer_base *base = timer_base(timer, p);
        struct cpu_timer *ctmr = &timer->it.cpu;
        u64 newexp = cpu_timer_getexpires(ctmr);
-       struct posix_cputimer_base *base;
-
-       if (CPUCLOCK_PERTHREAD(timer->it_clock))
-               base = p->posix_cputimers.bases + clkidx;
-       else
-               base = p->signal->posix_cputimers.bases + clkidx;
 
        if (!cpu_timer_enqueue(&base->tqhead, ctmr))
                return;
@@ -703,16 +748,29 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
        timer->it_overrun_last = 0;
        timer->it_overrun = -1;
 
-       if (new_expires != 0 && !(val < new_expires)) {
+       if (val >= new_expires) {
+               if (new_expires != 0) {
+                       /*
+                        * The designated time already passed, so we notify
+                        * immediately, even if the thread never runs to
+                        * accumulate more time on this clock.
+                        */
+                       cpu_timer_fire(timer);
+               }
+
                /*
-                * The designated time already passed, so we notify
-                * immediately, even if the thread never runs to
-                * accumulate more time on this clock.
+                * Make sure we don't keep around the process wide cputime
+                * counter or the tick dependency if they are not necessary.
                 */
-               cpu_timer_fire(timer);
-       }
+               sighand = lock_task_sighand(p, &flags);
+               if (!sighand)
+                       goto out;
+
+               if (!cpu_timer_queued(ctmr))
+                       trigger_base_recalc_expires(timer, p);
 
-       ret = 0;
+               unlock_task_sighand(p, &flags);
+       }
  out:
        rcu_read_unlock();
        if (old)
@@ -1346,8 +1404,6 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clkid,
                        }
                }
 
-               if (!*newval)
-                       return;
                *newval += now;
        }
 
index dd5697d..3913222 100644 (file)
@@ -336,7 +336,7 @@ void posixtimer_rearm(struct kernel_siginfo *info)
 int posix_timer_event(struct k_itimer *timr, int si_private)
 {
        enum pid_type type;
-       int ret = -1;
+       int ret;
        /*
         * FIXME: if ->sigq is queued we can race with
         * dequeue_signal()->posixtimer_rearm().
index d663249..4678935 100644 (file)
@@ -470,6 +470,13 @@ void tick_resume_local(void)
                else
                        tick_resume_oneshot();
        }
+
+       /*
+        * Ensure that hrtimers are up to date and the clockevents device
+        * is reprogrammed correctly when high resolution timers are
+        * enabled.
+        */
+       hrtimers_resume_local();
 }
 
 /**
index 6a742a2..649f2b4 100644 (file)
@@ -165,3 +165,35 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
 
 extern u64 get_next_timer_interrupt(unsigned long basej, u64 basem);
 void timer_clear_idle(void);
+
+#define CLOCK_SET_WALL                                                 \
+       (BIT(HRTIMER_BASE_REALTIME) | BIT(HRTIMER_BASE_REALTIME_SOFT) | \
+        BIT(HRTIMER_BASE_TAI) | BIT(HRTIMER_BASE_TAI_SOFT))
+
+#define CLOCK_SET_BOOT                                                 \
+       (BIT(HRTIMER_BASE_BOOTTIME) | BIT(HRTIMER_BASE_BOOTTIME_SOFT))
+
+void clock_was_set(unsigned int bases);
+void clock_was_set_delayed(void);
+
+void hrtimers_resume_local(void);
+
+/* Since jiffies uses a simple TICK_NSEC multiplier
+ * conversion, the .shift value could be zero. However
+ * this would make NTP adjustments impossible as they are
+ * in units of 1/2^.shift. Thus we use JIFFIES_SHIFT to
+ * shift both the nominator and denominator the same
+ * amount, and give ntp adjustments in units of 1/2^8
+ *
+ * The value 8 is somewhat carefully chosen, as anything
+ * larger can result in overflows. TICK_NSEC grows as HZ
+ * shrinks, so values greater than 8 overflow 32bits when
+ * HZ=100.
+ */
+#if HZ < 34
+#define JIFFIES_SHIFT  6
+#elif HZ < 67
+#define JIFFIES_SHIFT  7
+#else
+#define JIFFIES_SHIFT  8
+#endif
index 8a364aa..b348749 100644 (file)
@@ -1323,8 +1323,8 @@ out:
        write_seqcount_end(&tk_core.seq);
        raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
 
-       /* signal hrtimers about time change */
-       clock_was_set();
+       /* Signal hrtimers about time change */
+       clock_was_set(CLOCK_SET_WALL);
 
        if (!ret)
                audit_tk_injoffset(ts_delta);
@@ -1371,8 +1371,8 @@ error: /* even if we error out, we forwarded the time, so call update */
        write_seqcount_end(&tk_core.seq);
        raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
 
-       /* signal hrtimers about time change */
-       clock_was_set();
+       /* Signal hrtimers about time change */
+       clock_was_set(CLOCK_SET_WALL);
 
        return ret;
 }
@@ -1746,8 +1746,8 @@ void timekeeping_inject_sleeptime64(const struct timespec64 *delta)
        write_seqcount_end(&tk_core.seq);
        raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
 
-       /* signal hrtimers about time change */
-       clock_was_set();
+       /* Signal hrtimers about time change */
+       clock_was_set(CLOCK_SET_WALL | CLOCK_SET_BOOT);
 }
 #endif
 
@@ -1810,8 +1810,10 @@ void timekeeping_resume(void)
 
        touch_softlockup_watchdog();
 
+       /* Resume the clockevent device(s) and hrtimers */
        tick_resume();
-       hrtimers_resume();
+       /* Notify timerfd as resume is equivalent to clock_was_set() */
+       timerfd_resume();
 }
 
 int timekeeping_suspend(void)
@@ -2125,7 +2127,7 @@ static u64 logarithmic_accumulation(struct timekeeper *tk, u64 offset,
  * timekeeping_advance - Updates the timekeeper to the current time and
  * current NTP tick length
  */
-static void timekeeping_advance(enum timekeeping_adv_mode mode)
+static bool timekeeping_advance(enum timekeeping_adv_mode mode)
 {
        struct timekeeper *real_tk = &tk_core.timekeeper;
        struct timekeeper *tk = &shadow_timekeeper;
@@ -2196,9 +2198,8 @@ static void timekeeping_advance(enum timekeeping_adv_mode mode)
        write_seqcount_end(&tk_core.seq);
 out:
        raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
-       if (clock_set)
-               /* Have to call _delayed version, since in irq context*/
-               clock_was_set_delayed();
+
+       return !!clock_set;
 }
 
 /**
@@ -2207,7 +2208,8 @@ out:
  */
 void update_wall_time(void)
 {
-       timekeeping_advance(TK_ADV_TICK);
+       if (timekeeping_advance(TK_ADV_TICK))
+               clock_was_set_delayed();
 }
 
 /**
@@ -2387,8 +2389,9 @@ int do_adjtimex(struct __kernel_timex *txc)
 {
        struct timekeeper *tk = &tk_core.timekeeper;
        struct audit_ntp_data ad;
-       unsigned long flags;
+       bool clock_set = false;
        struct timespec64 ts;
+       unsigned long flags;
        s32 orig_tai, tai;
        int ret;
 
@@ -2423,6 +2426,7 @@ int do_adjtimex(struct __kernel_timex *txc)
        if (tai != orig_tai) {
                __timekeeping_set_tai_offset(tk, tai);
                timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
+               clock_set = true;
        }
        tk_update_leap_state(tk);
 
@@ -2433,10 +2437,10 @@ int do_adjtimex(struct __kernel_timex *txc)
 
        /* Update the multiplier immediately if frequency was set directly */
        if (txc->modes & (ADJ_FREQUENCY | ADJ_TICK))
-               timekeeping_advance(TK_ADV_FREQ);
+               clock_set |= timekeeping_advance(TK_ADV_FREQ);
 
-       if (tai != orig_tai)
-               clock_was_set();
+       if (clock_set)
+               clock_was_set(CLOCK_REALTIME);
 
        ntp_notify_cmos_timer();
 
index 9eb11c2..e3d2c23 100644 (file)
@@ -1265,8 +1265,10 @@ static inline void timer_base_unlock_expiry(struct timer_base *base)
 static void timer_sync_wait_running(struct timer_base *base)
 {
        if (atomic_read(&base->timer_waiters)) {
+               raw_spin_unlock_irq(&base->lock);
                spin_unlock(&base->expiry_lock);
                spin_lock(&base->expiry_lock);
+               raw_spin_lock_irq(&base->lock);
        }
 }
 
@@ -1457,14 +1459,14 @@ static void expire_timers(struct timer_base *base, struct hlist_head *head)
                if (timer->flags & TIMER_IRQSAFE) {
                        raw_spin_unlock(&base->lock);
                        call_timer_fn(timer, fn, baseclk);
-                       base->running_timer = NULL;
                        raw_spin_lock(&base->lock);
+                       base->running_timer = NULL;
                } else {
                        raw_spin_unlock_irq(&base->lock);
                        call_timer_fn(timer, fn, baseclk);
+                       raw_spin_lock_irq(&base->lock);
                        base->running_timer = NULL;
                        timer_sync_wait_running(base);
-                       raw_spin_lock_irq(&base->lock);
                }
        }
 }
index 0a315c3..bb8f411 100644 (file)
@@ -521,11 +521,11 @@ static void torture_shuffle_tasks(void)
        struct shuffle_task *stp;
 
        cpumask_setall(shuffle_tmp_mask);
-       get_online_cpus();
+       cpus_read_lock();
 
        /* No point in shuffling if there is only one online CPU (ex: UP) */
        if (num_online_cpus() == 1) {
-               put_online_cpus();
+               cpus_read_unlock();
                return;
        }
 
@@ -541,7 +541,7 @@ static void torture_shuffle_tasks(void)
                set_cpus_allowed_ptr(stp->st_t, shuffle_tmp_mask);
        mutex_unlock(&shuffle_task_mutex);
 
-       put_online_cpus();
+       cpus_read_unlock();
 }
 
 /* Shuffle tasks across CPUs, with the intent of allowing each CPU in the
index d567b17..3ee23f4 100644 (file)
@@ -219,6 +219,11 @@ config DYNAMIC_FTRACE_WITH_DIRECT_CALLS
        depends on DYNAMIC_FTRACE_WITH_REGS
        depends on HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
 
+config DYNAMIC_FTRACE_WITH_ARGS
+       def_bool y
+       depends on DYNAMIC_FTRACE
+       depends on HAVE_DYNAMIC_FTRACE_WITH_ARGS
+
 config FUNCTION_PROFILER
        bool "Kernel function profiler"
        depends on FUNCTION_TRACER
index b4916ef..fdd1407 100644 (file)
@@ -990,28 +990,29 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
                return &bpf_get_numa_node_id_proto;
        case BPF_FUNC_perf_event_read:
                return &bpf_perf_event_read_proto;
-       case BPF_FUNC_probe_write_user:
-               return bpf_get_probe_write_proto();
        case BPF_FUNC_current_task_under_cgroup:
                return &bpf_current_task_under_cgroup_proto;
        case BPF_FUNC_get_prandom_u32:
                return &bpf_get_prandom_u32_proto;
+       case BPF_FUNC_probe_write_user:
+               return security_locked_down(LOCKDOWN_BPF_WRITE_USER) < 0 ?
+                      NULL : bpf_get_probe_write_proto();
        case BPF_FUNC_probe_read_user:
                return &bpf_probe_read_user_proto;
        case BPF_FUNC_probe_read_kernel:
-               return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
+               return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
                       NULL : &bpf_probe_read_kernel_proto;
        case BPF_FUNC_probe_read_user_str:
                return &bpf_probe_read_user_str_proto;
        case BPF_FUNC_probe_read_kernel_str:
-               return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
+               return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
                       NULL : &bpf_probe_read_kernel_str_proto;
 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
        case BPF_FUNC_probe_read:
-               return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
+               return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
                       NULL : &bpf_probe_read_compat_proto;
        case BPF_FUNC_probe_read_str:
-               return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
+               return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
                       NULL : &bpf_probe_read_compat_str_proto;
 #endif
 #ifdef CONFIG_CGROUPS
index 7b180f6..7efbc8a 100644 (file)
@@ -3100,6 +3100,7 @@ ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec)
 
 static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs)
 {
+       bool init_nop = ftrace_need_init_nop();
        struct ftrace_page *pg;
        struct dyn_ftrace *p;
        u64 start, stop;
@@ -3138,8 +3139,7 @@ static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs)
                         * Do the initial record conversion from mcount jump
                         * to the NOP instructions.
                         */
-                       if (!__is_defined(CC_USING_NOP_MCOUNT) &&
-                           !ftrace_nop_initialize(mod, p))
+                       if (init_nop && !ftrace_nop_initialize(mod, p))
                                break;
 
                        update_cnt++;
index c59dd35..a1adb29 100644 (file)
@@ -2897,14 +2897,26 @@ int tracepoint_printk_sysctl(struct ctl_table *table, int write,
 
 void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
 {
+       enum event_trigger_type tt = ETT_NONE;
+       struct trace_event_file *file = fbuffer->trace_file;
+
+       if (__event_trigger_test_discard(file, fbuffer->buffer, fbuffer->event,
+                       fbuffer->entry, &tt))
+               goto discard;
+
        if (static_key_false(&tracepoint_printk_key.key))
                output_printk(fbuffer);
 
        if (static_branch_unlikely(&trace_event_exports_enabled))
                ftrace_exports(fbuffer->event, TRACE_EXPORT_EVENT);
-       event_trigger_unlock_commit_regs(fbuffer->trace_file, fbuffer->buffer,
-                                   fbuffer->event, fbuffer->entry,
-                                   fbuffer->trace_ctx, fbuffer->regs);
+
+       trace_buffer_unlock_commit_regs(file->tr, fbuffer->buffer,
+                       fbuffer->event, fbuffer->trace_ctx, fbuffer->regs);
+
+discard:
+       if (tt)
+               event_triggers_post_call(file, tt);
+
 }
 EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
 
@@ -9135,8 +9147,10 @@ static int trace_array_create_dir(struct trace_array *tr)
                return -EINVAL;
 
        ret = event_trace_add_tracer(tr->dir, tr);
-       if (ret)
+       if (ret) {
                tracefs_remove(tr->dir);
+               return ret;
+       }
 
        init_tracer_tracefs(tr, tr->dir);
        __update_tracer_options(tr);
index a180abf..4a0e693 100644 (file)
@@ -1389,38 +1389,6 @@ event_trigger_unlock_commit(struct trace_event_file *file,
                event_triggers_post_call(file, tt);
 }
 
-/**
- * event_trigger_unlock_commit_regs - handle triggers and finish event commit
- * @file: The file pointer associated with the event
- * @buffer: The ring buffer that the event is being written to
- * @event: The event meta data in the ring buffer
- * @entry: The event itself
- * @trace_ctx: The tracing context flags.
- *
- * This is a helper function to handle triggers that require data
- * from the event itself. It also tests the event against filters and
- * if the event is soft disabled and should be discarded.
- *
- * Same as event_trigger_unlock_commit() but calls
- * trace_buffer_unlock_commit_regs() instead of trace_buffer_unlock_commit().
- */
-static inline void
-event_trigger_unlock_commit_regs(struct trace_event_file *file,
-                                struct trace_buffer *buffer,
-                                struct ring_buffer_event *event,
-                                void *entry, unsigned int trace_ctx,
-                                struct pt_regs *regs)
-{
-       enum event_trigger_type tt = ETT_NONE;
-
-       if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
-               trace_buffer_unlock_commit_regs(file->tr, buffer, event,
-                                               trace_ctx, regs);
-
-       if (tt)
-               event_triggers_post_call(file, tt);
-}
-
 #define FILTER_PRED_INVALID    ((unsigned short)-1)
 #define FILTER_PRED_IS_RIGHT   (1 << 15)
 #define FILTER_PRED_FOLD       (1 << 15)
index 34325f4..a48aa2a 100644 (file)
@@ -65,7 +65,8 @@
        C(INVALID_SORT_MODIFIER,"Invalid sort modifier"),               \
        C(EMPTY_SORT_FIELD,     "Empty sort field"),                    \
        C(TOO_MANY_SORT_FIELDS, "Too many sort fields (Max = 2)"),      \
-       C(INVALID_SORT_FIELD,   "Sort field must be a key or a val"),
+       C(INVALID_SORT_FIELD,   "Sort field must be a key or a val"),   \
+       C(INVALID_STR_OPERAND,  "String type can not be an operand in expression"),
 
 #undef C
 #define C(a, b)                HIST_ERR_##a
@@ -2156,6 +2157,13 @@ static struct hist_field *parse_unary(struct hist_trigger_data *hist_data,
                ret = PTR_ERR(operand1);
                goto free;
        }
+       if (operand1->flags & HIST_FIELD_FL_STRING) {
+               /* String type can not be the operand of unary operator. */
+               hist_err(file->tr, HIST_ERR_INVALID_STR_OPERAND, errpos(str));
+               destroy_hist_field(operand1, 0);
+               ret = -EINVAL;
+               goto free;
+       }
 
        expr->flags |= operand1->flags &
                (HIST_FIELD_FL_TIMESTAMP | HIST_FIELD_FL_TIMESTAMP_USECS);
@@ -2257,6 +2265,11 @@ static struct hist_field *parse_expr(struct hist_trigger_data *hist_data,
                operand1 = NULL;
                goto free;
        }
+       if (operand1->flags & HIST_FIELD_FL_STRING) {
+               hist_err(file->tr, HIST_ERR_INVALID_STR_OPERAND, errpos(operand1_str));
+               ret = -EINVAL;
+               goto free;
+       }
 
        /* rest of string could be another expression e.g. b+c in a+b+c */
        operand_flags = 0;
@@ -2266,6 +2279,11 @@ static struct hist_field *parse_expr(struct hist_trigger_data *hist_data,
                operand2 = NULL;
                goto free;
        }
+       if (operand2->flags & HIST_FIELD_FL_STRING) {
+               hist_err(file->tr, HIST_ERR_INVALID_STR_OPERAND, errpos(str));
+               ret = -EINVAL;
+               goto free;
+       }
 
        ret = check_expr_operands(file->tr, operand1, operand2);
        if (ret)
@@ -2287,6 +2305,10 @@ static struct hist_field *parse_expr(struct hist_trigger_data *hist_data,
 
        expr->operands[0] = operand1;
        expr->operands[1] = operand2;
+
+       /* The operand sizes should be the same, so just pick one */
+       expr->size = operand1->size;
+
        expr->operator = field_op;
        expr->name = expr_str(expr, 0);
        expr->type = kstrdup(operand1->type, GFP_KERNEL);
@@ -3408,6 +3430,8 @@ trace_action_create_field_var(struct hist_trigger_data *hist_data,
                        event = data->match_data.event;
                }
 
+               if (!event)
+                       goto free;
                /*
                 * At this point, we're looking at a field on another
                 * event.  Because we can't modify a hist trigger on
index a6c0cda..14f46aa 100644 (file)
@@ -327,7 +327,7 @@ static void move_to_next_cpu(void)
 
        get_online_cpus();
        cpumask_and(current_mask, cpu_online_mask, tr->tracing_cpumask);
-       next_cpu = cpumask_next(smp_processor_id(), current_mask);
+       next_cpu = cpumask_next(raw_smp_processor_id(), current_mask);
        put_online_cpus();
 
        if (next_cpu >= nr_cpu_ids)
index a7e3c24..b61eefe 100644 (file)
@@ -253,10 +253,40 @@ static struct osnoise_data {
  */
 static bool osnoise_busy;
 
+#ifdef CONFIG_PREEMPT_RT
 /*
  * Print the osnoise header info.
  */
 static void print_osnoise_headers(struct seq_file *s)
+{
+       if (osnoise_data.tainted)
+               seq_puts(s, "# osnoise is tainted!\n");
+
+       seq_puts(s, "#                                _-------=> irqs-off\n");
+       seq_puts(s, "#                               / _------=> need-resched\n");
+       seq_puts(s, "#                              | / _-----=> need-resched-lazy\n");
+       seq_puts(s, "#                              || / _----=> hardirq/softirq\n");
+       seq_puts(s, "#                              ||| / _---=> preempt-depth\n");
+       seq_puts(s, "#                              |||| / _--=> preempt-lazy-depth\n");
+       seq_puts(s, "#                              ||||| / _-=> migrate-disable\n");
+
+       seq_puts(s, "#                              |||||| /          ");
+       seq_puts(s, "                                     MAX\n");
+
+       seq_puts(s, "#                              ||||| /                         ");
+       seq_puts(s, "                    SINGLE      Interference counters:\n");
+
+       seq_puts(s, "#                              |||||||               RUNTIME   ");
+       seq_puts(s, "   NOISE  %% OF CPU  NOISE    +-----------------------------+\n");
+
+       seq_puts(s, "#           TASK-PID      CPU# |||||||   TIMESTAMP    IN US    ");
+       seq_puts(s, "   IN US  AVAILABLE  IN US     HW    NMI    IRQ   SIRQ THREAD\n");
+
+       seq_puts(s, "#              | |         |   |||||||      |           |      ");
+       seq_puts(s, "       |    |            |      |      |      |      |      |\n");
+}
+#else /* CONFIG_PREEMPT_RT */
+static void print_osnoise_headers(struct seq_file *s)
 {
        if (osnoise_data.tainted)
                seq_puts(s, "# osnoise is tainted!\n");
@@ -279,6 +309,7 @@ static void print_osnoise_headers(struct seq_file *s)
        seq_puts(s, "#              | |         |   ||||      |           |      ");
        seq_puts(s, "       |    |            |      |      |      |      |      |\n");
 }
+#endif /* CONFIG_PREEMPT_RT */
 
 /*
  * osnoise_taint - report an osnoise error.
@@ -323,6 +354,24 @@ static void trace_osnoise_sample(struct osnoise_sample *sample)
 /*
  * Print the timerlat header info.
  */
+#ifdef CONFIG_PREEMPT_RT
+static void print_timerlat_headers(struct seq_file *s)
+{
+       seq_puts(s, "#                                _-------=> irqs-off\n");
+       seq_puts(s, "#                               / _------=> need-resched\n");
+       seq_puts(s, "#                              | / _-----=> need-resched-lazy\n");
+       seq_puts(s, "#                              || / _----=> hardirq/softirq\n");
+       seq_puts(s, "#                              ||| / _---=> preempt-depth\n");
+       seq_puts(s, "#                              |||| / _--=> preempt-lazy-depth\n");
+       seq_puts(s, "#                              ||||| / _-=> migrate-disable\n");
+       seq_puts(s, "#                              |||||| /\n");
+       seq_puts(s, "#                              |||||||             ACTIVATION\n");
+       seq_puts(s, "#           TASK-PID      CPU# |||||||   TIMESTAMP    ID     ");
+       seq_puts(s, "       CONTEXT                LATENCY\n");
+       seq_puts(s, "#              | |         |   |||||||      |         |      ");
+       seq_puts(s, "            |                       |\n");
+}
+#else /* CONFIG_PREEMPT_RT */
 static void print_timerlat_headers(struct seq_file *s)
 {
        seq_puts(s, "#                                _-----=> irqs-off\n");
@@ -336,6 +385,7 @@ static void print_timerlat_headers(struct seq_file *s)
        seq_puts(s, "#              | |         |   ||||      |         |      ");
        seq_puts(s, "            |                       |\n");
 }
+#endif /* CONFIG_PREEMPT_RT */
 
 /*
  * Record an timerlat_sample into the tracer buffer.
@@ -1025,9 +1075,13 @@ diff_osn_sample_stats(struct osnoise_variables *osn_var, struct osnoise_sample *
 /*
  * osnoise_stop_tracing - Stop tracing and the tracer.
  */
-static void osnoise_stop_tracing(void)
+static __always_inline void osnoise_stop_tracing(void)
 {
        struct trace_array *tr = osnoise_trace;
+
+       trace_array_printk_buf(tr->array_buffer.buffer, _THIS_IP_,
+                       "stop tracing hit on cpu %d\n", smp_processor_id());
+
        tracer_tracing_off(tr);
 }
 
index fc32821..efd14c7 100644 (file)
 #include <linux/sched/task.h>
 #include <linux/static_key.h>
 
+enum tp_func_state {
+       TP_FUNC_0,
+       TP_FUNC_1,
+       TP_FUNC_2,
+       TP_FUNC_N,
+};
+
 extern tracepoint_ptr_t __start___tracepoints_ptrs[];
 extern tracepoint_ptr_t __stop___tracepoints_ptrs[];
 
 DEFINE_SRCU(tracepoint_srcu);
 EXPORT_SYMBOL_GPL(tracepoint_srcu);
 
+enum tp_transition_sync {
+       TP_TRANSITION_SYNC_1_0_1,
+       TP_TRANSITION_SYNC_N_2_1,
+
+       _NR_TP_TRANSITION_SYNC,
+};
+
+struct tp_transition_snapshot {
+       unsigned long rcu;
+       unsigned long srcu;
+       bool ongoing;
+};
+
+/* Protected by tracepoints_mutex */
+static struct tp_transition_snapshot tp_transition_snapshot[_NR_TP_TRANSITION_SYNC];
+
+static void tp_rcu_get_state(enum tp_transition_sync sync)
+{
+       struct tp_transition_snapshot *snapshot = &tp_transition_snapshot[sync];
+
+       /* Keep the latest get_state snapshot. */
+       snapshot->rcu = get_state_synchronize_rcu();
+       snapshot->srcu = start_poll_synchronize_srcu(&tracepoint_srcu);
+       snapshot->ongoing = true;
+}
+
+static void tp_rcu_cond_sync(enum tp_transition_sync sync)
+{
+       struct tp_transition_snapshot *snapshot = &tp_transition_snapshot[sync];
+
+       if (!snapshot->ongoing)
+               return;
+       cond_synchronize_rcu(snapshot->rcu);
+       if (!poll_state_synchronize_srcu(&tracepoint_srcu, snapshot->srcu))
+               synchronize_srcu(&tracepoint_srcu);
+       snapshot->ongoing = false;
+}
+
 /* Set to 1 to enable tracepoint debug output */
 static const int tracepoint_debug;
 
@@ -246,26 +291,29 @@ static void *func_remove(struct tracepoint_func **funcs,
        return old;
 }
 
-static void tracepoint_update_call(struct tracepoint *tp, struct tracepoint_func *tp_funcs, bool sync)
+/*
+ * Count the number of functions (enum tp_func_state) in a tp_funcs array.
+ */
+static enum tp_func_state nr_func_state(const struct tracepoint_func *tp_funcs)
+{
+       if (!tp_funcs)
+               return TP_FUNC_0;
+       if (!tp_funcs[1].func)
+               return TP_FUNC_1;
+       if (!tp_funcs[2].func)
+               return TP_FUNC_2;
+       return TP_FUNC_N;       /* 3 or more */
+}
+
+static void tracepoint_update_call(struct tracepoint *tp, struct tracepoint_func *tp_funcs)
 {
        void *func = tp->iterator;
 
        /* Synthetic events do not have static call sites */
        if (!tp->static_call_key)
                return;
-
-       if (!tp_funcs[1].func) {
+       if (nr_func_state(tp_funcs) == TP_FUNC_1)
                func = tp_funcs[0].func;
-               /*
-                * If going from the iterator back to a single caller,
-                * we need to synchronize with __DO_TRACE to make sure
-                * that the data passed to the callback is the one that
-                * belongs to that callback.
-                */
-               if (sync)
-                       tracepoint_synchronize_unregister();
-       }
-
        __static_call_update(tp->static_call_key, tp->static_call_tramp, func);
 }
 
@@ -299,9 +347,41 @@ static int tracepoint_add_func(struct tracepoint *tp,
         * a pointer to it.  This array is referenced by __DO_TRACE from
         * include/linux/tracepoint.h using rcu_dereference_sched().
         */
-       tracepoint_update_call(tp, tp_funcs, false);
-       rcu_assign_pointer(tp->funcs, tp_funcs);
-       static_key_enable(&tp->key);
+       switch (nr_func_state(tp_funcs)) {
+       case TP_FUNC_1:         /* 0->1 */
+               /*
+                * Make sure new static func never uses old data after a
+                * 1->0->1 transition sequence.
+                */
+               tp_rcu_cond_sync(TP_TRANSITION_SYNC_1_0_1);
+               /* Set static call to first function */
+               tracepoint_update_call(tp, tp_funcs);
+               /* Both iterator and static call handle NULL tp->funcs */
+               rcu_assign_pointer(tp->funcs, tp_funcs);
+               static_key_enable(&tp->key);
+               break;
+       case TP_FUNC_2:         /* 1->2 */
+               /* Set iterator static call */
+               tracepoint_update_call(tp, tp_funcs);
+               /*
+                * Iterator callback installed before updating tp->funcs.
+                * Requires ordering between RCU assign/dereference and
+                * static call update/call.
+                */
+               fallthrough;
+       case TP_FUNC_N:         /* N->N+1 (N>1) */
+               rcu_assign_pointer(tp->funcs, tp_funcs);
+               /*
+                * Make sure static func never uses incorrect data after a
+                * N->...->2->1 (N>1) transition sequence.
+                */
+               if (tp_funcs[0].data != old[0].data)
+                       tp_rcu_get_state(TP_TRANSITION_SYNC_N_2_1);
+               break;
+       default:
+               WARN_ON_ONCE(1);
+               break;
+       }
 
        release_probes(old);
        return 0;
@@ -328,17 +408,52 @@ static int tracepoint_remove_func(struct tracepoint *tp,
                /* Failed allocating new tp_funcs, replaced func with stub */
                return 0;
 
-       if (!tp_funcs) {
+       switch (nr_func_state(tp_funcs)) {
+       case TP_FUNC_0:         /* 1->0 */
                /* Removed last function */
                if (tp->unregfunc && static_key_enabled(&tp->key))
                        tp->unregfunc();
 
                static_key_disable(&tp->key);
+               /* Set iterator static call */
+               tracepoint_update_call(tp, tp_funcs);
+               /* Both iterator and static call handle NULL tp->funcs */
+               rcu_assign_pointer(tp->funcs, NULL);
+               /*
+                * Make sure new static func never uses old data after a
+                * 1->0->1 transition sequence.
+                */
+               tp_rcu_get_state(TP_TRANSITION_SYNC_1_0_1);
+               break;
+       case TP_FUNC_1:         /* 2->1 */
                rcu_assign_pointer(tp->funcs, tp_funcs);
-       } else {
+               /*
+                * Make sure static func never uses incorrect data after a
+                * N->...->2->1 (N>2) transition sequence. If the first
+                * element's data has changed, then force the synchronization
+                * to prevent current readers that have loaded the old data
+                * from calling the new function.
+                */
+               if (tp_funcs[0].data != old[0].data)
+                       tp_rcu_get_state(TP_TRANSITION_SYNC_N_2_1);
+               tp_rcu_cond_sync(TP_TRANSITION_SYNC_N_2_1);
+               /* Set static call to first function */
+               tracepoint_update_call(tp, tp_funcs);
+               break;
+       case TP_FUNC_2:         /* N->N-1 (N>2) */
+               fallthrough;
+       case TP_FUNC_N:
                rcu_assign_pointer(tp->funcs, tp_funcs);
-               tracepoint_update_call(tp, tp_funcs,
-                                      tp_funcs[0].func != old[0].func);
+               /*
+                * Make sure static func never uses incorrect data after a
+                * N->...->2->1 (N>2) transition sequence.
+                */
+               if (tp_funcs[0].data != old[0].data)
+                       tp_rcu_get_state(TP_TRANSITION_SYNC_N_2_1);
+               break;
+       default:
+               WARN_ON_ONCE(1);
+               break;
        }
        release_probes(old);
        return 0;
index 87799e2..bb51849 100644 (file)
@@ -58,14 +58,17 @@ static struct ctl_table_root set_root = {
        .permissions = set_permissions,
 };
 
-#define UCOUNT_ENTRY(name)                             \
-       {                                               \
-               .procname       = name,                 \
-               .maxlen         = sizeof(int),          \
-               .mode           = 0644,                 \
-               .proc_handler   = proc_dointvec_minmax, \
-               .extra1         = SYSCTL_ZERO,          \
-               .extra2         = SYSCTL_INT_MAX,       \
+static long ue_zero = 0;
+static long ue_int_max = INT_MAX;
+
+#define UCOUNT_ENTRY(name)                                     \
+       {                                                       \
+               .procname       = name,                         \
+               .maxlen         = sizeof(long),                 \
+               .mode           = 0644,                         \
+               .proc_handler   = proc_doulongvec_minmax,       \
+               .extra1         = &ue_zero,                     \
+               .extra2         = &ue_int_max,                  \
        }
 static struct ctl_table user_table[] = {
        UCOUNT_ENTRY("max_user_namespaces"),
@@ -160,6 +163,7 @@ struct ucounts *alloc_ucounts(struct user_namespace *ns, kuid_t uid)
 {
        struct hlist_head *hashent = ucounts_hashentry(ns, uid);
        struct ucounts *ucounts, *new;
+       long overflow;
 
        spin_lock_irq(&ucounts_lock);
        ucounts = find_ucounts(ns, uid, hashent);
@@ -184,8 +188,12 @@ struct ucounts *alloc_ucounts(struct user_namespace *ns, kuid_t uid)
                        return new;
                }
        }
+       overflow = atomic_add_negative(1, &ucounts->count);
        spin_unlock_irq(&ucounts_lock);
-       ucounts = get_ucounts(ucounts);
+       if (overflow) {
+               put_ucounts(ucounts);
+               return NULL;
+       }
        return ucounts;
 }
 
@@ -193,8 +201,7 @@ void put_ucounts(struct ucounts *ucounts)
 {
        unsigned long flags;
 
-       if (atomic_dec_and_test(&ucounts->count)) {
-               spin_lock_irqsave(&ucounts_lock, flags);
+       if (atomic_dec_and_lock_irqsave(&ucounts->count, &ucounts_lock, flags)) {
                hlist_del_init(&ucounts->node);
                spin_unlock_irqrestore(&ucounts_lock, flags);
                kfree(ucounts);
index d241fe4..5c9c068 100644 (file)
@@ -683,9 +683,6 @@ config PARMAN
 config OBJAGG
        tristate "objagg" if COMPILE_TEST
 
-config STRING_SELFTEST
-       tristate "Test string functions"
-
 endmenu
 
 config GENERIC_IOREMAP
index 8312127..73604bf 100644 (file)
@@ -1235,7 +1235,7 @@ config PROVE_LOCKING
        depends on DEBUG_KERNEL && LOCK_DEBUGGING_SUPPORT
        select LOCKDEP
        select DEBUG_SPINLOCK
-       select DEBUG_MUTEXES
+       select DEBUG_MUTEXES if !PREEMPT_RT
        select DEBUG_RT_MUTEXES if RT_MUTEXES
        select DEBUG_RWSEMS
        select DEBUG_WW_MUTEX_SLOWPATH
@@ -1299,7 +1299,7 @@ config LOCK_STAT
        depends on DEBUG_KERNEL && LOCK_DEBUGGING_SUPPORT
        select LOCKDEP
        select DEBUG_SPINLOCK
-       select DEBUG_MUTEXES
+       select DEBUG_MUTEXES if !PREEMPT_RT
        select DEBUG_RT_MUTEXES if RT_MUTEXES
        select DEBUG_LOCK_ALLOC
        default n
@@ -1335,7 +1335,7 @@ config DEBUG_SPINLOCK
 
 config DEBUG_MUTEXES
        bool "Mutex debugging: basic checks"
-       depends on DEBUG_KERNEL
+       depends on DEBUG_KERNEL && !PREEMPT_RT
        help
         This feature allows mutex semantics violations to be detected and
         reported.
@@ -1345,7 +1345,8 @@ config DEBUG_WW_MUTEX_SLOWPATH
        depends on DEBUG_KERNEL && LOCK_DEBUGGING_SUPPORT
        select DEBUG_LOCK_ALLOC
        select DEBUG_SPINLOCK
-       select DEBUG_MUTEXES
+       select DEBUG_MUTEXES if !PREEMPT_RT
+       select DEBUG_RT_MUTEXES if PREEMPT_RT
        help
         This feature enables slowpath testing for w/w mutex users by
         injecting additional -EDEADLK wound/backoff cases. Together with
@@ -1368,7 +1369,7 @@ config DEBUG_LOCK_ALLOC
        bool "Lock debugging: detect incorrect freeing of live locks"
        depends on DEBUG_KERNEL && LOCK_DEBUGGING_SUPPORT
        select DEBUG_SPINLOCK
-       select DEBUG_MUTEXES
+       select DEBUG_MUTEXES if !PREEMPT_RT
        select DEBUG_RT_MUTEXES if RT_MUTEXES
        select LOCKDEP
        help
@@ -1679,33 +1680,6 @@ config DEBUG_WQ_FORCE_RR_CPU
          feature by default.  When enabled, memory and cache locality will
          be impacted.
 
-config DEBUG_BLOCK_EXT_DEVT
-       bool "Force extended block device numbers and spread them"
-       depends on DEBUG_KERNEL
-       depends on BLOCK
-       default n
-       help
-         BIG FAT WARNING: ENABLING THIS OPTION MIGHT BREAK BOOTING ON
-         SOME DISTRIBUTIONS.  DO NOT ENABLE THIS UNLESS YOU KNOW WHAT
-         YOU ARE DOING.  Distros, please enable this and fix whatever
-         is broken.
-
-         Conventionally, block device numbers are allocated from
-         predetermined contiguous area.  However, extended block area
-         may introduce non-contiguous block device numbers.  This
-         option forces most block device numbers to be allocated from
-         the extended space and spreads them to discover kernel or
-         userland code paths which assume predetermined contiguous
-         device number allocation.
-
-         Note that turning on this debug option shuffles all the
-         device numbers for all IDE and SCSI devices including libata
-         ones, so root partition specified using device number
-         directly (via rdev or root=MAJ:MIN) won't work anymore.
-         Textual device names (root=/dev/sdXn) will continue to work.
-
-         Say N if you are unsure.
-
 config CPU_HOTPLUG_STATE_CONTROL
        bool "Enable CPU hotplug state control"
        depends on DEBUG_KERNEL
@@ -2180,6 +2154,9 @@ config ASYNC_RAID6_TEST
 config TEST_HEXDUMP
        tristate "Test functions located in the hexdump module at runtime"
 
+config STRING_SELFTEST
+       tristate "Test string functions at runtime"
+
 config TEST_STRING_HELPERS
        tristate "Test functions located in the string_helpers module at runtime"
 
index 14c032d..545ccbd 100644 (file)
@@ -128,3 +128,6 @@ config CRYPTO_LIB_CHACHA20POLY1305
 
 config CRYPTO_LIB_SHA256
        tristate
+
+config CRYPTO_LIB_SM4
+       tristate
index 3a43562..73205ed 100644 (file)
@@ -38,6 +38,9 @@ libpoly1305-y                                 += poly1305.o
 obj-$(CONFIG_CRYPTO_LIB_SHA256)                        += libsha256.o
 libsha256-y                                    := sha256.o
 
+obj-$(CONFIG_CRYPTO_LIB_SM4)                   += libsm4.o
+libsm4-y                                       := sm4.o
+
 ifneq ($(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS),y)
 libblake2s-y                                   += blake2s-selftest.o
 libchacha20poly1305-y                          += chacha20poly1305-selftest.o
index c64ac8b..4055aa5 100644 (file)
@@ -73,7 +73,7 @@ void blake2s256_hmac(u8 *out, const u8 *in, const u8 *key, const size_t inlen,
 }
 EXPORT_SYMBOL(blake2s256_hmac);
 
-static int __init mod_init(void)
+static int __init blake2s_mod_init(void)
 {
        if (!IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS) &&
            WARN_ON(!blake2s_selftest()))
@@ -81,12 +81,12 @@ static int __init mod_init(void)
        return 0;
 }
 
-static void __exit mod_exit(void)
+static void __exit blake2s_mod_exit(void)
 {
 }
 
-module_init(mod_init);
-module_exit(mod_exit);
+module_init(blake2s_mod_init);
+module_exit(blake2s_mod_exit);
 MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("BLAKE2s hash function");
 MODULE_AUTHOR("Jason A. Donenfeld <Jason@zx2c4.com>");
index c2fcdb9..fa6a944 100644 (file)
@@ -354,7 +354,7 @@ bool chacha20poly1305_decrypt_sg_inplace(struct scatterlist *src, size_t src_len
 }
 EXPORT_SYMBOL(chacha20poly1305_decrypt_sg_inplace);
 
-static int __init mod_init(void)
+static int __init chacha20poly1305_init(void)
 {
        if (!IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS) &&
            WARN_ON(!chacha20poly1305_selftest()))
@@ -362,12 +362,12 @@ static int __init mod_init(void)
        return 0;
 }
 
-static void __exit mod_exit(void)
+static void __exit chacha20poly1305_exit(void)
 {
 }
 
-module_init(mod_init);
-module_exit(mod_exit);
+module_init(chacha20poly1305_init);
+module_exit(chacha20poly1305_exit);
 MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("ChaCha20Poly1305 AEAD construction");
 MODULE_AUTHOR("Jason A. Donenfeld <Jason@zx2c4.com>");
index fb29739..064b352 100644 (file)
@@ -13,7 +13,7 @@
 #include <linux/module.h>
 #include <linux/init.h>
 
-static int __init mod_init(void)
+static int __init curve25519_init(void)
 {
        if (!IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS) &&
            WARN_ON(!curve25519_selftest()))
@@ -21,12 +21,12 @@ static int __init mod_init(void)
        return 0;
 }
 
-static void __exit mod_exit(void)
+static void __exit curve25519_exit(void)
 {
 }
 
-module_init(mod_init);
-module_exit(mod_exit);
+module_init(curve25519_init);
+module_exit(curve25519_exit);
 
 MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("Curve25519 scalar multiplication");
diff --git a/lib/crypto/sm4.c b/lib/crypto/sm4.c
new file mode 100644 (file)
index 0000000..633b59f
--- /dev/null
@@ -0,0 +1,176 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * SM4, as specified in
+ * https://tools.ietf.org/id/draft-ribose-cfrg-sm4-10.html
+ *
+ * Copyright (C) 2018 ARM Limited or its affiliates.
+ * Copyright (c) 2021 Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
+ */
+
+#include <linux/module.h>
+#include <asm/unaligned.h>
+#include <crypto/sm4.h>
+
+static const u32 fk[4] = {
+       0xa3b1bac6, 0x56aa3350, 0x677d9197, 0xb27022dc
+};
+
+static const u32 __cacheline_aligned ck[32] = {
+       0x00070e15, 0x1c232a31, 0x383f464d, 0x545b6269,
+       0x70777e85, 0x8c939aa1, 0xa8afb6bd, 0xc4cbd2d9,
+       0xe0e7eef5, 0xfc030a11, 0x181f262d, 0x343b4249,
+       0x50575e65, 0x6c737a81, 0x888f969d, 0xa4abb2b9,
+       0xc0c7ced5, 0xdce3eaf1, 0xf8ff060d, 0x141b2229,
+       0x30373e45, 0x4c535a61, 0x686f767d, 0x848b9299,
+       0xa0a7aeb5, 0xbcc3cad1, 0xd8dfe6ed, 0xf4fb0209,
+       0x10171e25, 0x2c333a41, 0x484f565d, 0x646b7279
+};
+
+static const u8 __cacheline_aligned sbox[256] = {
+       0xd6, 0x90, 0xe9, 0xfe, 0xcc, 0xe1, 0x3d, 0xb7,
+       0x16, 0xb6, 0x14, 0xc2, 0x28, 0xfb, 0x2c, 0x05,
+       0x2b, 0x67, 0x9a, 0x76, 0x2a, 0xbe, 0x04, 0xc3,
+       0xaa, 0x44, 0x13, 0x26, 0x49, 0x86, 0x06, 0x99,
+       0x9c, 0x42, 0x50, 0xf4, 0x91, 0xef, 0x98, 0x7a,
+       0x33, 0x54, 0x0b, 0x43, 0xed, 0xcf, 0xac, 0x62,
+       0xe4, 0xb3, 0x1c, 0xa9, 0xc9, 0x08, 0xe8, 0x95,
+       0x80, 0xdf, 0x94, 0xfa, 0x75, 0x8f, 0x3f, 0xa6,
+       0x47, 0x07, 0xa7, 0xfc, 0xf3, 0x73, 0x17, 0xba,
+       0x83, 0x59, 0x3c, 0x19, 0xe6, 0x85, 0x4f, 0xa8,
+       0x68, 0x6b, 0x81, 0xb2, 0x71, 0x64, 0xda, 0x8b,
+       0xf8, 0xeb, 0x0f, 0x4b, 0x70, 0x56, 0x9d, 0x35,
+       0x1e, 0x24, 0x0e, 0x5e, 0x63, 0x58, 0xd1, 0xa2,
+       0x25, 0x22, 0x7c, 0x3b, 0x01, 0x21, 0x78, 0x87,
+       0xd4, 0x00, 0x46, 0x57, 0x9f, 0xd3, 0x27, 0x52,
+       0x4c, 0x36, 0x02, 0xe7, 0xa0, 0xc4, 0xc8, 0x9e,
+       0xea, 0xbf, 0x8a, 0xd2, 0x40, 0xc7, 0x38, 0xb5,
+       0xa3, 0xf7, 0xf2, 0xce, 0xf9, 0x61, 0x15, 0xa1,
+       0xe0, 0xae, 0x5d, 0xa4, 0x9b, 0x34, 0x1a, 0x55,
+       0xad, 0x93, 0x32, 0x30, 0xf5, 0x8c, 0xb1, 0xe3,
+       0x1d, 0xf6, 0xe2, 0x2e, 0x82, 0x66, 0xca, 0x60,
+       0xc0, 0x29, 0x23, 0xab, 0x0d, 0x53, 0x4e, 0x6f,
+       0xd5, 0xdb, 0x37, 0x45, 0xde, 0xfd, 0x8e, 0x2f,
+       0x03, 0xff, 0x6a, 0x72, 0x6d, 0x6c, 0x5b, 0x51,
+       0x8d, 0x1b, 0xaf, 0x92, 0xbb, 0xdd, 0xbc, 0x7f,
+       0x11, 0xd9, 0x5c, 0x41, 0x1f, 0x10, 0x5a, 0xd8,
+       0x0a, 0xc1, 0x31, 0x88, 0xa5, 0xcd, 0x7b, 0xbd,
+       0x2d, 0x74, 0xd0, 0x12, 0xb8, 0xe5, 0xb4, 0xb0,
+       0x89, 0x69, 0x97, 0x4a, 0x0c, 0x96, 0x77, 0x7e,
+       0x65, 0xb9, 0xf1, 0x09, 0xc5, 0x6e, 0xc6, 0x84,
+       0x18, 0xf0, 0x7d, 0xec, 0x3a, 0xdc, 0x4d, 0x20,
+       0x79, 0xee, 0x5f, 0x3e, 0xd7, 0xcb, 0x39, 0x48
+};
+
+static inline u32 sm4_t_non_lin_sub(u32 x)
+{
+       u32 out;
+
+       out  = (u32)sbox[x & 0xff];
+       out |= (u32)sbox[(x >> 8) & 0xff] << 8;
+       out |= (u32)sbox[(x >> 16) & 0xff] << 16;
+       out |= (u32)sbox[(x >> 24) & 0xff] << 24;
+
+       return out;
+}
+
+static inline u32 sm4_key_lin_sub(u32 x)
+{
+       return x ^ rol32(x, 13) ^ rol32(x, 23);
+}
+
+static inline u32 sm4_enc_lin_sub(u32 x)
+{
+       return x ^ rol32(x, 2) ^ rol32(x, 10) ^ rol32(x, 18) ^ rol32(x, 24);
+}
+
+static inline u32 sm4_key_sub(u32 x)
+{
+       return sm4_key_lin_sub(sm4_t_non_lin_sub(x));
+}
+
+static inline u32 sm4_enc_sub(u32 x)
+{
+       return sm4_enc_lin_sub(sm4_t_non_lin_sub(x));
+}
+
+static inline u32 sm4_round(u32 x0, u32 x1, u32 x2, u32 x3, u32 rk)
+{
+       return x0 ^ sm4_enc_sub(x1 ^ x2 ^ x3 ^ rk);
+}
+
+
+/**
+ * sm4_expandkey - Expands the SM4 key as described in GB/T 32907-2016
+ * @ctx:       The location where the computed key will be stored.
+ * @in_key:    The supplied key.
+ * @key_len:   The length of the supplied key.
+ *
+ * Returns 0 on success. The function fails only if an invalid key size (or
+ * pointer) is supplied.
+ */
+int sm4_expandkey(struct sm4_ctx *ctx, const u8 *in_key,
+                         unsigned int key_len)
+{
+       u32 rk[4];
+       const u32 *key = (u32 *)in_key;
+       int i;
+
+       if (key_len != SM4_KEY_SIZE)
+               return -EINVAL;
+
+       rk[0] = get_unaligned_be32(&key[0]) ^ fk[0];
+       rk[1] = get_unaligned_be32(&key[1]) ^ fk[1];
+       rk[2] = get_unaligned_be32(&key[2]) ^ fk[2];
+       rk[3] = get_unaligned_be32(&key[3]) ^ fk[3];
+
+       for (i = 0; i < 32; i += 4) {
+               rk[0] ^= sm4_key_sub(rk[1] ^ rk[2] ^ rk[3] ^ ck[i + 0]);
+               rk[1] ^= sm4_key_sub(rk[2] ^ rk[3] ^ rk[0] ^ ck[i + 1]);
+               rk[2] ^= sm4_key_sub(rk[3] ^ rk[0] ^ rk[1] ^ ck[i + 2]);
+               rk[3] ^= sm4_key_sub(rk[0] ^ rk[1] ^ rk[2] ^ ck[i + 3]);
+
+               ctx->rkey_enc[i + 0] = rk[0];
+               ctx->rkey_enc[i + 1] = rk[1];
+               ctx->rkey_enc[i + 2] = rk[2];
+               ctx->rkey_enc[i + 3] = rk[3];
+               ctx->rkey_dec[31 - 0 - i] = rk[0];
+               ctx->rkey_dec[31 - 1 - i] = rk[1];
+               ctx->rkey_dec[31 - 2 - i] = rk[2];
+               ctx->rkey_dec[31 - 3 - i] = rk[3];
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(sm4_expandkey);
+
+/**
+ * sm4_crypt_block - Encrypt or decrypt a single SM4 block
+ * @rk:                The rkey_enc for encrypt or rkey_dec for decrypt
+ * @out:       Buffer to store output data
+ * @in:        Buffer containing the input data
+ */
+void sm4_crypt_block(const u32 *rk, u8 *out, const u8 *in)
+{
+       u32 x[4], i;
+
+       x[0] = get_unaligned_be32(in + 0 * 4);
+       x[1] = get_unaligned_be32(in + 1 * 4);
+       x[2] = get_unaligned_be32(in + 2 * 4);
+       x[3] = get_unaligned_be32(in + 3 * 4);
+
+       for (i = 0; i < 32; i += 4) {
+               x[0] = sm4_round(x[0], x[1], x[2], x[3], rk[i + 0]);
+               x[1] = sm4_round(x[1], x[2], x[3], x[0], rk[i + 1]);
+               x[2] = sm4_round(x[2], x[3], x[0], x[1], rk[i + 2]);
+               x[3] = sm4_round(x[3], x[0], x[1], x[2], rk[i + 3]);
+       }
+
+       put_unaligned_be32(x[3 - 0], out + 0 * 4);
+       put_unaligned_be32(x[3 - 1], out + 1 * 4);
+       put_unaligned_be32(x[3 - 2], out + 2 * 4);
+       put_unaligned_be32(x[3 - 3], out + 3 * 4);
+}
+EXPORT_SYMBOL_GPL(sm4_crypt_block);
+
+MODULE_DESCRIPTION("Generic SM4 library");
+MODULE_LICENSE("GPL v2");
index 9e14ae0..6946f8e 100644 (file)
@@ -557,7 +557,12 @@ __debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack
        struct debug_obj *obj;
        unsigned long flags;
 
-       fill_pool();
+       /*
+        * On RT enabled kernels the pool refill must happen in preemptible
+        * context:
+        */
+       if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible())
+               fill_pool();
 
        db = get_bucket((unsigned long) addr);
 
index c0d67c5..60be9e2 100644 (file)
@@ -19,7 +19,7 @@
  */
 int devmem_is_allowed(unsigned long pfn)
 {
-       if (iomem_is_exclusive(pfn << PAGE_SHIFT))
+       if (iomem_is_exclusive(PFN_PHYS(pfn)))
                return 0;
        if (!page_is_ram(pfn))
                return 1;
index ced5c15..a1a7dfa 100644 (file)
@@ -241,5 +241,36 @@ int linear_range_get_selector_high(const struct linear_range *r,
 }
 EXPORT_SYMBOL_GPL(linear_range_get_selector_high);
 
+/**
+ * linear_range_get_selector_within - return linear range selector for value
+ * @r:         pointer to linear range where selector is looked from
+ * @val:       value for which the selector is searched
+ * @selector:  address where found selector value is updated
+ *
+ * Return selector for which range value is closest match for given
+ * input value. Value is matching if it is equal or lower than given
+ * value. But return maximum selector if given value is higher than
+ * maximum value.
+ */
+void linear_range_get_selector_within(const struct linear_range *r,
+                                     unsigned int val, unsigned int *selector)
+{
+       if (r->min > val) {
+               *selector = r->min_sel;
+               return;
+       }
+
+       if (linear_range_get_max_value(r) < val) {
+               *selector = r->max_sel;
+               return;
+       }
+
+       if (r->step == 0)
+               *selector = r->min_sel;
+       else
+               *selector = (val - r->min) / r->step + r->min_sel;
+}
+EXPORT_SYMBOL_GPL(linear_range_get_selector_within);
+
 MODULE_DESCRIPTION("linear-ranges helper");
 MODULE_LICENSE("GPL");
index 9a75ca3..bc81419 100644 (file)
@@ -148,7 +148,7 @@ int mpi_resize(MPI a, unsigned nlimbs)
                return 0;       /* no need to do it */
 
        if (a->d) {
-               p = kmalloc_array(nlimbs, sizeof(mpi_limb_t), GFP_KERNEL);
+               p = kcalloc(nlimbs, sizeof(mpi_limb_t), GFP_KERNEL);
                if (!p)
                        return -ENOMEM;
                memcpy(p, a->d, a->alloced * sizeof(mpi_limb_t));
index 8b7d623..59149bf 100644 (file)
@@ -3,10 +3,12 @@
 #include <linux/spinlock.h>
 #include <linux/once.h>
 #include <linux/random.h>
+#include <linux/module.h>
 
 struct once_work {
        struct work_struct work;
        struct static_key_true *key;
+       struct module *module;
 };
 
 static void once_deferred(struct work_struct *w)
@@ -16,10 +18,11 @@ static void once_deferred(struct work_struct *w)
        work = container_of(w, struct once_work, work);
        BUG_ON(!static_key_enabled(work->key));
        static_branch_disable(work->key);
+       module_put(work->module);
        kfree(work);
 }
 
-static void once_disable_jump(struct static_key_true *key)
+static void once_disable_jump(struct static_key_true *key, struct module *mod)
 {
        struct once_work *w;
 
@@ -29,6 +32,8 @@ static void once_disable_jump(struct static_key_true *key)
 
        INIT_WORK(&w->work, once_deferred);
        w->key = key;
+       w->module = mod;
+       __module_get(mod);
        schedule_work(&w->work);
 }
 
@@ -53,11 +58,11 @@ bool __do_once_start(bool *done, unsigned long *flags)
 EXPORT_SYMBOL(__do_once_start);
 
 void __do_once_done(bool *done, struct static_key_true *once_key,
-                   unsigned long *flags)
+                   unsigned long *flags, struct module *mod)
        __releases(once_lock)
 {
        *done = true;
        spin_unlock_irqrestore(&once_lock, *flags);
-       once_disable_jump(once_key);
+       once_disable_jump(once_key, mod);
 }
 EXPORT_SYMBOL(__do_once_done);
index 77bd0b1..b2de45a 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/errno.h>
 #include <linux/slab.h>
 
+#include <asm/unaligned.h>
 #include <asm/byteorder.h>
 #include <asm/word-at-a-time.h>
 #include <asm/page.h>
@@ -935,6 +936,21 @@ __visible int memcmp(const void *cs, const void *ct, size_t count)
        const unsigned char *su1, *su2;
        int res = 0;
 
+#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+       if (count >= sizeof(unsigned long)) {
+               const unsigned long *u1 = cs;
+               const unsigned long *u2 = ct;
+               do {
+                       if (get_unaligned(u1) != get_unaligned(u2))
+                               break;
+                       u1++;
+                       u2++;
+                       count -= sizeof(unsigned long);
+               } while (count >= sizeof(unsigned long));
+               cs = u1;
+               ct = u2;
+       }
+#endif
        for (su1 = cs, su2 = ct; 0 < count; ++su1, ++su2, count--)
                if ((res = *su1 - *su2) != 0)
                        break;
index 864554e..906b598 100644 (file)
@@ -485,13 +485,13 @@ static int __init test_lockup_init(void)
                       offsetof(spinlock_t, lock.wait_lock.magic),
                       SPINLOCK_MAGIC) ||
            test_magic(lock_rwlock_ptr,
-                      offsetof(rwlock_t, rtmutex.wait_lock.magic),
+                      offsetof(rwlock_t, rwbase.rtmutex.wait_lock.magic),
                       SPINLOCK_MAGIC) ||
            test_magic(lock_mutex_ptr,
-                      offsetof(struct mutex, lock.wait_lock.magic),
+                      offsetof(struct mutex, rtmutex.wait_lock.magic),
                       SPINLOCK_MAGIC) ||
            test_magic(lock_rwsem_ptr,
-                      offsetof(struct rw_semaphore, rtmutex.wait_lock.magic),
+                      offsetof(struct rw_semaphore, rwbase.rtmutex.wait_lock.magic),
                       SPINLOCK_MAGIC))
                return -EINVAL;
 #else
@@ -502,7 +502,7 @@ static int __init test_lockup_init(void)
                       offsetof(rwlock_t, magic),
                       RWLOCK_MAGIC) ||
            test_magic(lock_mutex_ptr,
-                      offsetof(struct mutex, wait_lock.rlock.magic),
+                      offsetof(struct mutex, wait_lock.magic),
                       SPINLOCK_MAGIC) ||
            test_magic(lock_rwsem_ptr,
                       offsetof(struct rw_semaphore, wait_lock.magic),
index f5561ea..cd06dca 100644 (file)
@@ -807,6 +807,7 @@ struct backing_dev_info *bdi_alloc(int node_id)
        bdi->capabilities = BDI_CAP_WRITEBACK | BDI_CAP_WRITEBACK_ACCT;
        bdi->ra_pages = VM_READAHEAD_PAGES;
        bdi->io_pages = VM_READAHEAD_PAGES;
+       timer_setup(&bdi->laptop_mode_wb_timer, laptop_mode_timer_fn, 0);
        return bdi;
 }
 EXPORT_SYMBOL(bdi_alloc);
@@ -928,6 +929,8 @@ static void bdi_remove_from_list(struct backing_dev_info *bdi)
 
 void bdi_unregister(struct backing_dev_info *bdi)
 {
+       del_timer_sync(&bdi->laptop_mode_wb_timer);
+
        /* make sure nobody finds us on the bdi_list anymore */
        bdi_remove_from_list(bdi);
        wb_shutdown(&bdi->wb);
index d1458ec..0fad083 100644 (file)
@@ -76,8 +76,9 @@
  *      ->swap_lock            (exclusive_swap_page, others)
  *        ->i_pages lock
  *
- *  ->i_mutex
- *    ->i_mmap_rwsem           (truncate->unmap_mapping_range)
+ *  ->i_rwsem
+ *    ->invalidate_lock                (acquired by fs in truncate path)
+ *      ->i_mmap_rwsem         (truncate->unmap_mapping_range)
  *
  *  ->mmap_lock
  *    ->i_mmap_rwsem
  *        ->i_pages lock       (arch-dependent flush_dcache_mmap_lock)
  *
  *  ->mmap_lock
- *    ->lock_page              (access_process_vm)
+ *    ->invalidate_lock                (filemap_fault)
+ *      ->lock_page            (filemap_fault, access_process_vm)
  *
- *  ->i_mutex                  (generic_perform_write)
+ *  ->i_rwsem                  (generic_perform_write)
  *    ->mmap_lock              (fault_in_pages_readable->do_page_fault)
  *
  *  bdi->wb.list_lock
@@ -1007,6 +1009,44 @@ struct page *__page_cache_alloc(gfp_t gfp)
 EXPORT_SYMBOL(__page_cache_alloc);
 #endif
 
+/*
+ * filemap_invalidate_lock_two - lock invalidate_lock for two mappings
+ *
+ * Lock exclusively invalidate_lock of any passed mapping that is not NULL.
+ *
+ * @mapping1: the first mapping to lock
+ * @mapping2: the second mapping to lock
+ */
+void filemap_invalidate_lock_two(struct address_space *mapping1,
+                                struct address_space *mapping2)
+{
+       if (mapping1 > mapping2)
+               swap(mapping1, mapping2);
+       if (mapping1)
+               down_write(&mapping1->invalidate_lock);
+       if (mapping2 && mapping1 != mapping2)
+               down_write_nested(&mapping2->invalidate_lock, 1);
+}
+EXPORT_SYMBOL(filemap_invalidate_lock_two);
+
+/*
+ * filemap_invalidate_unlock_two - unlock invalidate_lock for two mappings
+ *
+ * Unlock exclusive invalidate_lock of any passed mapping that is not NULL.
+ *
+ * @mapping1: the first mapping to unlock
+ * @mapping2: the second mapping to unlock
+ */
+void filemap_invalidate_unlock_two(struct address_space *mapping1,
+                                  struct address_space *mapping2)
+{
+       if (mapping1)
+               up_write(&mapping1->invalidate_lock);
+       if (mapping2 && mapping1 != mapping2)
+               up_write(&mapping2->invalidate_lock);
+}
+EXPORT_SYMBOL(filemap_invalidate_unlock_two);
+
 /*
  * In order to wait for pages to become available there must be
  * waitqueues associated with pages. By using a hash table of
@@ -2368,20 +2408,30 @@ static int filemap_update_page(struct kiocb *iocb,
 {
        int error;
 
+       if (iocb->ki_flags & IOCB_NOWAIT) {
+               if (!filemap_invalidate_trylock_shared(mapping))
+                       return -EAGAIN;
+       } else {
+               filemap_invalidate_lock_shared(mapping);
+       }
+
        if (!trylock_page(page)) {
+               error = -EAGAIN;
                if (iocb->ki_flags & (IOCB_NOWAIT | IOCB_NOIO))
-                       return -EAGAIN;
+                       goto unlock_mapping;
                if (!(iocb->ki_flags & IOCB_WAITQ)) {
+                       filemap_invalidate_unlock_shared(mapping);
                        put_and_wait_on_page_locked(page, TASK_KILLABLE);
                        return AOP_TRUNCATED_PAGE;
                }
                error = __lock_page_async(page, iocb->ki_waitq);
                if (error)
-                       return error;
+                       goto unlock_mapping;
        }
 
+       error = AOP_TRUNCATED_PAGE;
        if (!page->mapping)
-               goto truncated;
+               goto unlock;
 
        error = 0;
        if (filemap_range_uptodate(mapping, iocb->ki_pos, iter, page))
@@ -2392,15 +2442,13 @@ static int filemap_update_page(struct kiocb *iocb,
                goto unlock;
 
        error = filemap_read_page(iocb->ki_filp, mapping, page);
-       if (error == AOP_TRUNCATED_PAGE)
-               put_page(page);
-       return error;
-truncated:
-       unlock_page(page);
-       put_page(page);
-       return AOP_TRUNCATED_PAGE;
+       goto unlock_mapping;
 unlock:
        unlock_page(page);
+unlock_mapping:
+       filemap_invalidate_unlock_shared(mapping);
+       if (error == AOP_TRUNCATED_PAGE)
+               put_page(page);
        return error;
 }
 
@@ -2415,6 +2463,19 @@ static int filemap_create_page(struct file *file,
        if (!page)
                return -ENOMEM;
 
+       /*
+        * Protect against truncate / hole punch. Grabbing invalidate_lock here
+        * assures we cannot instantiate and bring uptodate new pagecache pages
+        * after evicting page cache during truncate and before actually
+        * freeing blocks.  Note that we could release invalidate_lock after
+        * inserting the page into page cache as the locked page would then be
+        * enough to synchronize with hole punching. But there are code paths
+        * such as filemap_update_page() filling in partially uptodate pages or
+        * ->readpages() that need to hold invalidate_lock while mapping blocks
+        * for IO so let's hold the lock here as well to keep locking rules
+        * simple.
+        */
+       filemap_invalidate_lock_shared(mapping);
        error = add_to_page_cache_lru(page, mapping, index,
                        mapping_gfp_constraint(mapping, GFP_KERNEL));
        if (error == -EEXIST)
@@ -2426,9 +2487,11 @@ static int filemap_create_page(struct file *file,
        if (error)
                goto error;
 
+       filemap_invalidate_unlock_shared(mapping);
        pagevec_add(pvec, page);
        return 0;
 error:
+       filemap_invalidate_unlock_shared(mapping);
        put_page(page);
        return error;
 }
@@ -2967,6 +3030,7 @@ vm_fault_t filemap_fault(struct vm_fault *vmf)
        pgoff_t max_off;
        struct page *page;
        vm_fault_t ret = 0;
+       bool mapping_locked = false;
 
        max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
        if (unlikely(offset >= max_off))
@@ -2976,25 +3040,39 @@ vm_fault_t filemap_fault(struct vm_fault *vmf)
         * Do we have something in the page cache already?
         */
        page = find_get_page(mapping, offset);
-       if (likely(page) && !(vmf->flags & FAULT_FLAG_TRIED)) {
+       if (likely(page)) {
                /*
-                * We found the page, so try async readahead before
-                * waiting for the lock.
+                * We found the page, so try async readahead before waiting for
+                * the lock.
                 */
-               fpin = do_async_mmap_readahead(vmf, page);
-       } else if (!page) {
+               if (!(vmf->flags & FAULT_FLAG_TRIED))
+                       fpin = do_async_mmap_readahead(vmf, page);
+               if (unlikely(!PageUptodate(page))) {
+                       filemap_invalidate_lock_shared(mapping);
+                       mapping_locked = true;
+               }
+       } else {
                /* No page in the page cache at all */
                count_vm_event(PGMAJFAULT);
                count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT);
                ret = VM_FAULT_MAJOR;
                fpin = do_sync_mmap_readahead(vmf);
 retry_find:
+               /*
+                * See comment in filemap_create_page() why we need
+                * invalidate_lock
+                */
+               if (!mapping_locked) {
+                       filemap_invalidate_lock_shared(mapping);
+                       mapping_locked = true;
+               }
                page = pagecache_get_page(mapping, offset,
                                          FGP_CREAT|FGP_FOR_MMAP,
                                          vmf->gfp_mask);
                if (!page) {
                        if (fpin)
                                goto out_retry;
+                       filemap_invalidate_unlock_shared(mapping);
                        return VM_FAULT_OOM;
                }
        }
@@ -3014,8 +3092,20 @@ retry_find:
         * We have a locked page in the page cache, now we need to check
         * that it's up-to-date. If not, it is going to be due to an error.
         */
-       if (unlikely(!PageUptodate(page)))
+       if (unlikely(!PageUptodate(page))) {
+               /*
+                * The page was in cache and uptodate and now it is not.
+                * Strange but possible since we didn't hold the page lock all
+                * the time. Let's drop everything get the invalidate lock and
+                * try again.
+                */
+               if (!mapping_locked) {
+                       unlock_page(page);
+                       put_page(page);
+                       goto retry_find;
+               }
                goto page_not_uptodate;
+       }
 
        /*
         * We've made it this far and we had to drop our mmap_lock, now is the
@@ -3026,6 +3116,8 @@ retry_find:
                unlock_page(page);
                goto out_retry;
        }
+       if (mapping_locked)
+               filemap_invalidate_unlock_shared(mapping);
 
        /*
         * Found the page and have a reference on it.
@@ -3056,6 +3148,7 @@ page_not_uptodate:
 
        if (!error || error == AOP_TRUNCATED_PAGE)
                goto retry_find;
+       filemap_invalidate_unlock_shared(mapping);
 
        return VM_FAULT_SIGBUS;
 
@@ -3067,6 +3160,8 @@ out_retry:
         */
        if (page)
                put_page(page);
+       if (mapping_locked)
+               filemap_invalidate_unlock_shared(mapping);
        if (fpin)
                fput(fpin);
        return ret | VM_FAULT_RETRY;
@@ -3437,6 +3532,8 @@ out:
  *
  * If the page does not get brought uptodate, return -EIO.
  *
+ * The function expects mapping->invalidate_lock to be already held.
+ *
  * Return: up to date page on success, ERR_PTR() on failure.
  */
 struct page *read_cache_page(struct address_space *mapping,
@@ -3460,6 +3557,8 @@ EXPORT_SYMBOL(read_cache_page);
  *
  * If the page does not get brought uptodate, return -EIO.
  *
+ * The function expects mapping->invalidate_lock to be already held.
+ *
  * Return: up to date page on success, ERR_PTR() on failure.
  */
 struct page *read_cache_page_gfp(struct address_space *mapping,
@@ -3704,12 +3803,12 @@ EXPORT_SYMBOL(generic_perform_write);
  * modification times and calls proper subroutines depending on whether we
  * do direct IO or a standard buffered write.
  *
- * It expects i_mutex to be grabbed unless we work on a block device or similar
+ * It expects i_rwsem to be grabbed unless we work on a block device or similar
  * object which does not need locking at all.
  *
  * This function does *not* take care of syncing data in case of O_SYNC write.
  * A caller has to handle it. This is mainly due to the fact that we want to
- * avoid syncing under i_mutex.
+ * avoid syncing under i_rwsem.
  *
  * Return:
  * * number of bytes written, even for truncated writes
@@ -3797,7 +3896,7 @@ EXPORT_SYMBOL(__generic_file_write_iter);
  *
  * This is a wrapper around __generic_file_write_iter() to be used by most
  * filesystems. It takes care of syncing the file in case of O_SYNC file
- * and acquires i_mutex as needed.
+ * and acquires i_rwsem as needed.
  * Return:
  * * negative error code if no data has been written at all of
  *   vfs_fsync_range() failed for a synchronous write
index 42b8b1f..b947179 100644 (file)
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1558,9 +1558,12 @@ long faultin_vma_page_range(struct vm_area_struct *vma, unsigned long start,
                gup_flags |= FOLL_WRITE;
 
        /*
-        * See check_vma_flags(): Will return -EFAULT on incompatible mappings
-        * or with insufficient permissions.
+        * We want to report -EINVAL instead of -EFAULT for any permission
+        * problems or incompatible mappings.
         */
+       if (check_vma_flags(vma, gup_flags))
+               return -EINVAL;
+
        return __get_user_pages(mm, start, nr_pages, gup_flags,
                                NULL, NULL, locked);
 }
index dfc940d..8ea35ba 100644 (file)
@@ -2476,7 +2476,7 @@ void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
                if (!rc) {
                        /*
                         * This indicates there is an entry in the reserve map
-                        * added by alloc_huge_page.  We know it was added
+                        * not added by alloc_huge_page.  We know it was added
                         * before the alloc_huge_page call, otherwise
                         * HPageRestoreReserve would be set on the page.
                         * Remove the entry so that a subsequent allocation
@@ -4660,7 +4660,9 @@ retry_avoidcopy:
        spin_unlock(ptl);
        mmu_notifier_invalidate_range_end(&range);
 out_release_all:
-       restore_reserve_on_error(h, vma, haddr, new_page);
+       /* No restore in case of successful pagetable update (Break COW) */
+       if (new_page != old_page)
+               restore_reserve_on_error(h, vma, haddr, new_page);
        put_page(new_page);
 out_release_old:
        put_page(old_page);
@@ -4776,7 +4778,7 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
        pte_t new_pte;
        spinlock_t *ptl;
        unsigned long haddr = address & huge_page_mask(h);
-       bool new_page = false;
+       bool new_page, new_pagecache_page = false;
 
        /*
         * Currently, we are forced to kill the process in the event the
@@ -4799,6 +4801,7 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
                goto out;
 
 retry:
+       new_page = false;
        page = find_lock_page(mapping, idx);
        if (!page) {
                /* Check for page in userfault range */
@@ -4842,6 +4845,7 @@ retry:
                                        goto retry;
                                goto out;
                        }
+                       new_pagecache_page = true;
                } else {
                        lock_page(page);
                        if (unlikely(anon_vma_prepare(vma))) {
@@ -4926,7 +4930,9 @@ backout:
        spin_unlock(ptl);
 backout_unlocked:
        unlock_page(page);
-       restore_reserve_on_error(h, vma, haddr, page);
+       /* restore reserve for newly allocated pages not in page cache */
+       if (new_page && !new_pagecache_page)
+               restore_reserve_on_error(h, vma, haddr, page);
        put_page(page);
        goto out;
 }
@@ -5135,6 +5141,7 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
        int ret = -ENOMEM;
        struct page *page;
        int writable;
+       bool new_pagecache_page = false;
 
        if (is_continue) {
                ret = -EFAULT;
@@ -5228,6 +5235,7 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
                ret = huge_add_to_page_cache(page, mapping, idx);
                if (ret)
                        goto out_release_nounlock;
+               new_pagecache_page = true;
        }
 
        ptl = huge_pte_lockptr(h, dst_mm, dst_pte);
@@ -5291,7 +5299,8 @@ out_release_unlock:
        if (vm_shared || is_continue)
                unlock_page(page);
 out_release_nounlock:
-       restore_reserve_on_error(h, dst_vma, dst_addr, page);
+       if (!new_pagecache_page)
+               restore_reserve_on_error(h, dst_vma, dst_addr, page);
        put_page(page);
        goto out;
 }
index 942cbc1..eb6307c 100644 (file)
 #include <linux/tracepoint.h>
 #include <trace/events/printk.h>
 
+#include <asm/kfence.h>
+
 #include "kfence.h"
 
+/* May be overridden by <asm/kfence.h>. */
+#ifndef arch_kfence_test_address
+#define arch_kfence_test_address(addr) (addr)
+#endif
+
 /* Report as observed from console. */
 static struct {
        spinlock_t lock;
@@ -82,6 +89,7 @@ static const char *get_access_type(const struct expect_report *r)
 /* Check observed report matches information in @r. */
 static bool report_matches(const struct expect_report *r)
 {
+       unsigned long addr = (unsigned long)r->addr;
        bool ret = false;
        unsigned long flags;
        typeof(observed.lines) expect;
@@ -131,22 +139,25 @@ static bool report_matches(const struct expect_report *r)
        switch (r->type) {
        case KFENCE_ERROR_OOB:
                cur += scnprintf(cur, end - cur, "Out-of-bounds %s at", get_access_type(r));
+               addr = arch_kfence_test_address(addr);
                break;
        case KFENCE_ERROR_UAF:
                cur += scnprintf(cur, end - cur, "Use-after-free %s at", get_access_type(r));
+               addr = arch_kfence_test_address(addr);
                break;
        case KFENCE_ERROR_CORRUPTION:
                cur += scnprintf(cur, end - cur, "Corrupted memory at");
                break;
        case KFENCE_ERROR_INVALID:
                cur += scnprintf(cur, end - cur, "Invalid %s at", get_access_type(r));
+               addr = arch_kfence_test_address(addr);
                break;
        case KFENCE_ERROR_INVALID_FREE:
                cur += scnprintf(cur, end - cur, "Invalid free of");
                break;
        }
 
-       cur += scnprintf(cur, end - cur, " 0x%p", (void *)r->addr);
+       cur += scnprintf(cur, end - cur, " 0x%p", (void *)addr);
 
        spin_lock_irqsave(&observed.lock, flags);
        if (!report_available())
index 228a2fb..73d46d1 100644 (file)
@@ -290,7 +290,7 @@ static void hex_dump_object(struct seq_file *seq,
        warn_or_seq_printf(seq, "  hex dump (first %zu bytes):\n", len);
        kasan_disable_current();
        warn_or_seq_hex_dump(seq, DUMP_PREFIX_NONE, HEX_ROW_SIZE,
-                            HEX_GROUP_SIZE, ptr, len, HEX_ASCII);
+                            HEX_GROUP_SIZE, kasan_reset_tag((void *)ptr), len, HEX_ASCII);
        kasan_enable_current();
 }
 
@@ -1171,7 +1171,7 @@ static bool update_checksum(struct kmemleak_object *object)
 
        kasan_disable_current();
        kcsan_disable_current();
-       object->checksum = crc32(0, (void *)object->pointer, object->size);
+       object->checksum = crc32(0, kasan_reset_tag((void *)object->pointer), object->size);
        kasan_enable_current();
        kcsan_enable_current();
 
@@ -1246,7 +1246,7 @@ static void scan_block(void *_start, void *_end,
                        break;
 
                kasan_disable_current();
-               pointer = *ptr;
+               pointer = *(unsigned long *)kasan_reset_tag((void *)ptr);
                kasan_enable_current();
 
                untagged_ptr = (unsigned long)kasan_reset_tag((void *)pointer);
index 6d3d348..56324a3 100644 (file)
@@ -862,10 +862,12 @@ static long madvise_populate(struct vm_area_struct *vma,
                        switch (pages) {
                        case -EINTR:
                                return -EINTR;
-                       case -EFAULT: /* Incompatible mappings / permissions. */
+                       case -EINVAL: /* Incompatible mappings / permissions. */
                                return -EINVAL;
                        case -EHWPOISON:
                                return -EHWPOISON;
+                       case -EFAULT: /* VM_FAULT_SIGBUS or VM_FAULT_SIGSEGV */
+                               return -EFAULT;
                        default:
                                pr_warn_once("%s: unhandled return value: %ld\n",
                                             __func__, pages);
@@ -910,7 +912,7 @@ static long madvise_remove(struct vm_area_struct *vma,
                        + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
 
        /*
-        * Filesystem's fallocate may need to take i_mutex.  We need to
+        * Filesystem's fallocate may need to take i_rwsem.  We need to
         * explicitly grab a reference because the vma (and hence the
         * vma's reference to the file) can go away as soon as we drop
         * mmap_lock.
index ae1f5d0..702a81d 100644 (file)
@@ -3106,13 +3106,15 @@ void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
                stock->cached_pgdat = pgdat;
        } else if (stock->cached_pgdat != pgdat) {
                /* Flush the existing cached vmstat data */
+               struct pglist_data *oldpg = stock->cached_pgdat;
+
                if (stock->nr_slab_reclaimable_b) {
-                       mod_objcg_mlstate(objcg, pgdat, NR_SLAB_RECLAIMABLE_B,
+                       mod_objcg_mlstate(objcg, oldpg, NR_SLAB_RECLAIMABLE_B,
                                          stock->nr_slab_reclaimable_b);
                        stock->nr_slab_reclaimable_b = 0;
                }
                if (stock->nr_slab_unreclaimable_b) {
-                       mod_objcg_mlstate(objcg, pgdat, NR_SLAB_UNRECLAIMABLE_B,
+                       mod_objcg_mlstate(objcg, oldpg, NR_SLAB_UNRECLAIMABLE_B,
                                          stock->nr_slab_unreclaimable_b);
                        stock->nr_slab_unreclaimable_b = 0;
                }
@@ -3574,7 +3576,8 @@ static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
        unsigned long val;
 
        if (mem_cgroup_is_root(memcg)) {
-               cgroup_rstat_flush(memcg->css.cgroup);
+               /* mem_cgroup_threshold() calls here from irqsafe context */
+               cgroup_rstat_flush_irqsafe(memcg->css.cgroup);
                val = memcg_page_state(memcg, NR_FILE_PAGES) +
                        memcg_page_state(memcg, NR_ANON_MAPPED);
                if (swap)
index eefd823..e1f87cf 100644 (file)
@@ -866,7 +866,7 @@ static int me_pagecache_clean(struct page *p, unsigned long pfn)
        /*
         * Truncation is a bit tricky. Enable it per file system for now.
         *
-        * Open: to take i_mutex or not for this? Right now we don't.
+        * Open: to take i_rwsem or not for this? Right now we don't.
         */
        ret = truncate_error_page(p, pfn, mapping);
 out:
@@ -1146,7 +1146,7 @@ static int __get_hwpoison_page(struct page *page)
         * unexpected races caused by taking a page refcount.
         */
        if (!HWPoisonHandlable(head))
-               return 0;
+               return -EBUSY;
 
        if (PageTransHuge(head)) {
                /*
@@ -1199,9 +1199,15 @@ try_again:
                        }
                        goto out;
                } else if (ret == -EBUSY) {
-                       /* We raced with freeing huge page to buddy, retry. */
-                       if (pass++ < 3)
+                       /*
+                        * We raced with (possibly temporary) unhandlable
+                        * page, retry.
+                        */
+                       if (pass++ < 3) {
+                               shake_page(p, 1);
                                goto try_again;
+                       }
+                       ret = -EIO;
                        goto out;
                }
        }
index 8cb75b2..86c3af7 100644 (file)
@@ -1731,6 +1731,7 @@ failed_removal_isolated:
        undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
        memory_notify(MEM_CANCEL_OFFLINE, &arg);
 failed_removal_pcplists_disabled:
+       lru_cache_enable();
        zone_pcp_enable(zone);
 failed_removal:
        pr_debug("memory offlining [mem %#010llx-%#010llx] failed due to %s\n",
index 34a9ad3..7e24043 100644 (file)
@@ -2068,7 +2068,7 @@ int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
        LIST_HEAD(migratepages);
        new_page_t *new;
        bool compound;
-       unsigned int nr_pages = thp_nr_pages(page);
+       int nr_pages = thp_nr_pages(page);
 
        /*
         * PTE mapped THP or HugeTLB page can't reach here so the page could
index ca54d36..181a113 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1517,12 +1517,6 @@ unsigned long do_mmap(struct file *file, unsigned long addr,
                        if (IS_APPEND(inode) && (file->f_mode & FMODE_WRITE))
                                return -EACCES;
 
-                       /*
-                        * Make sure there are no mandatory locks on the file.
-                        */
-                       if (locks_verify_locked(file))
-                               return -EAGAIN;
-
                        vm_flags |= VM_SHARED | VM_MAYSHARE;
                        if (!(file->f_mode & FMODE_WRITE))
                                vm_flags &= ~(VM_MAYWRITE | VM_SHARED);
index 3a93d40..9d0ad98 100644 (file)
@@ -826,9 +826,6 @@ static int validate_mmap_request(struct file *file,
                            (file->f_mode & FMODE_WRITE))
                                return -EACCES;
 
-                       if (locks_verify_locked(file))
-                               return -EAGAIN;
-
                        if (!(capabilities & NOMMU_MAP_DIRECT))
                                return -ENODEV;
 
index 9f63548..c12f67c 100644 (file)
@@ -2010,7 +2010,6 @@ int dirty_writeback_centisecs_handler(struct ctl_table *table, int write,
        return ret;
 }
 
-#ifdef CONFIG_BLOCK
 void laptop_mode_timer_fn(struct timer_list *t)
 {
        struct backing_dev_info *backing_dev_info =
@@ -2045,7 +2044,6 @@ void laptop_sync_completion(void)
 
        rcu_read_unlock();
 }
-#endif
 
 /*
  * If ratelimit_pages is too high then we can get into dirty-data overload
index 856b175..eeb3a9c 100644 (file)
@@ -3453,19 +3453,10 @@ void free_unref_page_list(struct list_head *list)
                 * comment in free_unref_page.
                 */
                migratetype = get_pcppage_migratetype(page);
-               if (unlikely(migratetype >= MIGRATE_PCPTYPES)) {
-                       if (unlikely(is_migrate_isolate(migratetype))) {
-                               list_del(&page->lru);
-                               free_one_page(page_zone(page), page, pfn, 0,
-                                                       migratetype, FPI_NONE);
-                               continue;
-                       }
-
-                       /*
-                        * Non-isolated types over MIGRATE_PCPTYPES get added
-                        * to the MIGRATE_MOVABLE pcp list.
-                        */
-                       set_pcppage_migratetype(page, MIGRATE_MOVABLE);
+               if (unlikely(is_migrate_isolate(migratetype))) {
+                       list_del(&page->lru);
+                       free_one_page(page_zone(page), page, pfn, 0, migratetype, FPI_NONE);
+                       continue;
                }
 
                set_page_private(page, pfn);
@@ -3475,7 +3466,15 @@ void free_unref_page_list(struct list_head *list)
        list_for_each_entry_safe(page, next, list, lru) {
                pfn = page_private(page);
                set_page_private(page, 0);
+
+               /*
+                * Non-isolated types over MIGRATE_PCPTYPES get added
+                * to the MIGRATE_MOVABLE pcp list.
+                */
                migratetype = get_pcppage_migratetype(page);
+               if (unlikely(migratetype >= MIGRATE_PCPTYPES))
+                       migratetype = MIGRATE_MOVABLE;
+
                trace_mm_page_free_batched(page);
                free_unref_page_commit(page, pfn, migratetype, 0);
 
index d589f14..41b75d7 100644 (file)
@@ -192,6 +192,7 @@ void page_cache_ra_unbounded(struct readahead_control *ractl,
         */
        unsigned int nofs = memalloc_nofs_save();
 
+       filemap_invalidate_lock_shared(mapping);
        /*
         * Preallocate as many pages as we will need.
         */
@@ -236,6 +237,7 @@ void page_cache_ra_unbounded(struct readahead_control *ractl,
         * will then handle the error.
         */
        read_pages(ractl, &page_pool, false);
+       filemap_invalidate_unlock_shared(mapping);
        memalloc_nofs_restore(nofs);
 }
 EXPORT_SYMBOL_GPL(page_cache_ra_unbounded);
index b9eb5c1..2d29a57 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
 /*
  * Lock ordering in mm:
  *
- * inode->i_mutex      (while writing or truncating, not reading or faulting)
+ * inode->i_rwsem      (while writing or truncating, not reading or faulting)
  *   mm->mmap_lock
- *     page->flags PG_locked (lock_page)   * (see huegtlbfs below)
- *       hugetlbfs_i_mmap_rwsem_key (in huge_pmd_share)
- *         mapping->i_mmap_rwsem
- *           hugetlb_fault_mutex (hugetlbfs specific page fault mutex)
- *           anon_vma->rwsem
- *             mm->page_table_lock or pte_lock
- *               swap_lock (in swap_duplicate, swap_info_get)
- *                 mmlist_lock (in mmput, drain_mmlist and others)
- *                 mapping->private_lock (in __set_page_dirty_buffers)
- *                   lock_page_memcg move_lock (in __set_page_dirty_buffers)
- *                     i_pages lock (widely used)
- *                       lruvec->lru_lock (in lock_page_lruvec_irq)
- *                 inode->i_lock (in set_page_dirty's __mark_inode_dirty)
- *                 bdi.wb->list_lock (in set_page_dirty's __mark_inode_dirty)
- *                   sb_lock (within inode_lock in fs/fs-writeback.c)
- *                   i_pages lock (widely used, in set_page_dirty,
- *                             in arch-dependent flush_dcache_mmap_lock,
- *                             within bdi.wb->list_lock in __sync_single_inode)
+ *     mapping->invalidate_lock (in filemap_fault)
+ *       page->flags PG_locked (lock_page)   * (see hugetlbfs below)
+ *         hugetlbfs_i_mmap_rwsem_key (in huge_pmd_share)
+ *           mapping->i_mmap_rwsem
+ *             hugetlb_fault_mutex (hugetlbfs specific page fault mutex)
+ *             anon_vma->rwsem
+ *               mm->page_table_lock or pte_lock
+ *                 swap_lock (in swap_duplicate, swap_info_get)
+ *                   mmlist_lock (in mmput, drain_mmlist and others)
+ *                   mapping->private_lock (in __set_page_dirty_buffers)
+ *                     lock_page_memcg move_lock (in __set_page_dirty_buffers)
+ *                       i_pages lock (widely used)
+ *                         lruvec->lru_lock (in lock_page_lruvec_irq)
+ *                   inode->i_lock (in set_page_dirty's __mark_inode_dirty)
+ *                   bdi.wb->list_lock (in set_page_dirty's __mark_inode_dirty)
+ *                     sb_lock (within inode_lock in fs/fs-writeback.c)
+ *                     i_pages lock (widely used, in set_page_dirty,
+ *                               in arch-dependent flush_dcache_mmap_lock,
+ *                               within bdi.wb->list_lock in __sync_single_inode)
  *
- * anon_vma->rwsem,mapping->i_mutex      (memory_failure, collect_procs_anon)
+ * anon_vma->rwsem,mapping->i_mmap_rwsem   (memory_failure, collect_procs_anon)
  *   ->tasklist_lock
  *     pte map lock
  *
index 70d9ce2..3107ace 100644 (file)
@@ -96,7 +96,7 @@ static struct vfsmount *shm_mnt;
 
 /*
  * shmem_fallocate communicates with shmem_fault or shmem_writepage via
- * inode->i_private (with i_mutex making sure that it has only one user at
+ * inode->i_private (with i_rwsem making sure that it has only one user at
  * a time): we would prefer not to enlarge the shmem inode just for that.
  */
 struct shmem_falloc {
@@ -774,7 +774,7 @@ static int shmem_free_swap(struct address_space *mapping,
  * Determine (in bytes) how many of the shmem object's pages mapped by the
  * given offsets are swapped out.
  *
- * This is safe to call without i_mutex or the i_pages lock thanks to RCU,
+ * This is safe to call without i_rwsem or the i_pages lock thanks to RCU,
  * as long as the inode doesn't go away and racy results are not a problem.
  */
 unsigned long shmem_partial_swap_usage(struct address_space *mapping,
@@ -806,7 +806,7 @@ unsigned long shmem_partial_swap_usage(struct address_space *mapping,
  * Determine (in bytes) how many of the shmem object's pages mapped by the
  * given vma is swapped out.
  *
- * This is safe to call without i_mutex or the i_pages lock thanks to RCU,
+ * This is safe to call without i_rwsem or the i_pages lock thanks to RCU,
  * as long as the inode doesn't go away and racy results are not a problem.
  */
 unsigned long shmem_swap_usage(struct vm_area_struct *vma)
@@ -1069,7 +1069,7 @@ static int shmem_setattr(struct user_namespace *mnt_userns,
                loff_t oldsize = inode->i_size;
                loff_t newsize = attr->ia_size;
 
-               /* protected by i_mutex */
+               /* protected by i_rwsem */
                if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
                    (newsize > oldsize && (info->seals & F_SEAL_GROW)))
                        return -EPERM;
@@ -1696,8 +1696,7 @@ static int shmem_swapin_page(struct inode *inode, pgoff_t index,
        struct address_space *mapping = inode->i_mapping;
        struct shmem_inode_info *info = SHMEM_I(inode);
        struct mm_struct *charge_mm = vma ? vma->vm_mm : NULL;
-       struct swap_info_struct *si;
-       struct page *page = NULL;
+       struct page *page;
        swp_entry_t swap;
        int error;
 
@@ -1705,12 +1704,6 @@ static int shmem_swapin_page(struct inode *inode, pgoff_t index,
        swap = radix_to_swp_entry(*pagep);
        *pagep = NULL;
 
-       /* Prevent swapoff from happening to us. */
-       si = get_swap_device(swap);
-       if (!si) {
-               error = EINVAL;
-               goto failed;
-       }
        /* Look it up and read it in.. */
        page = lookup_swap_cache(swap, NULL, 0);
        if (!page) {
@@ -1772,8 +1765,6 @@ static int shmem_swapin_page(struct inode *inode, pgoff_t index,
        swap_free(swap);
 
        *pagep = page;
-       if (si)
-               put_swap_device(si);
        return 0;
 failed:
        if (!shmem_confirm_swap(mapping, index, swap))
@@ -1784,9 +1775,6 @@ unlock:
                put_page(page);
        }
 
-       if (si)
-               put_swap_device(si);
-
        return error;
 }
 
@@ -2071,7 +2059,7 @@ static vm_fault_t shmem_fault(struct vm_fault *vmf)
        /*
         * Trinity finds that probing a hole which tmpfs is punching can
         * prevent the hole-punch from ever completing: which in turn
-        * locks writers out with its hold on i_mutex.  So refrain from
+        * locks writers out with its hold on i_rwsem.  So refrain from
         * faulting pages into the hole while it's being punched.  Although
         * shmem_undo_range() does remove the additions, it may be unable to
         * keep up, as each new page needs its own unmap_mapping_range() call,
@@ -2082,7 +2070,7 @@ static vm_fault_t shmem_fault(struct vm_fault *vmf)
         * we just need to make racing faults a rare case.
         *
         * The implementation below would be much simpler if we just used a
-        * standard mutex or completion: but we cannot take i_mutex in fault,
+        * standard mutex or completion: but we cannot take i_rwsem in fault,
         * and bloating every shmem inode for this unlikely case would be sad.
         */
        if (unlikely(inode->i_private)) {
@@ -2482,7 +2470,7 @@ shmem_write_begin(struct file *file, struct address_space *mapping,
        struct shmem_inode_info *info = SHMEM_I(inode);
        pgoff_t index = pos >> PAGE_SHIFT;
 
-       /* i_mutex is held by caller */
+       /* i_rwsem is held by caller */
        if (unlikely(info->seals & (F_SEAL_GROW |
                                   F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))) {
                if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))
@@ -2582,7 +2570,7 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
 
                /*
                 * We must evaluate after, since reads (unlike writes)
-                * are called without i_mutex protection against truncate
+                * are called without i_rwsem protection against truncate
                 */
                nr = PAGE_SIZE;
                i_size = i_size_read(inode);
@@ -2652,7 +2640,7 @@ static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
                return -ENXIO;
 
        inode_lock(inode);
-       /* We're holding i_mutex so we can access i_size directly */
+       /* We're holding i_rwsem so we can access i_size directly */
        offset = mapping_seek_hole_data(mapping, offset, inode->i_size, whence);
        if (offset >= 0)
                offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE);
@@ -2681,7 +2669,7 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
                loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
                DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);
 
-               /* protected by i_mutex */
+               /* protected by i_rwsem */
                if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
                        error = -EPERM;
                        goto out;
index f997fd5..58c01a3 100644 (file)
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -346,7 +346,7 @@ static inline void memcg_slab_free_hook(struct kmem_cache *s_orig,
                        continue;
 
                page = virt_to_head_page(p[i]);
-               objcgs = page_objcgs(page);
+               objcgs = page_objcgs_check(page);
                if (!objcgs)
                        continue;
 
index 090fa14..f77d8cd 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -576,8 +576,8 @@ static void print_section(char *level, char *text, u8 *addr,
                          unsigned int length)
 {
        metadata_access_enable();
-       print_hex_dump(level, kasan_reset_tag(text), DUMP_PREFIX_ADDRESS,
-                       16, 1, addr, length, 1);
+       print_hex_dump(level, text, DUMP_PREFIX_ADDRESS,
+                       16, 1, kasan_reset_tag((void *)addr), length, 1);
        metadata_access_disable();
 }
 
@@ -1400,12 +1400,13 @@ check_slabs:
 static int __init setup_slub_debug(char *str)
 {
        slab_flags_t flags;
+       slab_flags_t global_flags;
        char *saved_str;
        char *slab_list;
        bool global_slub_debug_changed = false;
        bool slab_list_specified = false;
 
-       slub_debug = DEBUG_DEFAULT_FLAGS;
+       global_flags = DEBUG_DEFAULT_FLAGS;
        if (*str++ != '=' || !*str)
                /*
                 * No options specified. Switch on full debugging.
@@ -1417,7 +1418,7 @@ static int __init setup_slub_debug(char *str)
                str = parse_slub_debug_flags(str, &flags, &slab_list, true);
 
                if (!slab_list) {
-                       slub_debug = flags;
+                       global_flags = flags;
                        global_slub_debug_changed = true;
                } else {
                        slab_list_specified = true;
@@ -1426,16 +1427,18 @@ static int __init setup_slub_debug(char *str)
 
        /*
         * For backwards compatibility, a single list of flags with list of
-        * slabs means debugging is only enabled for those slabs, so the global
-        * slub_debug should be 0. We can extended that to multiple lists as
+        * slabs means debugging is only changed for those slabs, so the global
+        * slub_debug should be unchanged (0 or DEBUG_DEFAULT_FLAGS, depending
+        * on CONFIG_SLUB_DEBUG_ON). We can extended that to multiple lists as
         * long as there is no option specifying flags without a slab list.
         */
        if (slab_list_specified) {
                if (!global_slub_debug_changed)
-                       slub_debug = 0;
+                       global_flags = slub_debug;
                slub_debug_string = saved_str;
        }
 out:
+       slub_debug = global_flags;
        if (slub_debug != 0 || slub_debug_string)
                static_branch_enable(&slub_debug_enabled);
        else
@@ -3236,6 +3239,16 @@ struct detached_freelist {
        struct kmem_cache *s;
 };
 
+static inline void free_nonslab_page(struct page *page, void *object)
+{
+       unsigned int order = compound_order(page);
+
+       VM_BUG_ON_PAGE(!PageCompound(page), page);
+       kfree_hook(object);
+       mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B, -(PAGE_SIZE << order));
+       __free_pages(page, order);
+}
+
 /*
  * This function progressively scans the array with free objects (with
  * a limited look ahead) and extract objects belonging to the same
@@ -3272,9 +3285,7 @@ int build_detached_freelist(struct kmem_cache *s, size_t size,
        if (!s) {
                /* Handle kalloc'ed objects */
                if (unlikely(!PageSlab(page))) {
-                       BUG_ON(!PageCompound(page));
-                       kfree_hook(object);
-                       __free_pages(page, compound_order(page));
+                       free_nonslab_page(page, object);
                        p[size] = NULL; /* mark object processed */
                        return size;
                }
@@ -4250,13 +4261,7 @@ void kfree(const void *x)
 
        page = virt_to_head_page(x);
        if (unlikely(!PageSlab(page))) {
-               unsigned int order = compound_order(page);
-
-               BUG_ON(!PageCompound(page));
-               kfree_hook(object);
-               mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
-                                     -(PAGE_SIZE << order));
-               __free_pages(page, order);
+               free_nonslab_page(page, object);
                return;
        }
        slab_free(page->slab_cache, page, object, NULL, 1, _RET_IP_);
index a66f3e0..16f706c 100644 (file)
@@ -70,9 +70,9 @@ void disable_swap_slots_cache_lock(void)
        swap_slot_cache_enabled = false;
        if (swap_slot_cache_initialized) {
                /* serialize with cpu hotplug operations */
-               get_online_cpus();
+               cpus_read_lock();
                __drain_swap_slots_cache(SLOTS_CACHE|SLOTS_CACHE_RET);
-               put_online_cpus();
+               cpus_read_unlock();
        }
 }
 
index c56aa9a..bc7cee6 100644 (file)
@@ -628,13 +628,6 @@ struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
        if (!mask)
                goto skip;
 
-       /* Test swap type to make sure the dereference is safe */
-       if (likely(si->flags & (SWP_BLKDEV | SWP_FS_OPS))) {
-               struct inode *inode = si->swap_file->f_mapping->host;
-               if (inode_read_congested(inode))
-                       goto skip;
-       }
-
        do_poll = false;
        /* Read a page_cluster sized and aligned cluster around offset. */
        start_offset = offset & ~mask;
index 234ddd8..44ad5e5 100644 (file)
@@ -412,7 +412,8 @@ EXPORT_SYMBOL(truncate_inode_pages_range);
  * @mapping: mapping to truncate
  * @lstart: offset from which to truncate
  *
- * Called under (and serialised by) inode->i_mutex.
+ * Called under (and serialised by) inode->i_rwsem and
+ * mapping->invalidate_lock.
  *
  * Note: When this function returns, there can be a page in the process of
  * deletion (inside __delete_from_page_cache()) in the specified range.  Thus
@@ -429,7 +430,7 @@ EXPORT_SYMBOL(truncate_inode_pages);
  * truncate_inode_pages_final - truncate *all* pages before inode dies
  * @mapping: mapping to truncate
  *
- * Called under (and serialized by) inode->i_mutex.
+ * Called under (and serialized by) inode->i_rwsem.
  *
  * Filesystems have to use this in the .evict_inode path to inform the
  * VM that this is the final truncate and the inode is going away.
@@ -748,7 +749,7 @@ EXPORT_SYMBOL(truncate_pagecache);
  * setattr function when ATTR_SIZE is passed in.
  *
  * Must be called with a lock serializing truncates and writes (generally
- * i_mutex but e.g. xfs uses a different lock) and before all filesystem
+ * i_rwsem but e.g. xfs uses a different lock) and before all filesystem
  * specific block truncation has been performed.
  */
 void truncate_setsize(struct inode *inode, loff_t newsize)
@@ -777,7 +778,7 @@ EXPORT_SYMBOL(truncate_setsize);
  *
  * The function must be called after i_size is updated so that page fault
  * coming after we unlock the page will already see the new i_size.
- * The function must be called while we still hold i_mutex - this not only
+ * The function must be called while we still hold i_rwsem - this not only
  * makes sure i_size is stable but also that userspace cannot observe new
  * i_size value before we are prepared to store mmap writes at new inode size.
  */
index 4620df6..eeae2f6 100644 (file)
@@ -100,9 +100,12 @@ struct scan_control {
        unsigned int may_swap:1;
 
        /*
-        * Cgroups are not reclaimed below their configured memory.low,
-        * unless we threaten to OOM. If any cgroups are skipped due to
-        * memory.low and nothing was reclaimed, go back for memory.low.
+        * Cgroup memory below memory.low is protected as long as we
+        * don't threaten to OOM. If any cgroup is reclaimed at
+        * reduced force or passed over entirely due to its memory.low
+        * setting (memcg_low_skipped), and nothing is reclaimed as a
+        * result, then go back for one more cycle that reclaims the protected
+        * memory (memcg_low_reclaim) to avert OOM.
         */
        unsigned int memcg_low_reclaim:1;
        unsigned int memcg_low_skipped:1;
@@ -2537,15 +2540,14 @@ out:
        for_each_evictable_lru(lru) {
                int file = is_file_lru(lru);
                unsigned long lruvec_size;
+               unsigned long low, min;
                unsigned long scan;
-               unsigned long protection;
 
                lruvec_size = lruvec_lru_size(lruvec, lru, sc->reclaim_idx);
-               protection = mem_cgroup_protection(sc->target_mem_cgroup,
-                                                  memcg,
-                                                  sc->memcg_low_reclaim);
+               mem_cgroup_protection(sc->target_mem_cgroup, memcg,
+                                     &min, &low);
 
-               if (protection) {
+               if (min || low) {
                        /*
                         * Scale a cgroup's reclaim pressure by proportioning
                         * its current usage to its memory.low or memory.min
@@ -2576,6 +2578,15 @@ out:
                         * hard protection.
                         */
                        unsigned long cgroup_size = mem_cgroup_size(memcg);
+                       unsigned long protection;
+
+                       /* memory.low scaling, make sure we retry before OOM */
+                       if (!sc->memcg_low_reclaim && low > min) {
+                               protection = low;
+                               sc->memcg_low_skipped = 1;
+                       } else {
+                               protection = min;
+                       }
 
                        /* Avoid TOCTOU with earlier protection check */
                        cgroup_size = max(cgroup_size, protection);
@@ -4413,11 +4424,13 @@ static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned in
                .may_swap = 1,
                .reclaim_idx = gfp_zone(gfp_mask),
        };
+       unsigned long pflags;
 
        trace_mm_vmscan_node_reclaim_begin(pgdat->node_id, order,
                                           sc.gfp_mask);
 
        cond_resched();
+       psi_memstall_enter(&pflags);
        fs_reclaim_acquire(sc.gfp_mask);
        /*
         * We need to be able to allocate from the reserves for RECLAIM_UNMAP
@@ -4442,6 +4455,7 @@ static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned in
        current->flags &= ~PF_SWAPWRITE;
        memalloc_noreclaim_restore(noreclaim_flag);
        fs_reclaim_release(sc.gfp_mask);
+       psi_memstall_leave(&pflags);
 
        trace_mm_vmscan_node_reclaim_end(sc.nr_reclaimed);
 
index b0534e0..a7ed56a 100644 (file)
@@ -129,9 +129,9 @@ static void sum_vm_events(unsigned long *ret)
 */
 void all_vm_events(unsigned long *ret)
 {
-       get_online_cpus();
+       cpus_read_lock();
        sum_vm_events(ret);
-       put_online_cpus();
+       cpus_read_unlock();
 }
 EXPORT_SYMBOL_GPL(all_vm_events);
 
@@ -1948,7 +1948,7 @@ static void vmstat_shepherd(struct work_struct *w)
 {
        int cpu;
 
-       get_online_cpus();
+       cpus_read_lock();
        /* Check processors whose vmstat worker threads have been disabled */
        for_each_online_cpu(cpu) {
                struct delayed_work *dw = &per_cpu(vmstat_work, cpu);
@@ -1958,7 +1958,7 @@ static void vmstat_shepherd(struct work_struct *w)
 
                cond_resched();
        }
-       put_online_cpus();
+       cpus_read_unlock();
 
        schedule_delayed_work(&shepherd,
                round_jiffies_relative(sysctl_stat_interval));
@@ -2037,9 +2037,9 @@ void __init init_mm_internals(void)
        if (ret < 0)
                pr_err("vmstat: failed to register 'online' hotplug state\n");
 
-       get_online_cpus();
+       cpus_read_lock();
        init_cpu_node_state();
-       put_online_cpus();
+       cpus_read_unlock();
 
        start_shepherd_timer();
 #endif
index 2560ed2..e1a545c 100644 (file)
@@ -3996,14 +3996,10 @@ EXPORT_SYMBOL(hci_register_dev);
 /* Unregister HCI device */
 void hci_unregister_dev(struct hci_dev *hdev)
 {
-       int id;
-
        BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
 
        hci_dev_set_flag(hdev, HCI_UNREGISTER);
 
-       id = hdev->id;
-
        write_lock(&hci_dev_list_lock);
        list_del(&hdev->list);
        write_unlock(&hci_dev_list_lock);
@@ -4038,7 +4034,14 @@ void hci_unregister_dev(struct hci_dev *hdev)
        }
 
        device_del(&hdev->dev);
+       /* Actual cleanup is deferred until hci_cleanup_dev(). */
+       hci_dev_put(hdev);
+}
+EXPORT_SYMBOL(hci_unregister_dev);
 
+/* Cleanup HCI device */
+void hci_cleanup_dev(struct hci_dev *hdev)
+{
        debugfs_remove_recursive(hdev->debugfs);
        kfree_const(hdev->hw_info);
        kfree_const(hdev->fw_info);
@@ -4063,11 +4066,8 @@ void hci_unregister_dev(struct hci_dev *hdev)
        hci_blocked_keys_clear(hdev);
        hci_dev_unlock(hdev);
 
-       hci_dev_put(hdev);
-
-       ida_simple_remove(&hci_index_ida, id);
+       ida_simple_remove(&hci_index_ida, hdev->id);
 }
-EXPORT_SYMBOL(hci_unregister_dev);
 
 /* Suspend HCI device */
 int hci_suspend_dev(struct hci_dev *hdev)
index b04a5a0..f1128c2 100644 (file)
@@ -59,6 +59,17 @@ struct hci_pinfo {
        char              comm[TASK_COMM_LEN];
 };
 
+static struct hci_dev *hci_hdev_from_sock(struct sock *sk)
+{
+       struct hci_dev *hdev = hci_pi(sk)->hdev;
+
+       if (!hdev)
+               return ERR_PTR(-EBADFD);
+       if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
+               return ERR_PTR(-EPIPE);
+       return hdev;
+}
+
 void hci_sock_set_flag(struct sock *sk, int nr)
 {
        set_bit(nr, &hci_pi(sk)->flags);
@@ -759,19 +770,13 @@ void hci_sock_dev_event(struct hci_dev *hdev, int event)
        if (event == HCI_DEV_UNREG) {
                struct sock *sk;
 
-               /* Detach sockets from device */
+               /* Wake up sockets using this dead device */
                read_lock(&hci_sk_list.lock);
                sk_for_each(sk, &hci_sk_list.head) {
-                       lock_sock(sk);
                        if (hci_pi(sk)->hdev == hdev) {
-                               hci_pi(sk)->hdev = NULL;
                                sk->sk_err = EPIPE;
-                               sk->sk_state = BT_OPEN;
                                sk->sk_state_change(sk);
-
-                               hci_dev_put(hdev);
                        }
-                       release_sock(sk);
                }
                read_unlock(&hci_sk_list.lock);
        }
@@ -930,10 +935,10 @@ static int hci_sock_reject_list_del(struct hci_dev *hdev, void __user *arg)
 static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
                                unsigned long arg)
 {
-       struct hci_dev *hdev = hci_pi(sk)->hdev;
+       struct hci_dev *hdev = hci_hdev_from_sock(sk);
 
-       if (!hdev)
-               return -EBADFD;
+       if (IS_ERR(hdev))
+               return PTR_ERR(hdev);
 
        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
                return -EBUSY;
@@ -1103,6 +1108,18 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
 
        lock_sock(sk);
 
+       /* Allow detaching from dead device and attaching to alive device, if
+        * the caller wants to re-bind (instead of close) this socket in
+        * response to hci_sock_dev_event(HCI_DEV_UNREG) notification.
+        */
+       hdev = hci_pi(sk)->hdev;
+       if (hdev && hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
+               hci_pi(sk)->hdev = NULL;
+               sk->sk_state = BT_OPEN;
+               hci_dev_put(hdev);
+       }
+       hdev = NULL;
+
        if (sk->sk_state == BT_BOUND) {
                err = -EALREADY;
                goto done;
@@ -1379,9 +1396,9 @@ static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
 
        lock_sock(sk);
 
-       hdev = hci_pi(sk)->hdev;
-       if (!hdev) {
-               err = -EBADFD;
+       hdev = hci_hdev_from_sock(sk);
+       if (IS_ERR(hdev)) {
+               err = PTR_ERR(hdev);
                goto done;
        }
 
@@ -1743,9 +1760,9 @@ static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
                goto done;
        }
 
-       hdev = hci_pi(sk)->hdev;
-       if (!hdev) {
-               err = -EBADFD;
+       hdev = hci_hdev_from_sock(sk);
+       if (IS_ERR(hdev)) {
+               err = PTR_ERR(hdev);
                goto done;
        }
 
index 9874844..b69d88b 100644 (file)
@@ -83,6 +83,9 @@ void hci_conn_del_sysfs(struct hci_conn *conn)
 static void bt_host_release(struct device *dev)
 {
        struct hci_dev *hdev = to_hci_dev(dev);
+
+       if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
+               hci_cleanup_dev(hdev);
        kfree(hdev);
        module_put(THIS_MODULE);
 }
index 1cc75c8..caa16bf 100644 (file)
@@ -7,6 +7,7 @@
 #include <linux/vmalloc.h>
 #include <linux/etherdevice.h>
 #include <linux/filter.h>
+#include <linux/rcupdate_trace.h>
 #include <linux/sched/signal.h>
 #include <net/bpf_sk_storage.h>
 #include <net/sock.h>
@@ -951,7 +952,10 @@ int bpf_prog_test_run_syscall(struct bpf_prog *prog,
                        goto out;
                }
        }
+
+       rcu_read_lock_trace();
        retval = bpf_prog_run_pin_on_cpu(prog, ctx);
+       rcu_read_unlock_trace();
 
        if (copy_to_user(&uattr->test.retval, &retval, sizeof(u32))) {
                err = -EFAULT;
index a16191d..5dee309 100644 (file)
@@ -1019,7 +1019,8 @@ static int fdb_add_entry(struct net_bridge *br, struct net_bridge_port *source,
 
 static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge *br,
                        struct net_bridge_port *p, const unsigned char *addr,
-                       u16 nlh_flags, u16 vid, struct nlattr *nfea_tb[])
+                       u16 nlh_flags, u16 vid, struct nlattr *nfea_tb[],
+                       struct netlink_ext_ack *extack)
 {
        int err = 0;
 
@@ -1038,6 +1039,11 @@ static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge *br,
                rcu_read_unlock();
                local_bh_enable();
        } else if (ndm->ndm_flags & NTF_EXT_LEARNED) {
+               if (!p && !(ndm->ndm_state & NUD_PERMANENT)) {
+                       NL_SET_ERR_MSG_MOD(extack,
+                                          "FDB entry towards bridge must be permanent");
+                       return -EINVAL;
+               }
                err = br_fdb_external_learn_add(br, p, addr, vid, true);
        } else {
                spin_lock_bh(&br->hash_lock);
@@ -1110,9 +1116,11 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
                }
 
                /* VID was specified, so use it. */
-               err = __br_fdb_add(ndm, br, p, addr, nlh_flags, vid, nfea_tb);
+               err = __br_fdb_add(ndm, br, p, addr, nlh_flags, vid, nfea_tb,
+                                  extack);
        } else {
-               err = __br_fdb_add(ndm, br, p, addr, nlh_flags, 0, nfea_tb);
+               err = __br_fdb_add(ndm, br, p, addr, nlh_flags, 0, nfea_tb,
+                                  extack);
                if (err || !vg || !vg->num_vlans)
                        goto out;
 
@@ -1124,7 +1132,7 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
                        if (!br_vlan_should_use(v))
                                continue;
                        err = __br_fdb_add(ndm, br, p, addr, nlh_flags, v->vid,
-                                          nfea_tb);
+                                          nfea_tb, extack);
                        if (err)
                                goto out;
                }
@@ -1281,6 +1289,10 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
 
                if (swdev_notify)
                        flags |= BIT(BR_FDB_ADDED_BY_USER);
+
+               if (!p)
+                       flags |= BIT(BR_FDB_LOCAL);
+
                fdb = fdb_create(br, p, addr, vid, flags);
                if (!fdb) {
                        err = -ENOMEM;
@@ -1307,6 +1319,9 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
                if (swdev_notify)
                        set_bit(BR_FDB_ADDED_BY_USER, &fdb->flags);
 
+               if (!p)
+                       set_bit(BR_FDB_LOCAL, &fdb->flags);
+
                if (modified)
                        fdb_notify(br, fdb, RTM_NEWNEIGH, swdev_notify);
        }
index 6e4a323..14cd6ef 100644 (file)
@@ -616,6 +616,7 @@ int br_add_if(struct net_bridge *br, struct net_device *dev,
 
        err = dev_set_allmulti(dev, 1);
        if (err) {
+               br_multicast_del_port(p);
                kfree(p);       /* kobject not yet init'd, manually free */
                goto err1;
        }
@@ -729,6 +730,7 @@ err4:
 err3:
        sysfs_remove_link(br->ifobj, p->dev->name);
 err2:
+       br_multicast_del_port(p);
        kobject_put(&p->kobj);
        dev_set_allmulti(dev, -1);
 err1:
index 8d033a7..fdbed31 100644 (file)
@@ -88,6 +88,12 @@ static int nf_br_ip_fragment(struct net *net, struct sock *sk,
 
                        skb = ip_fraglist_next(&iter);
                }
+
+               if (!err)
+                       return 0;
+
+               kfree_skb_list(iter.frag);
+
                return err;
        }
 slow_path:
index c3946c3..bdc95bd 100644 (file)
@@ -1075,11 +1075,16 @@ static bool j1939_session_deactivate_locked(struct j1939_session *session)
 
 static bool j1939_session_deactivate(struct j1939_session *session)
 {
+       struct j1939_priv *priv = session->priv;
        bool active;
 
-       j1939_session_list_lock(session->priv);
+       j1939_session_list_lock(priv);
+       /* This function should be called with a session ref-count of at
+        * least 2.
+        */
+       WARN_ON_ONCE(kref_read(&session->kref) < 2);
        active = j1939_session_deactivate_locked(session);
-       j1939_session_list_unlock(session->priv);
+       j1939_session_list_unlock(priv);
 
        return active;
 }
@@ -1869,7 +1874,7 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session,
                if (!session->transmission)
                        j1939_tp_schedule_txtimer(session, 0);
        } else {
-               j1939_tp_set_rxtimeout(session, 250);
+               j1939_tp_set_rxtimeout(session, 750);
        }
        session->last_cmd = 0xff;
        consume_skb(se_skb);
index ed4fcb7..cd5a493 100644 (file)
@@ -546,10 +546,18 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
                                return -EFAULT;
                }
 
+               rtnl_lock();
                lock_sock(sk);
 
-               if (ro->bound && ro->ifindex)
+               if (ro->bound && ro->ifindex) {
                        dev = dev_get_by_index(sock_net(sk), ro->ifindex);
+                       if (!dev) {
+                               if (count > 1)
+                                       kfree(filter);
+                               err = -ENODEV;
+                               goto out_fil;
+                       }
+               }
 
                if (ro->bound) {
                        /* (try to) register the new filters */
@@ -588,6 +596,7 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
                        dev_put(dev);
 
                release_sock(sk);
+               rtnl_unlock();
 
                break;
 
@@ -600,10 +609,16 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
 
                err_mask &= CAN_ERR_MASK;
 
+               rtnl_lock();
                lock_sock(sk);
 
-               if (ro->bound && ro->ifindex)
+               if (ro->bound && ro->ifindex) {
                        dev = dev_get_by_index(sock_net(sk), ro->ifindex);
+                       if (!dev) {
+                               err = -ENODEV;
+                               goto out_err;
+                       }
+               }
 
                /* remove current error mask */
                if (ro->bound) {
@@ -627,6 +642,7 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
                        dev_put(dev);
 
                release_sock(sk);
+               rtnl_unlock();
 
                break;
 
index 8fdd04f..8503262 100644 (file)
@@ -9328,18 +9328,10 @@ static int __devlink_port_phys_port_name_get(struct devlink_port *devlink_port,
 
        switch (attrs->flavour) {
        case DEVLINK_PORT_FLAVOUR_PHYSICAL:
-       case DEVLINK_PORT_FLAVOUR_VIRTUAL:
                n = snprintf(name, len, "p%u", attrs->phys.port_number);
                if (n < len && attrs->split)
                        n += snprintf(name + n, len - n, "s%u",
                                      attrs->phys.split_subport_number);
-               if (!attrs->split)
-                       n = snprintf(name, len, "p%u", attrs->phys.port_number);
-               else
-                       n = snprintf(name, len, "p%us%u",
-                                    attrs->phys.port_number,
-                                    attrs->phys.split_subport_number);
-
                break;
        case DEVLINK_PORT_FLAVOUR_CPU:
        case DEVLINK_PORT_FLAVOUR_DSA:
@@ -9381,6 +9373,8 @@ static int __devlink_port_phys_port_name_get(struct devlink_port *devlink_port,
                n = snprintf(name, len, "pf%usf%u", attrs->pci_sf.pf,
                             attrs->pci_sf.sf);
                break;
+       case DEVLINK_PORT_FLAVOUR_VIRTUAL:
+               return -EOPNOTSUPP;
        }
 
        if (n >= len)
index 2aadbfc..4b2415d 100644 (file)
@@ -1504,7 +1504,7 @@ __be32 flow_get_u32_dst(const struct flow_keys *flow)
 }
 EXPORT_SYMBOL(flow_get_u32_dst);
 
-/* Sort the source and destination IP (and the ports if the IP are the same),
+/* Sort the source and destination IP and the ports,
  * to have consistent hash within the two directions
  */
 static inline void __flow_hash_consistentify(struct flow_keys *keys)
@@ -1515,11 +1515,11 @@ static inline void __flow_hash_consistentify(struct flow_keys *keys)
        case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
                addr_diff = (__force u32)keys->addrs.v4addrs.dst -
                            (__force u32)keys->addrs.v4addrs.src;
-               if ((addr_diff < 0) ||
-                   (addr_diff == 0 &&
-                    ((__force u16)keys->ports.dst <
-                     (__force u16)keys->ports.src))) {
+               if (addr_diff < 0)
                        swap(keys->addrs.v4addrs.src, keys->addrs.v4addrs.dst);
+
+               if ((__force u16)keys->ports.dst <
+                   (__force u16)keys->ports.src) {
                        swap(keys->ports.src, keys->ports.dst);
                }
                break;
@@ -1527,13 +1527,13 @@ static inline void __flow_hash_consistentify(struct flow_keys *keys)
                addr_diff = memcmp(&keys->addrs.v6addrs.dst,
                                   &keys->addrs.v6addrs.src,
                                   sizeof(keys->addrs.v6addrs.dst));
-               if ((addr_diff < 0) ||
-                   (addr_diff == 0 &&
-                    ((__force u16)keys->ports.dst <
-                     (__force u16)keys->ports.src))) {
+               if (addr_diff < 0) {
                        for (i = 0; i < 4; i++)
                                swap(keys->addrs.v6addrs.src.s6_addr32[i],
                                     keys->addrs.v6addrs.dst.s6_addr32[i]);
+               }
+               if ((__force u16)keys->ports.dst <
+                   (__force u16)keys->ports.src) {
                        swap(keys->ports.src, keys->ports.dst);
                }
                break;
index 75431ca..1a45584 100644 (file)
@@ -158,7 +158,7 @@ static void linkwatch_do_dev(struct net_device *dev)
        clear_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state);
 
        rfc2863_policy(dev);
-       if (dev->flags & IFF_UP && netif_device_present(dev)) {
+       if (dev->flags & IFF_UP) {
                if (netif_carrier_ok(dev))
                        dev_activate(dev);
                else
@@ -204,7 +204,8 @@ static void __linkwatch_run_queue(int urgent_only)
                dev = list_first_entry(&wrk, struct net_device, link_watch_list);
                list_del_init(&dev->link_watch_list);
 
-               if (urgent_only && !linkwatch_urgent_event(dev)) {
+               if (!netif_device_present(dev) ||
+                   (urgent_only && !linkwatch_urgent_event(dev))) {
                        list_add_tail(&dev->link_watch_list, &lweventlist);
                        continue;
                }
index 5e4eb45..8ab7b40 100644 (file)
@@ -634,7 +634,15 @@ bool page_pool_return_skb_page(struct page *page)
        struct page_pool *pp;
 
        page = compound_head(page);
-       if (unlikely(page->pp_magic != PP_SIGNATURE))
+
+       /* page->pp_magic is OR'ed with PP_SIGNATURE after the allocation
+        * in order to preserve any existing bits, such as bit 0 for the
+        * head page of compound page and bit 1 for pfmemalloc page, so
+        * mask those bits for freeing side when doing below checking,
+        * and page_is_pfmemalloc() is checked in __page_pool_put_page()
+        * to avoid recycling the pfmemalloc page.
+        */
+       if (unlikely((page->pp_magic & ~0x3UL) != PP_SIGNATURE))
                return false;
 
        pp = page->pp;
index f6af3e7..662eb1c 100644 (file)
@@ -2608,6 +2608,7 @@ static int do_setlink(const struct sk_buff *skb,
                return err;
 
        if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD] || tb[IFLA_TARGET_NETNSID]) {
+               const char *pat = ifname && ifname[0] ? ifname : NULL;
                struct net *net;
                int new_ifindex;
 
@@ -2623,7 +2624,7 @@ static int do_setlink(const struct sk_buff *skb,
                else
                        new_ifindex = 0;
 
-               err = __dev_change_net_namespace(dev, net, ifname, new_ifindex);
+               err = __dev_change_net_namespace(dev, net, pat, new_ifindex);
                put_net(net);
                if (err)
                        goto errout;
index 15d7128..2d6249b 100644 (file)
@@ -584,29 +584,42 @@ static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb,
        return sk_psock_skb_ingress(psock, skb);
 }
 
-static void sock_drop(struct sock *sk, struct sk_buff *skb)
+static void sk_psock_skb_state(struct sk_psock *psock,
+                              struct sk_psock_work_state *state,
+                              struct sk_buff *skb,
+                              int len, int off)
 {
-       sk_drops_add(sk, skb);
-       kfree_skb(skb);
+       spin_lock_bh(&psock->ingress_lock);
+       if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
+               state->skb = skb;
+               state->len = len;
+               state->off = off;
+       } else {
+               sock_drop(psock->sk, skb);
+       }
+       spin_unlock_bh(&psock->ingress_lock);
 }
 
 static void sk_psock_backlog(struct work_struct *work)
 {
        struct sk_psock *psock = container_of(work, struct sk_psock, work);
        struct sk_psock_work_state *state = &psock->work_state;
-       struct sk_buff *skb;
+       struct sk_buff *skb = NULL;
        bool ingress;
        u32 len, off;
        int ret;
 
        mutex_lock(&psock->work_mutex);
-       if (state->skb) {
+       if (unlikely(state->skb)) {
+               spin_lock_bh(&psock->ingress_lock);
                skb = state->skb;
                len = state->len;
                off = state->off;
                state->skb = NULL;
-               goto start;
+               spin_unlock_bh(&psock->ingress_lock);
        }
+       if (skb)
+               goto start;
 
        while ((skb = skb_dequeue(&psock->ingress_skb))) {
                len = skb->len;
@@ -621,9 +634,8 @@ start:
                                                          len, ingress);
                        if (ret <= 0) {
                                if (ret == -EAGAIN) {
-                                       state->skb = skb;
-                                       state->len = len;
-                                       state->off = off;
+                                       sk_psock_skb_state(psock, state, skb,
+                                                          len, off);
                                        goto end;
                                }
                                /* Hard errors break pipe and stop xmit. */
@@ -722,6 +734,11 @@ static void __sk_psock_zap_ingress(struct sk_psock *psock)
                skb_bpf_redirect_clear(skb);
                sock_drop(psock->sk, skb);
        }
+       kfree_skb(psock->work_state.skb);
+       /* We null the skb here to ensure that calls to sk_psock_backlog
+        * do not pick up the free'd skb.
+        */
+       psock->work_state.skb = NULL;
        __sk_psock_purge_ingress_msg(psock);
 }
 
@@ -773,8 +790,6 @@ static void sk_psock_destroy(struct work_struct *work)
 
 void sk_psock_drop(struct sock *sk, struct sk_psock *psock)
 {
-       sk_psock_stop(psock, false);
-
        write_lock_bh(&sk->sk_callback_lock);
        sk_psock_restore_proto(sk, psock);
        rcu_assign_sk_user_data(sk, NULL);
@@ -784,6 +799,8 @@ void sk_psock_drop(struct sock *sk, struct sk_psock *psock)
                sk_psock_stop_verdict(sk, psock);
        write_unlock_bh(&sk->sk_callback_lock);
 
+       sk_psock_stop(psock, false);
+
        INIT_RCU_WORK(&psock->rwork, sk_psock_destroy);
        queue_rcu_work(system_wq, &psock->rwork);
 }
index 9cc9d1e..c5c1d2b 100644 (file)
@@ -41,9 +41,9 @@ extern bool dccp_debug;
 #define dccp_pr_debug_cat(format, a...)   DCCP_PRINTK(dccp_debug, format, ##a)
 #define dccp_debug(fmt, a...)            dccp_pr_debug_cat(KERN_DEBUG fmt, ##a)
 #else
-#define dccp_pr_debug(format, a...)
-#define dccp_pr_debug_cat(format, a...)
-#define dccp_debug(format, a...)
+#define dccp_pr_debug(format, a...)      do {} while (0)
+#define dccp_pr_debug_cat(format, a...)          do {} while (0)
+#define dccp_debug(format, a...)         do {} while (0)
 #endif
 
 extern struct inet_hashinfo dccp_hashinfo;
index 532085d..23be8e0 100644 (file)
@@ -2291,8 +2291,8 @@ static int dsa_slave_netdevice_event(struct notifier_block *nb,
 static void
 dsa_fdb_offload_notify(struct dsa_switchdev_event_work *switchdev_work)
 {
+       struct switchdev_notifier_fdb_info info = {};
        struct dsa_switch *ds = switchdev_work->ds;
-       struct switchdev_notifier_fdb_info info;
        struct dsa_port *dp;
 
        if (!dsa_is_user_port(ds, switchdev_work->port))
index a45a040..c25f761 100644 (file)
@@ -984,6 +984,11 @@ static const struct proto_ops ieee802154_dgram_ops = {
        .sendpage          = sock_no_sendpage,
 };
 
+static void ieee802154_sock_destruct(struct sock *sk)
+{
+       skb_queue_purge(&sk->sk_receive_queue);
+}
+
 /* Create a socket. Initialise the socket, blank the addresses
  * set the state.
  */
@@ -1024,7 +1029,7 @@ static int ieee802154_create(struct net *net, struct socket *sock,
        sock->ops = ops;
 
        sock_init_data(sock, sk);
-       /* FIXME: sk->sk_destruct */
+       sk->sk_destruct = ieee802154_sock_destruct;
        sk->sk_family = PF_IEEE802154;
 
        /* Checksums on by default */
index 099259f..7fbd0b5 100644 (file)
@@ -465,14 +465,16 @@ void cipso_v4_doi_free(struct cipso_v4_doi *doi_def)
        if (!doi_def)
                return;
 
-       switch (doi_def->type) {
-       case CIPSO_V4_MAP_TRANS:
-               kfree(doi_def->map.std->lvl.cipso);
-               kfree(doi_def->map.std->lvl.local);
-               kfree(doi_def->map.std->cat.cipso);
-               kfree(doi_def->map.std->cat.local);
-               kfree(doi_def->map.std);
-               break;
+       if (doi_def->map.std) {
+               switch (doi_def->type) {
+               case CIPSO_V4_MAP_TRANS:
+                       kfree(doi_def->map.std->lvl.cipso);
+                       kfree(doi_def->map.std->lvl.local);
+                       kfree(doi_def->map.std->cat.cipso);
+                       kfree(doi_def->map.std->cat.local);
+                       kfree(doi_def->map.std);
+                       break;
+               }
        }
        kfree(doi_def);
 }
index 6b3c558..00576ba 100644 (file)
@@ -803,10 +803,17 @@ static void igmp_gq_timer_expire(struct timer_list *t)
 static void igmp_ifc_timer_expire(struct timer_list *t)
 {
        struct in_device *in_dev = from_timer(in_dev, t, mr_ifc_timer);
+       u32 mr_ifc_count;
 
        igmpv3_send_cr(in_dev);
-       if (in_dev->mr_ifc_count) {
-               in_dev->mr_ifc_count--;
+restart:
+       mr_ifc_count = READ_ONCE(in_dev->mr_ifc_count);
+
+       if (mr_ifc_count) {
+               if (cmpxchg(&in_dev->mr_ifc_count,
+                           mr_ifc_count,
+                           mr_ifc_count - 1) != mr_ifc_count)
+                       goto restart;
                igmp_ifc_start_timer(in_dev,
                                     unsolicited_report_interval(in_dev));
        }
@@ -818,7 +825,7 @@ static void igmp_ifc_event(struct in_device *in_dev)
        struct net *net = dev_net(in_dev->dev);
        if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev))
                return;
-       in_dev->mr_ifc_count = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
+       WRITE_ONCE(in_dev->mr_ifc_count, in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv);
        igmp_ifc_start_timer(in_dev, 1);
 }
 
@@ -957,7 +964,7 @@ static bool igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
                                in_dev->mr_qri;
                }
                /* cancel the interface change timer */
-               in_dev->mr_ifc_count = 0;
+               WRITE_ONCE(in_dev->mr_ifc_count, 0);
                if (del_timer(&in_dev->mr_ifc_timer))
                        __in_dev_put(in_dev);
                /* clear deleted report items */
@@ -1724,7 +1731,7 @@ void ip_mc_down(struct in_device *in_dev)
                igmp_group_dropped(pmc);
 
 #ifdef CONFIG_IP_MULTICAST
-       in_dev->mr_ifc_count = 0;
+       WRITE_ONCE(in_dev->mr_ifc_count, 0);
        if (del_timer(&in_dev->mr_ifc_timer))
                __in_dev_put(in_dev);
        in_dev->mr_gq_running = 0;
@@ -1941,7 +1948,7 @@ static int ip_mc_del_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
                pmc->sfmode = MCAST_INCLUDE;
 #ifdef CONFIG_IP_MULTICAST
                pmc->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
-               in_dev->mr_ifc_count = pmc->crcount;
+               WRITE_ONCE(in_dev->mr_ifc_count, pmc->crcount);
                for (psf = pmc->sources; psf; psf = psf->sf_next)
                        psf->sf_crcount = 0;
                igmp_ifc_event(pmc->interface);
@@ -2120,7 +2127,7 @@ static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
                /* else no filters; keep old mode for reports */
 
                pmc->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
-               in_dev->mr_ifc_count = pmc->crcount;
+               WRITE_ONCE(in_dev->mr_ifc_count, pmc->crcount);
                for (psf = pmc->sources; psf; psf = psf->sf_next)
                        psf->sf_crcount = 0;
                igmp_ifc_event(in_dev);
index 12dca0c..95419b7 100644 (file)
@@ -473,6 +473,8 @@ static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
 
 static int gre_handle_offloads(struct sk_buff *skb, bool csum)
 {
+       if (csum && skb_checksum_start(skb) < skb->data)
+               return -EINVAL;
        return iptunnel_handle_offloads(skb, csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
 }
 
index 0dca007..be75b40 100644 (file)
@@ -390,7 +390,7 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
                tunnel->i_seqno = ntohl(tpi->seq) + 1;
        }
 
-       skb_reset_network_header(skb);
+       skb_set_network_header(skb, (tunnel->dev->type == ARPHRD_ETHER) ? ETH_HLEN : 0);
 
        err = IP_ECN_decapsulate(iph, skb);
        if (unlikely(err)) {
index 99c0694..a6f20ee 100644 (file)
@@ -600,14 +600,14 @@ static struct fib_nh_exception *fnhe_oldest(struct fnhe_hash_bucket *hash)
        return oldest;
 }
 
-static inline u32 fnhe_hashfun(__be32 daddr)
+static u32 fnhe_hashfun(__be32 daddr)
 {
-       static u32 fnhe_hashrnd __read_mostly;
-       u32 hval;
+       static siphash_key_t fnhe_hash_key __read_mostly;
+       u64 hval;
 
-       net_get_random_once(&fnhe_hashrnd, sizeof(fnhe_hashrnd));
-       hval = jhash_1word((__force u32)daddr, fnhe_hashrnd);
-       return hash_32(hval, FNHE_HASH_SHIFT);
+       net_get_random_once(&fnhe_hash_key, sizeof(fnhe_hash_key));
+       hval = siphash_1u32((__force u32)daddr, &fnhe_hash_key);
+       return hash_64(hval, FNHE_HASH_SHIFT);
 }
 
 static void fill_route_from_fnhe(struct rtable *rt, struct fib_nh_exception *fnhe)
index 6ea3dc2..6274462 100644 (file)
@@ -1041,7 +1041,7 @@ static void bbr_init(struct sock *sk)
        bbr->prior_cwnd = 0;
        tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
        bbr->rtt_cnt = 0;
-       bbr->next_rtt_delivered = 0;
+       bbr->next_rtt_delivered = tp->delivered;
        bbr->prev_ca_state = TCP_CA_Open;
        bbr->packet_conservation = 0;
 
index e09147a..fc61cd3 100644 (file)
@@ -298,6 +298,9 @@ int tcp_gro_complete(struct sk_buff *skb)
        if (th->cwr)
                skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
 
+       if (skb->encapsulation)
+               skb->inner_transport_header = skb->transport_header;
+
        return 0;
 }
 EXPORT_SYMBOL(tcp_gro_complete);
index 9dde1e5..1380a6b 100644 (file)
@@ -624,6 +624,10 @@ static int udp_gro_complete_segment(struct sk_buff *skb)
 
        skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
        skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_L4;
+
+       if (skb->encapsulation)
+               skb->inner_transport_header = skb->transport_header;
+
        return 0;
 }
 
index 2d650dc..ef75c9b 100644 (file)
@@ -1341,7 +1341,7 @@ static void __fib6_update_sernum_upto_root(struct fib6_info *rt,
        struct fib6_node *fn = rcu_dereference_protected(rt->fib6_node,
                                lockdep_is_held(&rt->fib6_table->tb6_lock));
 
-       /* paired with smp_rmb() in rt6_get_cookie_safe() */
+       /* paired with smp_rmb() in fib6_get_cookie_safe() */
        smp_wmb();
        while (fn) {
                fn->fn_sernum = sernum;
index bc224f9..7a5e90e 100644 (file)
@@ -629,6 +629,8 @@ drop:
 
 static int gre_handle_offloads(struct sk_buff *skb, bool csum)
 {
+       if (csum && skb_checksum_start(skb) < skb->data)
+               return -EINVAL;
        return iptunnel_handle_offloads(skb,
                                        csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
 }
index e1b9f7a..8e6ca9a 100644 (file)
@@ -549,9 +549,10 @@ int ip6_forward(struct sk_buff *skb)
        if (net->ipv6.devconf_all->proxy_ndp &&
            pneigh_lookup(&nd_tbl, net, &hdr->daddr, skb->dev, 0)) {
                int proxied = ip6_forward_proxy_check(skb);
-               if (proxied > 0)
+               if (proxied > 0) {
+                       hdr->hop_limit--;
                        return ip6_input(skb);
-               else if (proxied < 0) {
+               } else if (proxied < 0) {
                        __IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS);
                        goto drop;
                }
index b6ddf23..c5e8ecb 100644 (file)
@@ -41,6 +41,7 @@
 #include <linux/nsproxy.h>
 #include <linux/slab.h>
 #include <linux/jhash.h>
+#include <linux/siphash.h>
 #include <net/net_namespace.h>
 #include <net/snmp.h>
 #include <net/ipv6.h>
@@ -1484,17 +1485,24 @@ static void rt6_exception_remove_oldest(struct rt6_exception_bucket *bucket)
 static u32 rt6_exception_hash(const struct in6_addr *dst,
                              const struct in6_addr *src)
 {
-       static u32 seed __read_mostly;
-       u32 val;
+       static siphash_key_t rt6_exception_key __read_mostly;
+       struct {
+               struct in6_addr dst;
+               struct in6_addr src;
+       } __aligned(SIPHASH_ALIGNMENT) combined = {
+               .dst = *dst,
+       };
+       u64 val;
 
-       net_get_random_once(&seed, sizeof(seed));
-       val = jhash2((const u32 *)dst, sizeof(*dst)/sizeof(u32), seed);
+       net_get_random_once(&rt6_exception_key, sizeof(rt6_exception_key));
 
 #ifdef CONFIG_IPV6_SUBTREES
        if (src)
-               val = jhash2((const u32 *)src, sizeof(*src)/sizeof(u32), val);
+               combined.src = *src;
 #endif
-       return hash_32(val, FIB6_EXCEPTION_BUCKET_SIZE_SHIFT);
+       val = siphash(&combined, sizeof(combined), &rt6_exception_key);
+
+       return hash_64(val, FIB6_EXCEPTION_BUCKET_SIZE_SHIFT);
 }
 
 /* Helper function to find the cached rt in the hash table
index 7180979..ac5cadd 100644 (file)
@@ -98,8 +98,16 @@ static inline u8 llc_ui_header_len(struct sock *sk, struct sockaddr_llc *addr)
 {
        u8 rc = LLC_PDU_LEN_U;
 
-       if (addr->sllc_test || addr->sllc_xid)
+       if (addr->sllc_test)
                rc = LLC_PDU_LEN_U;
+       else if (addr->sllc_xid)
+               /* We need to expand header to sizeof(struct llc_xid_info)
+                * since llc_pdu_init_as_xid_cmd() sets 4,5,6 bytes of LLC header
+                * as XID PDU. In llc_ui_sendmsg() we reserved header size and then
+                * filled all other space with user data. If we won't reserve this
+                * bytes, llc_pdu_init_as_xid_cmd() will overwrite user data
+                */
+               rc = LLC_PDU_LEN_U_XID;
        else if (sk->sk_type == SOCK_STREAM)
                rc = LLC_PDU_LEN_I;
        return rc;
index b554f26..79d1cef 100644 (file)
@@ -79,7 +79,7 @@ int llc_sap_action_send_xid_c(struct llc_sap *sap, struct sk_buff *skb)
        struct llc_sap_state_ev *ev = llc_sap_ev(skb);
        int rc;
 
-       llc_pdu_header_init(skb, LLC_PDU_TYPE_U, ev->saddr.lsap,
+       llc_pdu_header_init(skb, LLC_PDU_TYPE_U_XID, ev->saddr.lsap,
                            ev->daddr.lsap, LLC_PDU_CMD);
        llc_pdu_init_as_xid_cmd(skb, LLC_XID_NULL_CLASS_2, 0);
        rc = llc_mac_hdr_init(skb, ev->saddr.mac, ev->daddr.mac);
index 84cc773..4e6f11e 100644 (file)
@@ -152,6 +152,8 @@ static int ieee80211_change_iface(struct wiphy *wiphy,
                                  struct vif_params *params)
 {
        struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+       struct ieee80211_local *local = sdata->local;
+       struct sta_info *sta;
        int ret;
 
        ret = ieee80211_if_change_type(sdata, type);
@@ -162,7 +164,24 @@ static int ieee80211_change_iface(struct wiphy *wiphy,
                RCU_INIT_POINTER(sdata->u.vlan.sta, NULL);
                ieee80211_check_fast_rx_iface(sdata);
        } else if (type == NL80211_IFTYPE_STATION && params->use_4addr >= 0) {
+               struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+
+               if (params->use_4addr == ifmgd->use_4addr)
+                       return 0;
+
                sdata->u.mgd.use_4addr = params->use_4addr;
+               if (!ifmgd->associated)
+                       return 0;
+
+               mutex_lock(&local->sta_mtx);
+               sta = sta_info_get(sdata, ifmgd->bssid);
+               if (sta)
+                       drv_sta_set_4addr(local, sdata, &sta->sta,
+                                         params->use_4addr);
+               mutex_unlock(&local->sta_mtx);
+
+               if (params->use_4addr)
+                       ieee80211_send_4addr_nullfunc(local, sdata);
        }
 
        if (sdata->vif.type == NL80211_IFTYPE_MONITOR) {
index 22549b9..30ce6d2 100644 (file)
@@ -2201,6 +2201,8 @@ void ieee80211_dynamic_ps_timer(struct timer_list *t);
 void ieee80211_send_nullfunc(struct ieee80211_local *local,
                             struct ieee80211_sub_if_data *sdata,
                             bool powersave);
+void ieee80211_send_4addr_nullfunc(struct ieee80211_local *local,
+                                  struct ieee80211_sub_if_data *sdata);
 void ieee80211_sta_tx_notify(struct ieee80211_sub_if_data *sdata,
                             struct ieee80211_hdr *hdr, bool ack, u16 tx_time);
 
index 05f4c3c..fcae76d 100644 (file)
@@ -260,6 +260,8 @@ static void ieee80211_restart_work(struct work_struct *work)
        flush_work(&local->radar_detected_work);
 
        rtnl_lock();
+       /* we might do interface manipulations, so need both */
+       wiphy_lock(local->hw.wiphy);
 
        WARN(test_bit(SCAN_HW_SCANNING, &local->scanning),
             "%s called with hardware scan in progress\n", __func__);
index a00f11a..c0ea3b1 100644 (file)
@@ -1095,8 +1095,8 @@ void ieee80211_send_nullfunc(struct ieee80211_local *local,
        ieee80211_tx_skb(sdata, skb);
 }
 
-static void ieee80211_send_4addr_nullfunc(struct ieee80211_local *local,
-                                         struct ieee80211_sub_if_data *sdata)
+void ieee80211_send_4addr_nullfunc(struct ieee80211_local *local,
+                                  struct ieee80211_sub_if_data *sdata)
 {
        struct sk_buff *skb;
        struct ieee80211_hdr *nullfunc;
index 771921c..2563473 100644 (file)
@@ -730,7 +730,8 @@ ieee80211_make_monitor_skb(struct ieee80211_local *local,
                 * Need to make a copy and possibly remove radiotap header
                 * and FCS from the original.
                 */
-               skb = skb_copy_expand(*origskb, needed_headroom, 0, GFP_ATOMIC);
+               skb = skb_copy_expand(*origskb, needed_headroom + NET_SKB_PAD,
+                                     0, GFP_ATOMIC);
 
                if (!skb)
                        return NULL;
index e969811..8509778 100644 (file)
@@ -1147,6 +1147,29 @@ static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx,
        return queued;
 }
 
+static void
+ieee80211_aggr_check(struct ieee80211_sub_if_data *sdata,
+                    struct sta_info *sta,
+                    struct sk_buff *skb)
+{
+       struct rate_control_ref *ref = sdata->local->rate_ctrl;
+       u16 tid;
+
+       if (!ref || !(ref->ops->capa & RATE_CTRL_CAPA_AMPDU_TRIGGER))
+               return;
+
+       if (!sta || !sta->sta.ht_cap.ht_supported ||
+           !sta->sta.wme || skb_get_queue_mapping(skb) == IEEE80211_AC_VO ||
+           skb->protocol == sdata->control_port_protocol)
+               return;
+
+       tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
+       if (likely(sta->ampdu_mlme.tid_tx[tid]))
+               return;
+
+       ieee80211_start_tx_ba_session(&sta->sta, tid, 0);
+}
+
 /*
  * initialises @tx
  * pass %NULL for the station if unknown, a valid pointer if known
@@ -1160,6 +1183,7 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_hdr *hdr;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       bool aggr_check = false;
        int tid;
 
        memset(tx, 0, sizeof(*tx));
@@ -1188,8 +1212,10 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
                } else if (tx->sdata->control_port_protocol == tx->skb->protocol) {
                        tx->sta = sta_info_get_bss(sdata, hdr->addr1);
                }
-               if (!tx->sta && !is_multicast_ether_addr(hdr->addr1))
+               if (!tx->sta && !is_multicast_ether_addr(hdr->addr1)) {
                        tx->sta = sta_info_get(sdata, hdr->addr1);
+                       aggr_check = true;
+               }
        }
 
        if (tx->sta && ieee80211_is_data_qos(hdr->frame_control) &&
@@ -1199,8 +1225,12 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
                struct tid_ampdu_tx *tid_tx;
 
                tid = ieee80211_get_tid(hdr);
-
                tid_tx = rcu_dereference(tx->sta->ampdu_mlme.tid_tx[tid]);
+               if (!tid_tx && aggr_check) {
+                       ieee80211_aggr_check(sdata, tx->sta, skb);
+                       tid_tx = rcu_dereference(tx->sta->ampdu_mlme.tid_tx[tid]);
+               }
+
                if (tid_tx) {
                        bool queued;
 
@@ -4120,29 +4150,6 @@ void ieee80211_txq_schedule_start(struct ieee80211_hw *hw, u8 ac)
 }
 EXPORT_SYMBOL(ieee80211_txq_schedule_start);
 
-static void
-ieee80211_aggr_check(struct ieee80211_sub_if_data *sdata,
-                    struct sta_info *sta,
-                    struct sk_buff *skb)
-{
-       struct rate_control_ref *ref = sdata->local->rate_ctrl;
-       u16 tid;
-
-       if (!ref || !(ref->ops->capa & RATE_CTRL_CAPA_AMPDU_TRIGGER))
-               return;
-
-       if (!sta || !sta->sta.ht_cap.ht_supported ||
-           !sta->sta.wme || skb_get_queue_mapping(skb) == IEEE80211_AC_VO ||
-           skb->protocol == sdata->control_port_protocol)
-               return;
-
-       tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
-       if (likely(sta->ampdu_mlme.tid_tx[tid]))
-               return;
-
-       ieee80211_start_tx_ba_session(&sta->sta, tid, 0);
-}
-
 void __ieee80211_subif_start_xmit(struct sk_buff *skb,
                                  struct net_device *dev,
                                  u32 info_flags,
index 4452455..7adcbc1 100644 (file)
@@ -885,20 +885,16 @@ static bool check_fully_established(struct mptcp_sock *msk, struct sock *ssk,
                return subflow->mp_capable;
        }
 
-       if (mp_opt->dss && mp_opt->use_ack) {
+       if ((mp_opt->dss && mp_opt->use_ack) ||
+           (mp_opt->add_addr && !mp_opt->echo)) {
                /* subflows are fully established as soon as we get any
-                * additional ack.
+                * additional ack, including ADD_ADDR.
                 */
                subflow->fully_established = 1;
                WRITE_ONCE(msk->fully_established, true);
                goto fully_established;
        }
 
-       if (mp_opt->add_addr) {
-               WRITE_ONCE(msk->fully_established, true);
-               return true;
-       }
-
        /* If the first established packet does not contain MP_CAPABLE + data
         * then fallback to TCP. Fallback scenarios requires a reset for
         * MP_JOIN subflows.
index d2591eb..7b37944 100644 (file)
@@ -27,7 +27,6 @@ struct mptcp_pm_addr_entry {
        struct mptcp_addr_info  addr;
        u8                      flags;
        int                     ifindex;
-       struct rcu_head         rcu;
        struct socket           *lsk;
 };
 
@@ -1136,36 +1135,12 @@ next:
        return 0;
 }
 
-struct addr_entry_release_work {
-       struct rcu_work rwork;
-       struct mptcp_pm_addr_entry *entry;
-};
-
-static void mptcp_pm_release_addr_entry(struct work_struct *work)
+/* caller must ensure the RCU grace period is already elapsed */
+static void __mptcp_pm_release_addr_entry(struct mptcp_pm_addr_entry *entry)
 {
-       struct addr_entry_release_work *w;
-       struct mptcp_pm_addr_entry *entry;
-
-       w = container_of(to_rcu_work(work), struct addr_entry_release_work, rwork);
-       entry = w->entry;
-       if (entry) {
-               if (entry->lsk)
-                       sock_release(entry->lsk);
-               kfree(entry);
-       }
-       kfree(w);
-}
-
-static void mptcp_pm_free_addr_entry(struct mptcp_pm_addr_entry *entry)
-{
-       struct addr_entry_release_work *w;
-
-       w = kmalloc(sizeof(*w), GFP_ATOMIC);
-       if (w) {
-               INIT_RCU_WORK(&w->rwork, mptcp_pm_release_addr_entry);
-               w->entry = entry;
-               queue_rcu_work(system_wq, &w->rwork);
-       }
+       if (entry->lsk)
+               sock_release(entry->lsk);
+       kfree(entry);
 }
 
 static int mptcp_nl_remove_id_zero_address(struct net *net,
@@ -1245,7 +1220,8 @@ static int mptcp_nl_cmd_del_addr(struct sk_buff *skb, struct genl_info *info)
        spin_unlock_bh(&pernet->lock);
 
        mptcp_nl_remove_subflow_and_signal_addr(sock_net(skb->sk), &entry->addr);
-       mptcp_pm_free_addr_entry(entry);
+       synchronize_rcu();
+       __mptcp_pm_release_addr_entry(entry);
 
        return ret;
 }
@@ -1298,6 +1274,7 @@ static void mptcp_nl_remove_addrs_list(struct net *net,
        }
 }
 
+/* caller must ensure the RCU grace period is already elapsed */
 static void __flush_addrs(struct list_head *list)
 {
        while (!list_empty(list)) {
@@ -1306,7 +1283,7 @@ static void __flush_addrs(struct list_head *list)
                cur = list_entry(list->next,
                                 struct mptcp_pm_addr_entry, list);
                list_del_rcu(&cur->list);
-               mptcp_pm_free_addr_entry(cur);
+               __mptcp_pm_release_addr_entry(cur);
        }
 }
 
@@ -1330,6 +1307,7 @@ static int mptcp_nl_cmd_flush_addrs(struct sk_buff *skb, struct genl_info *info)
        bitmap_zero(pernet->id_bitmap, MAX_ADDR_ID + 1);
        spin_unlock_bh(&pernet->lock);
        mptcp_nl_remove_addrs_list(sock_net(skb->sk), &free_list);
+       synchronize_rcu();
        __flush_addrs(&free_list);
        return 0;
 }
@@ -1940,7 +1918,8 @@ static void __net_exit pm_nl_exit_net(struct list_head *net_list)
                struct pm_nl_pernet *pernet = net_generic(net, pm_nl_pernet_id);
 
                /* net is removed from namespace list, can't race with
-                * other modifiers
+                * other modifiers, also netns core already waited for a
+                * RCU grace period.
                 */
                __flush_addrs(&pernet->local_addr_list);
        }
index d1bef23..dd30c03 100644 (file)
@@ -132,8 +132,11 @@ hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[],
                ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
                if (ret)
                        return ret;
-               if (ip > ip_to)
+               if (ip > ip_to) {
+                       if (ip_to == 0)
+                               return -IPSET_ERR_HASH_ELEM;
                        swap(ip, ip_to);
+               }
        } else if (tb[IPSET_ATTR_CIDR]) {
                u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
 
@@ -144,6 +147,10 @@ hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[],
 
        hosts = h->netmask == 32 ? 1 : 2 << (32 - h->netmask - 1);
 
+       /* 64bit division is not allowed on 32bit */
+       if (((u64)ip_to - ip + 1) >> (32 - h->netmask) > IPSET_MAX_RANGE)
+               return -ERANGE;
+
        if (retried) {
                ip = ntohl(h->next.ip);
                e.ip = htonl(ip);
index 18346d1..153de34 100644 (file)
@@ -121,6 +121,8 @@ hash_ipmark4_uadt(struct ip_set *set, struct nlattr *tb[],
 
        e.mark = ntohl(nla_get_be32(tb[IPSET_ATTR_MARK]));
        e.mark &= h->markmask;
+       if (e.mark == 0 && e.ip == 0)
+               return -IPSET_ERR_HASH_ELEM;
 
        if (adt == IPSET_TEST ||
            !(tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_CIDR])) {
@@ -133,8 +135,11 @@ hash_ipmark4_uadt(struct ip_set *set, struct nlattr *tb[],
                ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
                if (ret)
                        return ret;
-               if (ip > ip_to)
+               if (ip > ip_to) {
+                       if (e.mark == 0 && ip_to == 0)
+                               return -IPSET_ERR_HASH_ELEM;
                        swap(ip, ip_to);
+               }
        } else if (tb[IPSET_ATTR_CIDR]) {
                u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
 
@@ -143,6 +148,9 @@ hash_ipmark4_uadt(struct ip_set *set, struct nlattr *tb[],
                ip_set_mask_from_to(ip, ip_to, cidr);
        }
 
+       if (((u64)ip_to - ip + 1) > IPSET_MAX_RANGE)
+               return -ERANGE;
+
        if (retried)
                ip = ntohl(h->next.ip);
        for (; ip <= ip_to; ip++) {
index e1ca111..7303138 100644 (file)
@@ -173,6 +173,9 @@ hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[],
                        swap(port, port_to);
        }
 
+       if (((u64)ip_to - ip + 1)*(port_to - port + 1) > IPSET_MAX_RANGE)
+               return -ERANGE;
+
        if (retried)
                ip = ntohl(h->next.ip);
        for (; ip <= ip_to; ip++) {
index ab179e0..334fb1a 100644 (file)
@@ -180,6 +180,9 @@ hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[],
                        swap(port, port_to);
        }
 
+       if (((u64)ip_to - ip + 1)*(port_to - port + 1) > IPSET_MAX_RANGE)
+               return -ERANGE;
+
        if (retried)
                ip = ntohl(h->next.ip);
        for (; ip <= ip_to; ip++) {
index 8f075b4..7df94f4 100644 (file)
@@ -253,6 +253,9 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
                        swap(port, port_to);
        }
 
+       if (((u64)ip_to - ip + 1)*(port_to - port + 1) > IPSET_MAX_RANGE)
+               return -ERANGE;
+
        ip2_to = ip2_from;
        if (tb[IPSET_ATTR_IP2_TO]) {
                ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP2_TO], &ip2_to);
index c1a11f0..1422739 100644 (file)
@@ -140,7 +140,7 @@ hash_net4_uadt(struct ip_set *set, struct nlattr *tb[],
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_net4_elem e = { .cidr = HOST_MASK };
        struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
-       u32 ip = 0, ip_to = 0;
+       u32 ip = 0, ip_to = 0, ipn, n = 0;
        int ret;
 
        if (tb[IPSET_ATTR_LINENO])
@@ -188,6 +188,15 @@ hash_net4_uadt(struct ip_set *set, struct nlattr *tb[],
                if (ip + UINT_MAX == ip_to)
                        return -IPSET_ERR_HASH_RANGE;
        }
+       ipn = ip;
+       do {
+               ipn = ip_set_range_to_cidr(ipn, ip_to, &e.cidr);
+               n++;
+       } while (ipn++ < ip_to);
+
+       if (n > IPSET_MAX_RANGE)
+               return -ERANGE;
+
        if (retried)
                ip = ntohl(h->next.ip);
        do {
index ddd51c2..9810f5b 100644 (file)
@@ -202,7 +202,7 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[],
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_netiface4_elem e = { .cidr = HOST_MASK, .elem = 1 };
        struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
-       u32 ip = 0, ip_to = 0;
+       u32 ip = 0, ip_to = 0, ipn, n = 0;
        int ret;
 
        if (tb[IPSET_ATTR_LINENO])
@@ -256,6 +256,14 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[],
        } else {
                ip_set_mask_from_to(ip, ip_to, e.cidr);
        }
+       ipn = ip;
+       do {
+               ipn = ip_set_range_to_cidr(ipn, ip_to, &e.cidr);
+               n++;
+       } while (ipn++ < ip_to);
+
+       if (n > IPSET_MAX_RANGE)
+               return -ERANGE;
 
        if (retried)
                ip = ntohl(h->next.ip);
index 6532f05..3d09eef 100644 (file)
@@ -168,7 +168,8 @@ hash_netnet4_uadt(struct ip_set *set, struct nlattr *tb[],
        struct hash_netnet4_elem e = { };
        struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
        u32 ip = 0, ip_to = 0;
-       u32 ip2 = 0, ip2_from = 0, ip2_to = 0;
+       u32 ip2 = 0, ip2_from = 0, ip2_to = 0, ipn;
+       u64 n = 0, m = 0;
        int ret;
 
        if (tb[IPSET_ATTR_LINENO])
@@ -244,6 +245,19 @@ hash_netnet4_uadt(struct ip_set *set, struct nlattr *tb[],
        } else {
                ip_set_mask_from_to(ip2_from, ip2_to, e.cidr[1]);
        }
+       ipn = ip;
+       do {
+               ipn = ip_set_range_to_cidr(ipn, ip_to, &e.cidr[0]);
+               n++;
+       } while (ipn++ < ip_to);
+       ipn = ip2_from;
+       do {
+               ipn = ip_set_range_to_cidr(ipn, ip2_to, &e.cidr[1]);
+               m++;
+       } while (ipn++ < ip2_to);
+
+       if (n*m > IPSET_MAX_RANGE)
+               return -ERANGE;
 
        if (retried) {
                ip = ntohl(h->next.ip[0]);
index ec1564a..09cf72e 100644 (file)
@@ -158,7 +158,8 @@ hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[],
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_netport4_elem e = { .cidr = HOST_MASK - 1 };
        struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
-       u32 port, port_to, p = 0, ip = 0, ip_to = 0;
+       u32 port, port_to, p = 0, ip = 0, ip_to = 0, ipn;
+       u64 n = 0;
        bool with_ports = false;
        u8 cidr;
        int ret;
@@ -235,6 +236,14 @@ hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[],
        } else {
                ip_set_mask_from_to(ip, ip_to, e.cidr + 1);
        }
+       ipn = ip;
+       do {
+               ipn = ip_set_range_to_cidr(ipn, ip_to, &cidr);
+               n++;
+       } while (ipn++ < ip_to);
+
+       if (n*(port_to - port + 1) > IPSET_MAX_RANGE)
+               return -ERANGE;
 
        if (retried) {
                ip = ntohl(h->next.ip);
index 0e91d1e..19bcdb3 100644 (file)
@@ -182,7 +182,8 @@ hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
        struct hash_netportnet4_elem e = { };
        struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
        u32 ip = 0, ip_to = 0, p = 0, port, port_to;
-       u32 ip2_from = 0, ip2_to = 0, ip2;
+       u32 ip2_from = 0, ip2_to = 0, ip2, ipn;
+       u64 n = 0, m = 0;
        bool with_ports = false;
        int ret;
 
@@ -284,6 +285,19 @@ hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
        } else {
                ip_set_mask_from_to(ip2_from, ip2_to, e.cidr[1]);
        }
+       ipn = ip;
+       do {
+               ipn = ip_set_range_to_cidr(ipn, ip_to, &e.cidr[0]);
+               n++;
+       } while (ipn++ < ip_to);
+       ipn = ip2_from;
+       do {
+               ipn = ip_set_range_to_cidr(ipn, ip2_to, &e.cidr[1]);
+               m++;
+       } while (ipn++ < ip2_to);
+
+       if (n*m*(port_to - port + 1) > IPSET_MAX_RANGE)
+               return -ERANGE;
 
        if (retried) {
                ip = ntohl(h->next.ip[0]);
index 83c52df..d31dbcc 100644 (file)
@@ -66,22 +66,17 @@ EXPORT_SYMBOL_GPL(nf_conntrack_hash);
 
 struct conntrack_gc_work {
        struct delayed_work     dwork;
-       u32                     last_bucket;
+       u32                     next_bucket;
        bool                    exiting;
        bool                    early_drop;
-       long                    next_gc_run;
 };
 
 static __read_mostly struct kmem_cache *nf_conntrack_cachep;
 static DEFINE_SPINLOCK(nf_conntrack_locks_all_lock);
 static __read_mostly bool nf_conntrack_locks_all;
 
-/* every gc cycle scans at most 1/GC_MAX_BUCKETS_DIV part of table */
-#define GC_MAX_BUCKETS_DIV     128u
-/* upper bound of full table scan */
-#define GC_MAX_SCAN_JIFFIES    (16u * HZ)
-/* desired ratio of entries found to be expired */
-#define GC_EVICT_RATIO 50u
+#define GC_SCAN_INTERVAL       (120u * HZ)
+#define GC_SCAN_MAX_DURATION   msecs_to_jiffies(10)
 
 static struct conntrack_gc_work conntrack_gc_work;
 
@@ -670,8 +665,13 @@ bool nf_ct_delete(struct nf_conn *ct, u32 portid, int report)
                return false;
 
        tstamp = nf_conn_tstamp_find(ct);
-       if (tstamp && tstamp->stop == 0)
+       if (tstamp) {
+               s32 timeout = ct->timeout - nfct_time_stamp;
+
                tstamp->stop = ktime_get_real_ns();
+               if (timeout < 0)
+                       tstamp->stop -= jiffies_to_nsecs(-timeout);
+       }
 
        if (nf_conntrack_event_report(IPCT_DESTROY, ct,
                                    portid, report) < 0) {
@@ -1358,17 +1358,13 @@ static bool gc_worker_can_early_drop(const struct nf_conn *ct)
 
 static void gc_worker(struct work_struct *work)
 {
-       unsigned int min_interval = max(HZ / GC_MAX_BUCKETS_DIV, 1u);
-       unsigned int i, goal, buckets = 0, expired_count = 0;
-       unsigned int nf_conntrack_max95 = 0;
+       unsigned long end_time = jiffies + GC_SCAN_MAX_DURATION;
+       unsigned int i, hashsz, nf_conntrack_max95 = 0;
+       unsigned long next_run = GC_SCAN_INTERVAL;
        struct conntrack_gc_work *gc_work;
-       unsigned int ratio, scanned = 0;
-       unsigned long next_run;
-
        gc_work = container_of(work, struct conntrack_gc_work, dwork.work);
 
-       goal = nf_conntrack_htable_size / GC_MAX_BUCKETS_DIV;
-       i = gc_work->last_bucket;
+       i = gc_work->next_bucket;
        if (gc_work->early_drop)
                nf_conntrack_max95 = nf_conntrack_max / 100u * 95u;
 
@@ -1376,15 +1372,15 @@ static void gc_worker(struct work_struct *work)
                struct nf_conntrack_tuple_hash *h;
                struct hlist_nulls_head *ct_hash;
                struct hlist_nulls_node *n;
-               unsigned int hashsz;
                struct nf_conn *tmp;
 
-               i++;
                rcu_read_lock();
 
                nf_conntrack_get_ht(&ct_hash, &hashsz);
-               if (i >= hashsz)
-                       i = 0;
+               if (i >= hashsz) {
+                       rcu_read_unlock();
+                       break;
+               }
 
                hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[i], hnnode) {
                        struct nf_conntrack_net *cnet;
@@ -1392,7 +1388,6 @@ static void gc_worker(struct work_struct *work)
 
                        tmp = nf_ct_tuplehash_to_ctrack(h);
 
-                       scanned++;
                        if (test_bit(IPS_OFFLOAD_BIT, &tmp->status)) {
                                nf_ct_offload_timeout(tmp);
                                continue;
@@ -1400,7 +1395,6 @@ static void gc_worker(struct work_struct *work)
 
                        if (nf_ct_is_expired(tmp)) {
                                nf_ct_gc_expired(tmp);
-                               expired_count++;
                                continue;
                        }
 
@@ -1433,7 +1427,14 @@ static void gc_worker(struct work_struct *work)
                 */
                rcu_read_unlock();
                cond_resched();
-       } while (++buckets < goal);
+               i++;
+
+               if (time_after(jiffies, end_time) && i < hashsz) {
+                       gc_work->next_bucket = i;
+                       next_run = 0;
+                       break;
+               }
+       } while (i < hashsz);
 
        if (gc_work->exiting)
                return;
@@ -1444,40 +1445,17 @@ static void gc_worker(struct work_struct *work)
         *
         * This worker is only here to reap expired entries when system went
         * idle after a busy period.
-        *
-        * The heuristics below are supposed to balance conflicting goals:
-        *
-        * 1. Minimize time until we notice a stale entry
-        * 2. Maximize scan intervals to not waste cycles
-        *
-        * Normally, expire ratio will be close to 0.
-        *
-        * As soon as a sizeable fraction of the entries have expired
-        * increase scan frequency.
         */
-       ratio = scanned ? expired_count * 100 / scanned : 0;
-       if (ratio > GC_EVICT_RATIO) {
-               gc_work->next_gc_run = min_interval;
-       } else {
-               unsigned int max = GC_MAX_SCAN_JIFFIES / GC_MAX_BUCKETS_DIV;
-
-               BUILD_BUG_ON((GC_MAX_SCAN_JIFFIES / GC_MAX_BUCKETS_DIV) == 0);
-
-               gc_work->next_gc_run += min_interval;
-               if (gc_work->next_gc_run > max)
-                       gc_work->next_gc_run = max;
+       if (next_run) {
+               gc_work->early_drop = false;
+               gc_work->next_bucket = 0;
        }
-
-       next_run = gc_work->next_gc_run;
-       gc_work->last_bucket = i;
-       gc_work->early_drop = false;
        queue_delayed_work(system_power_efficient_wq, &gc_work->dwork, next_run);
 }
 
 static void conntrack_gc_work_init(struct conntrack_gc_work *gc_work)
 {
        INIT_DEFERRABLE_WORK(&gc_work->dwork, gc_worker);
-       gc_work->next_gc_run = HZ;
        gc_work->exiting = false;
 }
 
index 3259416..af5115e 100644 (file)
@@ -1478,7 +1478,6 @@ void nf_conntrack_tcp_init_net(struct net *net)
 
 #if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
        tn->offload_timeout = 30 * HZ;
-       tn->offload_pickup = 120 * HZ;
 #endif
 }
 
index 698fee4..f8e3c0d 100644 (file)
@@ -271,7 +271,6 @@ void nf_conntrack_udp_init_net(struct net *net)
 
 #if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
        un->offload_timeout = 30 * HZ;
-       un->offload_pickup = 30 * HZ;
 #endif
 }
 
index 214d9f9..e84b499 100644 (file)
@@ -575,7 +575,6 @@ enum nf_ct_sysctl_index {
        NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_UNACK,
 #if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
        NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_OFFLOAD,
-       NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_OFFLOAD_PICKUP,
 #endif
        NF_SYSCTL_CT_PROTO_TCP_LOOSE,
        NF_SYSCTL_CT_PROTO_TCP_LIBERAL,
@@ -585,7 +584,6 @@ enum nf_ct_sysctl_index {
        NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_STREAM,
 #if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
        NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_OFFLOAD,
-       NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_OFFLOAD_PICKUP,
 #endif
        NF_SYSCTL_CT_PROTO_TIMEOUT_ICMP,
        NF_SYSCTL_CT_PROTO_TIMEOUT_ICMPV6,
@@ -776,12 +774,6 @@ static struct ctl_table nf_ct_sysctl_table[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec_jiffies,
        },
-       [NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_OFFLOAD_PICKUP] = {
-               .procname       = "nf_flowtable_tcp_pickup",
-               .maxlen         = sizeof(unsigned int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec_jiffies,
-       },
 #endif
        [NF_SYSCTL_CT_PROTO_TCP_LOOSE] = {
                .procname       = "nf_conntrack_tcp_loose",
@@ -832,12 +824,6 @@ static struct ctl_table nf_ct_sysctl_table[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec_jiffies,
        },
-       [NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_OFFLOAD_PICKUP] = {
-               .procname       = "nf_flowtable_udp_pickup",
-               .maxlen         = sizeof(unsigned int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec_jiffies,
-       },
 #endif
        [NF_SYSCTL_CT_PROTO_TIMEOUT_ICMP] = {
                .procname       = "nf_conntrack_icmp_timeout",
@@ -1018,7 +1004,6 @@ static void nf_conntrack_standalone_init_tcp_sysctl(struct net *net,
 
 #if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
        table[NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_OFFLOAD].data = &tn->offload_timeout;
-       table[NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_OFFLOAD_PICKUP].data = &tn->offload_pickup;
 #endif
 
 }
@@ -1111,7 +1096,6 @@ static int nf_conntrack_standalone_init_sysctl(struct net *net)
        table[NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_STREAM].data = &un->timeouts[UDP_CT_REPLIED];
 #if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
        table[NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_OFFLOAD].data = &un->offload_timeout;
-       table[NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_OFFLOAD_PICKUP].data = &un->offload_pickup;
 #endif
 
        nf_conntrack_standalone_init_tcp_sysctl(net, table);
index 1e50908..8788b51 100644 (file)
@@ -183,7 +183,7 @@ static void flow_offload_fixup_ct_timeout(struct nf_conn *ct)
        const struct nf_conntrack_l4proto *l4proto;
        struct net *net = nf_ct_net(ct);
        int l4num = nf_ct_protonum(ct);
-       unsigned int timeout;
+       s32 timeout;
 
        l4proto = nf_ct_l4proto_find(l4num);
        if (!l4proto)
@@ -192,15 +192,20 @@ static void flow_offload_fixup_ct_timeout(struct nf_conn *ct)
        if (l4num == IPPROTO_TCP) {
                struct nf_tcp_net *tn = nf_tcp_pernet(net);
 
-               timeout = tn->offload_pickup;
+               timeout = tn->timeouts[TCP_CONNTRACK_ESTABLISHED];
+               timeout -= tn->offload_timeout;
        } else if (l4num == IPPROTO_UDP) {
                struct nf_udp_net *tn = nf_udp_pernet(net);
 
-               timeout = tn->offload_pickup;
+               timeout = tn->timeouts[UDP_CT_REPLIED];
+               timeout -= tn->offload_timeout;
        } else {
                return;
        }
 
+       if (timeout < 0)
+               timeout = 0;
+
        if (nf_flow_timeout_delta(ct->timeout) > (__s32)timeout)
                ct->timeout = nfct_time_stamp + timeout;
 }
@@ -331,7 +336,11 @@ EXPORT_SYMBOL_GPL(flow_offload_add);
 void flow_offload_refresh(struct nf_flowtable *flow_table,
                          struct flow_offload *flow)
 {
-       flow->timeout = nf_flowtable_time_stamp + flow_offload_get_timeout(flow);
+       u32 timeout;
+
+       timeout = nf_flowtable_time_stamp + flow_offload_get_timeout(flow);
+       if (READ_ONCE(flow->timeout) != timeout)
+               WRITE_ONCE(flow->timeout, timeout);
 
        if (likely(!nf_flowtable_hw_offload(flow_table)))
                return;
index de182d1..081437d 100644 (file)
@@ -8445,6 +8445,16 @@ static int nf_tables_commit_audit_alloc(struct list_head *adl,
        return 0;
 }
 
+static void nf_tables_commit_audit_free(struct list_head *adl)
+{
+       struct nft_audit_data *adp, *adn;
+
+       list_for_each_entry_safe(adp, adn, adl, list) {
+               list_del(&adp->list);
+               kfree(adp);
+       }
+}
+
 static void nf_tables_commit_audit_collect(struct list_head *adl,
                                           struct nft_table *table, u32 op)
 {
@@ -8509,6 +8519,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
                ret = nf_tables_commit_audit_alloc(&adl, trans->ctx.table);
                if (ret) {
                        nf_tables_commit_chain_prepare_cancel(net);
+                       nf_tables_commit_audit_free(&adl);
                        return ret;
                }
                if (trans->msg_type == NFT_MSG_NEWRULE ||
@@ -8518,6 +8529,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
                        ret = nf_tables_commit_chain_prepare(net, chain);
                        if (ret < 0) {
                                nf_tables_commit_chain_prepare_cancel(net);
+                               nf_tables_commit_audit_free(&adl);
                                return ret;
                        }
                }
index 50b4e3c..f554e2e 100644 (file)
@@ -89,11 +89,15 @@ static int nfnl_hook_put_nft_chain_info(struct sk_buff *nlskb,
        if (!nest2)
                goto cancel_nest;
 
-       ret = nla_put_string(nlskb, NFTA_CHAIN_TABLE, chain->table->name);
+       ret = nla_put_string(nlskb, NFNLA_CHAIN_TABLE, chain->table->name);
        if (ret)
                goto cancel_nest;
 
-       ret = nla_put_string(nlskb, NFTA_CHAIN_NAME, chain->name);
+       ret = nla_put_string(nlskb, NFNLA_CHAIN_NAME, chain->name);
+       if (ret)
+               goto cancel_nest;
+
+       ret = nla_put_u8(nlskb, NFNLA_CHAIN_FAMILY, chain->table->family);
        if (ret)
                goto cancel_nest;
 
@@ -109,18 +113,19 @@ cancel_nest:
 static int nfnl_hook_dump_one(struct sk_buff *nlskb,
                              const struct nfnl_dump_hook_data *ctx,
                              const struct nf_hook_ops *ops,
-                             unsigned int seq)
+                             int family, unsigned int seq)
 {
        u16 event = nfnl_msg_type(NFNL_SUBSYS_HOOK, NFNL_MSG_HOOK_GET);
        unsigned int portid = NETLINK_CB(nlskb).portid;
        struct nlmsghdr *nlh;
        int ret = -EMSGSIZE;
+       u32 hooknum;
 #ifdef CONFIG_KALLSYMS
        char sym[KSYM_SYMBOL_LEN];
        char *module_name;
 #endif
        nlh = nfnl_msg_put(nlskb, portid, seq, event,
-                          NLM_F_MULTI, ops->pf, NFNETLINK_V0, 0);
+                          NLM_F_MULTI, family, NFNETLINK_V0, 0);
        if (!nlh)
                goto nla_put_failure;
 
@@ -135,6 +140,7 @@ static int nfnl_hook_dump_one(struct sk_buff *nlskb,
        if (module_name) {
                char *end;
 
+               *module_name = '\0';
                module_name += 2;
                end = strchr(module_name, ']');
                if (end) {
@@ -151,7 +157,12 @@ static int nfnl_hook_dump_one(struct sk_buff *nlskb,
                goto nla_put_failure;
 #endif
 
-       ret = nla_put_be32(nlskb, NFNLA_HOOK_HOOKNUM, htonl(ops->hooknum));
+       if (ops->pf == NFPROTO_INET && ops->hooknum == NF_INET_INGRESS)
+               hooknum = NF_NETDEV_INGRESS;
+       else
+               hooknum = ops->hooknum;
+
+       ret = nla_put_be32(nlskb, NFNLA_HOOK_HOOKNUM, htonl(hooknum));
        if (ret)
                goto nla_put_failure;
 
@@ -174,7 +185,9 @@ static const struct nf_hook_entries *
 nfnl_hook_entries_head(u8 pf, unsigned int hook, struct net *net, const char *dev)
 {
        const struct nf_hook_entries *hook_head = NULL;
+#ifdef CONFIG_NETFILTER_INGRESS
        struct net_device *netdev;
+#endif
 
        switch (pf) {
        case NFPROTO_IPV4:
@@ -257,7 +270,8 @@ static int nfnl_hook_dump(struct sk_buff *nlskb,
        ops = nf_hook_entries_get_hook_ops(e);
 
        for (; i < e->num_hook_entries; i++) {
-               err = nfnl_hook_dump_one(nlskb, ctx, ops[i], cb->seq);
+               err = nfnl_hook_dump_one(nlskb, ctx, ops[i], family,
+                                        cb->nlh->nlmsg_seq);
                if (err)
                        break;
        }
index 8088b99..304e33c 100644 (file)
@@ -48,24 +48,30 @@ static void nft_last_eval(const struct nft_expr *expr,
 {
        struct nft_last_priv *priv = nft_expr_priv(expr);
 
-       priv->last_jiffies = jiffies;
-       priv->last_set = 1;
+       if (READ_ONCE(priv->last_jiffies) != jiffies)
+               WRITE_ONCE(priv->last_jiffies, jiffies);
+       if (READ_ONCE(priv->last_set) == 0)
+               WRITE_ONCE(priv->last_set, 1);
 }
 
 static int nft_last_dump(struct sk_buff *skb, const struct nft_expr *expr)
 {
        struct nft_last_priv *priv = nft_expr_priv(expr);
+       unsigned long last_jiffies = READ_ONCE(priv->last_jiffies);
+       u32 last_set = READ_ONCE(priv->last_set);
        __be64 msecs;
 
-       if (time_before(jiffies, priv->last_jiffies))
-               priv->last_set = 0;
+       if (time_before(jiffies, last_jiffies)) {
+               WRITE_ONCE(priv->last_set, 0);
+               last_set = 0;
+       }
 
-       if (priv->last_set)
-               msecs = nf_jiffies64_to_msecs(jiffies - priv->last_jiffies);
+       if (last_set)
+               msecs = nf_jiffies64_to_msecs(jiffies - last_jiffies);
        else
                msecs = 0;
 
-       if (nla_put_be32(skb, NFTA_LAST_SET, htonl(priv->last_set)) ||
+       if (nla_put_be32(skb, NFTA_LAST_SET, htonl(last_set)) ||
            nla_put_be64(skb, NFTA_LAST_MSECS, msecs, NFTA_LAST_PAD))
                goto nla_put_failure;
 
index 0840c63..be1595d 100644 (file)
@@ -201,7 +201,9 @@ static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
                alen = sizeof_field(struct nf_nat_range, min_addr.ip6);
                break;
        default:
-               return -EAFNOSUPPORT;
+               if (tb[NFTA_NAT_REG_ADDR_MIN])
+                       return -EAFNOSUPPORT;
+               break;
        }
        priv->family = family;
 
index e586424..9713035 100644 (file)
@@ -293,14 +293,14 @@ static bool icmp6hdr_ok(struct sk_buff *skb)
 }
 
 /**
- * Parse vlan tag from vlan header.
+ * parse_vlan_tag - Parse vlan tag from vlan header.
  * @skb: skb containing frame to parse
  * @key_vh: pointer to parsed vlan tag
  * @untag_vlan: should the vlan header be removed from the frame
  *
- * Returns ERROR on memory error.
- * Returns 0 if it encounters a non-vlan or incomplete packet.
- * Returns 1 after successfully parsing vlan tag.
+ * Return: ERROR on memory error.
+ * %0 if it encounters a non-vlan or incomplete packet.
+ * %1 after successfully parsing vlan tag.
  */
 static int parse_vlan_tag(struct sk_buff *skb, struct vlan_head *key_vh,
                          bool untag_vlan)
@@ -532,6 +532,7 @@ static int parse_nsh(struct sk_buff *skb, struct sw_flow_key *key)
  *       L3 header
  * @key: output flow key
  *
+ * Return: %0 if successful, otherwise a negative errno value.
  */
 static int key_extract_l3l4(struct sk_buff *skb, struct sw_flow_key *key)
 {
@@ -748,8 +749,6 @@ static int key_extract_l3l4(struct sk_buff *skb, struct sw_flow_key *key)
  *
  * The caller must ensure that skb->len >= ETH_HLEN.
  *
- * Returns 0 if successful, otherwise a negative errno value.
- *
  * Initializes @skb header fields as follows:
  *
  *    - skb->mac_header: the L2 header.
@@ -764,6 +763,8 @@ static int key_extract_l3l4(struct sk_buff *skb, struct sw_flow_key *key)
  *
  *    - skb->protocol: the type of the data starting at skb->network_header.
  *      Equals to key->eth.type.
+ *
+ * Return: %0 if successful, otherwise a negative errno value.
  */
 static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
 {
index 88deb5b..cf2ce58 100644 (file)
@@ -507,6 +507,7 @@ void ovs_vport_send(struct vport *vport, struct sk_buff *skb, u8 mac_proto)
        }
 
        skb->dev = vport->dev;
+       skb->tstamp = 0;
        vport->ops->send(skb);
        return;
 
index e6f4a62..0c30908 100644 (file)
@@ -493,7 +493,7 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
                goto err;
        }
 
-       if (len != ALIGN(size, 4) + hdrlen)
+       if (!size || len != ALIGN(size, 4) + hdrlen)
                goto err;
 
        if (cb->dst_port != QRTR_PORT_CTRL && cb->type != QRTR_TYPE_DATA &&
@@ -518,8 +518,10 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
                if (!ipc)
                        goto err;
 
-               if (sock_queue_rcv_skb(&ipc->sk, skb))
+               if (sock_queue_rcv_skb(&ipc->sk, skb)) {
+                       qrtr_port_put(ipc);
                        goto err;
+               }
 
                qrtr_port_put(ipc);
        }
@@ -839,6 +841,8 @@ static int qrtr_local_enqueue(struct qrtr_node *node, struct sk_buff *skb,
 
        ipc = qrtr_port_lookup(to->sq_port);
        if (!ipc || &ipc->sk == skb->sk) { /* do not send to self */
+               if (ipc)
+                       qrtr_port_put(ipc);
                kfree_skb(skb);
                return -ENODEV;
        }
index 9b6ffff..28c1b00 100644 (file)
@@ -131,9 +131,9 @@ static int rds_ib_post_reg_frmr(struct rds_ib_mr *ibmr)
                cpu_relax();
        }
 
-       ret = ib_map_mr_sg_zbva(frmr->mr, ibmr->sg, ibmr->sg_len,
+       ret = ib_map_mr_sg_zbva(frmr->mr, ibmr->sg, ibmr->sg_dma_len,
                                &off, PAGE_SIZE);
-       if (unlikely(ret != ibmr->sg_len))
+       if (unlikely(ret != ibmr->sg_dma_len))
                return ret < 0 ? ret : -EINVAL;
 
        if (cmpxchg(&frmr->fr_state,
index 7153c67..2ef4cd2 100644 (file)
@@ -273,6 +273,9 @@ static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a,
                        goto out;
        }
 
+       /* All mirred/redirected skbs should clear previous ct info */
+       nf_reset_ct(skb2);
+
        want_ingress = tcf_mirred_act_wants_ingress(m_eaction);
 
        expects_nh = want_ingress || !m_mac_header_xmit;
index 9515428..28af8b1 100644 (file)
@@ -720,7 +720,7 @@ static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb,
 skip_hash:
        if (flow_override)
                flow_hash = flow_override - 1;
-       else if (use_skbhash)
+       else if (use_skbhash && (flow_mode & CAKE_FLOW_FLOWS))
                flow_hash = skb->hash;
        if (host_override) {
                dsthost_hash = host_override - 1;
index c1e84d1..c76701a 100644 (file)
@@ -660,6 +660,13 @@ static int ets_qdisc_change(struct Qdisc *sch, struct nlattr *opt,
        sch_tree_lock(sch);
 
        q->nbands = nbands;
+       for (i = nstrict; i < q->nstrict; i++) {
+               INIT_LIST_HEAD(&q->classes[i].alist);
+               if (q->classes[i].qdisc->q.qlen) {
+                       list_add_tail(&q->classes[i].alist, &q->active);
+                       q->classes[i].deficit = quanta[i];
+               }
+       }
        q->nstrict = nstrict;
        memcpy(q->prio2band, priomap, sizeof(priomap));
 
index d9ac60f..a8dd06c 100644 (file)
@@ -913,7 +913,7 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
 
        /* seqlock has the same scope of busylock, for NOLOCK qdisc */
        spin_lock_init(&sch->seqlock);
-       lockdep_set_class(&sch->busylock,
+       lockdep_set_class(&sch->seqlock,
                          dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);
 
        seqcount_init(&sch->running);
index 07b30d0..9c79374 100644 (file)
@@ -1739,8 +1739,6 @@ static void taprio_attach(struct Qdisc *sch)
                if (FULL_OFFLOAD_IS_ENABLED(q->flags)) {
                        qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
                        old = dev_graft_qdisc(qdisc->dev_queue, qdisc);
-                       if (ntx < dev->real_num_tx_queues)
-                               qdisc_hash_add(qdisc, false);
                } else {
                        old = dev_graft_qdisc(qdisc->dev_queue, sch);
                        qdisc_refcount_inc(sch);
index fe74c5f..db6b737 100644 (file)
@@ -857,14 +857,18 @@ int sctp_auth_set_key(struct sctp_endpoint *ep,
        memcpy(key->data, &auth_key->sca_key[0], auth_key->sca_keylength);
        cur_key->key = key;
 
-       if (replace) {
-               list_del_init(&shkey->key_list);
-               sctp_auth_shkey_release(shkey);
-               if (asoc && asoc->active_key_id == auth_key->sca_keynumber)
-                       sctp_auth_asoc_init_active_key(asoc, GFP_KERNEL);
+       if (!replace) {
+               list_add(&cur_key->key_list, sh_keys);
+               return 0;
        }
+
+       list_del_init(&shkey->key_list);
+       sctp_auth_shkey_release(shkey);
        list_add(&cur_key->key_list, sh_keys);
 
+       if (asoc && asoc->active_key_id == auth_key->sca_keynumber)
+               sctp_auth_asoc_init_active_key(asoc, GFP_KERNEL);
+
        return 0;
 }
 
index eb3c2a3..5ef86fd 100644 (file)
@@ -1203,7 +1203,7 @@ static struct sctp_association *__sctp_rcv_asconf_lookup(
        if (unlikely(!af))
                return NULL;
 
-       if (af->from_addr_param(&paddr, param, peer_port, 0))
+       if (!af->from_addr_param(&paddr, param, peer_port, 0))
                return NULL;
 
        return __sctp_lookup_association(net, laddr, &paddr, transportp);
index e48dd90..470dbdc 100644 (file)
@@ -100,8 +100,9 @@ static int sctp_inet6addr_event(struct notifier_block *this, unsigned long ev,
                list_for_each_entry_safe(addr, temp,
                                        &net->sctp.local_addr_list, list) {
                        if (addr->a.sa.sa_family == AF_INET6 &&
-                                       ipv6_addr_equal(&addr->a.v6.sin6_addr,
-                                               &ifa->addr)) {
+                           ipv6_addr_equal(&addr->a.v6.sin6_addr,
+                                           &ifa->addr) &&
+                           addr->a.v6.sin6_scope_id == ifa->idev->dev->ifindex) {
                                sctp_addr_wq_mgmt(net, addr, SCTP_ADDR_DEL);
                                found = 1;
                                addr->valid = 0;
index 09a8f23..32df65f 100644 (file)
@@ -1109,12 +1109,12 @@ enum sctp_disposition sctp_sf_send_probe(struct net *net,
        if (!sctp_transport_pl_enabled(transport))
                return SCTP_DISPOSITION_CONSUME;
 
-       sctp_transport_pl_send(transport);
-
-       reply = sctp_make_heartbeat(asoc, transport, transport->pl.probe_size);
-       if (!reply)
-               return SCTP_DISPOSITION_NOMEM;
-       sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
+       if (sctp_transport_pl_send(transport)) {
+               reply = sctp_make_heartbeat(asoc, transport, transport->pl.probe_size);
+               if (!reply)
+                       return SCTP_DISPOSITION_NOMEM;
+               sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
+       }
        sctp_add_cmd_sf(commands, SCTP_CMD_PROBE_TIMER_UPDATE,
                        SCTP_TRANSPORT(transport));
 
@@ -1274,8 +1274,7 @@ enum sctp_disposition sctp_sf_backbeat_8_3(struct net *net,
                    !sctp_transport_pl_enabled(link))
                        return SCTP_DISPOSITION_DISCARD;
 
-               sctp_transport_pl_recv(link);
-               if (link->pl.state == SCTP_PL_COMPLETE)
+               if (sctp_transport_pl_recv(link))
                        return SCTP_DISPOSITION_CONSUME;
 
                return sctp_sf_send_probe(net, ep, asoc, type, link, commands);
index 397a624..a3d3ca6 100644 (file)
@@ -258,16 +258,13 @@ void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk)
        sctp_transport_pl_update(transport);
 }
 
-void sctp_transport_pl_send(struct sctp_transport *t)
+bool sctp_transport_pl_send(struct sctp_transport *t)
 {
-       pr_debug("%s: PLPMTUD: transport: %p, state: %d, pmtu: %d, size: %d, high: %d\n",
-                __func__, t, t->pl.state, t->pl.pmtu, t->pl.probe_size, t->pl.probe_high);
-
-       if (t->pl.probe_count < SCTP_MAX_PROBES) {
-               t->pl.probe_count++;
-               return;
-       }
+       if (t->pl.probe_count < SCTP_MAX_PROBES)
+               goto out;
 
+       t->pl.last_rtx_chunks = t->asoc->rtx_data_chunks;
+       t->pl.probe_count = 0;
        if (t->pl.state == SCTP_PL_BASE) {
                if (t->pl.probe_size == SCTP_BASE_PLPMTU) { /* BASE_PLPMTU Confirmation Failed */
                        t->pl.state = SCTP_PL_ERROR; /* Base -> Error */
@@ -299,14 +296,27 @@ void sctp_transport_pl_send(struct sctp_transport *t)
                        sctp_assoc_sync_pmtu(t->asoc);
                }
        }
-       t->pl.probe_count = 1;
+
+out:
+       if (t->pl.state == SCTP_PL_COMPLETE && t->pl.raise_count < 30 &&
+           !t->pl.probe_count && t->pl.last_rtx_chunks == t->asoc->rtx_data_chunks) {
+               t->pl.raise_count++;
+               return false;
+       }
+
+       pr_debug("%s: PLPMTUD: transport: %p, state: %d, pmtu: %d, size: %d, high: %d\n",
+                __func__, t, t->pl.state, t->pl.pmtu, t->pl.probe_size, t->pl.probe_high);
+
+       t->pl.probe_count++;
+       return true;
 }
 
-void sctp_transport_pl_recv(struct sctp_transport *t)
+bool sctp_transport_pl_recv(struct sctp_transport *t)
 {
        pr_debug("%s: PLPMTUD: transport: %p, state: %d, pmtu: %d, size: %d, high: %d\n",
                 __func__, t, t->pl.state, t->pl.pmtu, t->pl.probe_size, t->pl.probe_high);
 
+       t->pl.last_rtx_chunks = t->asoc->rtx_data_chunks;
        t->pl.pmtu = t->pl.probe_size;
        t->pl.probe_count = 0;
        if (t->pl.state == SCTP_PL_BASE) {
@@ -323,7 +333,7 @@ void sctp_transport_pl_recv(struct sctp_transport *t)
                if (!t->pl.probe_high) {
                        t->pl.probe_size = min(t->pl.probe_size + SCTP_PL_BIG_STEP,
                                               SCTP_MAX_PLPMTU);
-                       return;
+                       return false;
                }
                t->pl.probe_size += SCTP_PL_MIN_STEP;
                if (t->pl.probe_size >= t->pl.probe_high) {
@@ -335,14 +345,13 @@ void sctp_transport_pl_recv(struct sctp_transport *t)
                        t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t);
                        sctp_assoc_sync_pmtu(t->asoc);
                }
-       } else if (t->pl.state == SCTP_PL_COMPLETE) {
-               t->pl.raise_count++;
-               if (t->pl.raise_count == 30) {
-                       /* Raise probe_size again after 30 * interval in Search Complete */
-                       t->pl.state = SCTP_PL_SEARCH; /* Search Complete -> Search */
-                       t->pl.probe_size += SCTP_PL_MIN_STEP;
-               }
+       } else if (t->pl.state == SCTP_PL_COMPLETE && t->pl.raise_count == 30) {
+               /* Raise probe_size again after 30 * interval in Search Complete */
+               t->pl.state = SCTP_PL_SEARCH; /* Search Complete -> Search */
+               t->pl.probe_size += SCTP_PL_MIN_STEP;
        }
+
+       return t->pl.state == SCTP_PL_COMPLETE;
 }
 
 static bool sctp_transport_pl_toobig(struct sctp_transport *t, u32 pmtu)
index 8983896..c038efc 100644 (file)
@@ -795,7 +795,7 @@ static int smc_connect_rdma(struct smc_sock *smc,
                        reason_code = SMC_CLC_DECL_NOSRVLINK;
                        goto connect_abort;
                }
-               smc->conn.lnk = link;
+               smc_switch_link_and_count(&smc->conn, link);
        }
 
        /* create send buffer and rmb */
index cd0d7c9..c160ff5 100644 (file)
@@ -917,8 +917,8 @@ static int smc_switch_cursor(struct smc_sock *smc, struct smc_cdc_tx_pend *pend,
        return rc;
 }
 
-static void smc_switch_link_and_count(struct smc_connection *conn,
-                                     struct smc_link *to_lnk)
+void smc_switch_link_and_count(struct smc_connection *conn,
+                              struct smc_link *to_lnk)
 {
        atomic_dec(&conn->lnk->conn_cnt);
        conn->lnk = to_lnk;
index 6d6fd13..c043ecd 100644 (file)
@@ -97,6 +97,7 @@ struct smc_link {
        unsigned long           *wr_tx_mask;    /* bit mask of used indexes */
        u32                     wr_tx_cnt;      /* number of WR send buffers */
        wait_queue_head_t       wr_tx_wait;     /* wait for free WR send buf */
+       atomic_t                wr_tx_refcnt;   /* tx refs to link */
 
        struct smc_wr_buf       *wr_rx_bufs;    /* WR recv payload buffers */
        struct ib_recv_wr       *wr_rx_ibs;     /* WR recv meta data */
@@ -109,6 +110,7 @@ struct smc_link {
 
        struct ib_reg_wr        wr_reg;         /* WR register memory region */
        wait_queue_head_t       wr_reg_wait;    /* wait for wr_reg result */
+       atomic_t                wr_reg_refcnt;  /* reg refs to link */
        enum smc_wr_reg_state   wr_reg_state;   /* state of wr_reg request */
 
        u8                      gid[SMC_GID_SIZE];/* gid matching used vlan id*/
@@ -444,6 +446,8 @@ void smc_core_exit(void);
 int smcr_link_init(struct smc_link_group *lgr, struct smc_link *lnk,
                   u8 link_idx, struct smc_init_info *ini);
 void smcr_link_clear(struct smc_link *lnk, bool log);
+void smc_switch_link_and_count(struct smc_connection *conn,
+                              struct smc_link *to_lnk);
 int smcr_buf_map_lgr(struct smc_link *lnk);
 int smcr_buf_reg_lgr(struct smc_link *lnk);
 void smcr_lgr_set_type(struct smc_link_group *lgr, enum smc_lgr_type new_type);
index 273eaf1..2e7560e 100644 (file)
@@ -888,6 +888,7 @@ int smc_llc_cli_add_link(struct smc_link *link, struct smc_llc_qentry *qentry)
        if (!rc)
                goto out;
 out_clear_lnk:
+       lnk_new->state = SMC_LNK_INACTIVE;
        smcr_link_clear(lnk_new, false);
 out_reject:
        smc_llc_cli_add_link_reject(qentry);
@@ -1184,6 +1185,7 @@ int smc_llc_srv_add_link(struct smc_link *link)
                goto out_err;
        return 0;
 out_err:
+       link_new->state = SMC_LNK_INACTIVE;
        smcr_link_clear(link_new, false);
        return rc;
 }
@@ -1286,10 +1288,8 @@ static void smc_llc_process_cli_delete_link(struct smc_link_group *lgr)
        del_llc->reason = 0;
        smc_llc_send_message(lnk, &qentry->msg); /* response */
 
-       if (smc_link_downing(&lnk_del->state)) {
-               if (smc_switch_conns(lgr, lnk_del, false))
-                       smc_wr_tx_wait_no_pending_sends(lnk_del);
-       }
+       if (smc_link_downing(&lnk_del->state))
+               smc_switch_conns(lgr, lnk_del, false);
        smcr_link_clear(lnk_del, true);
 
        active_links = smc_llc_active_link_count(lgr);
@@ -1805,8 +1805,6 @@ void smc_llc_link_clear(struct smc_link *link, bool log)
                                    link->smcibdev->ibdev->name, link->ibport);
        complete(&link->llc_testlink_resp);
        cancel_delayed_work_sync(&link->llc_testlink_wrk);
-       smc_wr_wakeup_reg_wait(link);
-       smc_wr_wakeup_tx_wait(link);
 }
 
 /* register a new rtoken at the remote peer (for all links) */
index 289025c..c79361d 100644 (file)
@@ -496,7 +496,7 @@ static int smc_tx_rdma_writes(struct smc_connection *conn,
 /* Wakeup sndbuf consumers from any context (IRQ or process)
  * since there is more data to transmit; usable snd_wnd as max transmit
  */
-static int smcr_tx_sndbuf_nonempty(struct smc_connection *conn)
+static int _smcr_tx_sndbuf_nonempty(struct smc_connection *conn)
 {
        struct smc_cdc_producer_flags *pflags = &conn->local_tx_ctrl.prod_flags;
        struct smc_link *link = conn->lnk;
@@ -550,6 +550,22 @@ out_unlock:
        return rc;
 }
 
+static int smcr_tx_sndbuf_nonempty(struct smc_connection *conn)
+{
+       struct smc_link *link = conn->lnk;
+       int rc = -ENOLINK;
+
+       if (!link)
+               return rc;
+
+       atomic_inc(&link->wr_tx_refcnt);
+       if (smc_link_usable(link))
+               rc = _smcr_tx_sndbuf_nonempty(conn);
+       if (atomic_dec_and_test(&link->wr_tx_refcnt))
+               wake_up_all(&link->wr_tx_wait);
+       return rc;
+}
+
 static int smcd_tx_sndbuf_nonempty(struct smc_connection *conn)
 {
        struct smc_cdc_producer_flags *pflags = &conn->local_tx_ctrl.prod_flags;
index cbc73a7..a419e9a 100644 (file)
@@ -322,9 +322,12 @@ int smc_wr_reg_send(struct smc_link *link, struct ib_mr *mr)
        if (rc)
                return rc;
 
+       atomic_inc(&link->wr_reg_refcnt);
        rc = wait_event_interruptible_timeout(link->wr_reg_wait,
                                              (link->wr_reg_state != POSTED),
                                              SMC_WR_REG_MR_WAIT_TIME);
+       if (atomic_dec_and_test(&link->wr_reg_refcnt))
+               wake_up_all(&link->wr_reg_wait);
        if (!rc) {
                /* timeout - terminate link */
                smcr_link_down_cond_sched(link);
@@ -566,10 +569,15 @@ void smc_wr_free_link(struct smc_link *lnk)
                return;
        ibdev = lnk->smcibdev->ibdev;
 
+       smc_wr_wakeup_reg_wait(lnk);
+       smc_wr_wakeup_tx_wait(lnk);
+
        if (smc_wr_tx_wait_no_pending_sends(lnk))
                memset(lnk->wr_tx_mask, 0,
                       BITS_TO_LONGS(SMC_WR_BUF_CNT) *
                                                sizeof(*lnk->wr_tx_mask));
+       wait_event(lnk->wr_reg_wait, (!atomic_read(&lnk->wr_reg_refcnt)));
+       wait_event(lnk->wr_tx_wait, (!atomic_read(&lnk->wr_tx_refcnt)));
 
        if (lnk->wr_rx_dma_addr) {
                ib_dma_unmap_single(ibdev, lnk->wr_rx_dma_addr,
@@ -728,7 +736,9 @@ int smc_wr_create_link(struct smc_link *lnk)
        memset(lnk->wr_tx_mask, 0,
               BITS_TO_LONGS(SMC_WR_BUF_CNT) * sizeof(*lnk->wr_tx_mask));
        init_waitqueue_head(&lnk->wr_tx_wait);
+       atomic_set(&lnk->wr_tx_refcnt, 0);
        init_waitqueue_head(&lnk->wr_reg_wait);
+       atomic_set(&lnk->wr_reg_refcnt, 0);
        return rc;
 
 dma_unmap:
index d66a8e4..dbb4182 100644 (file)
@@ -835,7 +835,8 @@ static int svc_handle_xprt(struct svc_rqst *rqstp, struct svc_xprt *xprt)
                rqstp->rq_stime = ktime_get();
                rqstp->rq_reserved = serv->sv_max_mesg;
                atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
-       }
+       } else
+               svc_xprt_received(xprt);
 out:
        trace_svc_handle_xprt(xprt, len);
        return len;
index e573dce..b7dbdcb 100644 (file)
@@ -3149,24 +3149,6 @@ void cleanup_socket_xprt(void)
        xprt_unregister_transport(&xs_bc_tcp_transport);
 }
 
-static int param_set_uint_minmax(const char *val,
-               const struct kernel_param *kp,
-               unsigned int min, unsigned int max)
-{
-       unsigned int num;
-       int ret;
-
-       if (!val)
-               return -EINVAL;
-       ret = kstrtouint(val, 0, &num);
-       if (ret)
-               return ret;
-       if (num < min || num > max)
-               return -EINVAL;
-       *((unsigned int *)kp->arg) = num;
-       return 0;
-}
-
 static int param_set_portnr(const char *val, const struct kernel_param *kp)
 {
        return param_set_uint_minmax(val, kp,
index e5c43d4..c9391d3 100644 (file)
@@ -898,16 +898,10 @@ static int tipc_aead_decrypt(struct net *net, struct tipc_aead *aead,
        if (unlikely(!aead))
                return -ENOKEY;
 
-       /* Cow skb data if needed */
-       if (likely(!skb_cloned(skb) &&
-                  (!skb_is_nonlinear(skb) || !skb_has_frag_list(skb)))) {
-               nsg = 1 + skb_shinfo(skb)->nr_frags;
-       } else {
-               nsg = skb_cow_data(skb, 0, &unused);
-               if (unlikely(nsg < 0)) {
-                       pr_err("RX: skb_cow_data() returned %d\n", nsg);
-                       return nsg;
-               }
+       nsg = skb_cow_data(skb, 0, &unused);
+       if (unlikely(nsg < 0)) {
+               pr_err("RX: skb_cow_data() returned %d\n", nsg);
+               return nsg;
        }
 
        /* Allocate memory for the AEAD operation */
index cf58684..1b7a487 100644 (file)
@@ -913,7 +913,7 @@ static int link_schedule_user(struct tipc_link *l, struct tipc_msg *hdr)
        skb = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
                              dnode, l->addr, dport, 0, 0);
        if (!skb)
-               return -ENOMEM;
+               return -ENOBUFS;
        msg_set_dest_droppable(buf_msg(skb), true);
        TIPC_SKB_CB(skb)->chain_imp = msg_importance(hdr);
        skb_queue_tail(&l->wakeupq, skb);
@@ -1031,7 +1031,7 @@ void tipc_link_reset(struct tipc_link *l)
  *
  * Consumes the buffer chain.
  * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
- * Return: 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS or -ENOMEM
+ * Return: 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
  */
 int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
                   struct sk_buff_head *xmitq)
@@ -1089,7 +1089,7 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
                        if (!_skb) {
                                kfree_skb(skb);
                                __skb_queue_purge(list);
-                               return -ENOMEM;
+                               return -ENOBUFS;
                        }
                        __skb_queue_tail(transmq, skb);
                        tipc_link_set_skb_retransmit_time(skb, l);
index 34a97ea..8754bd8 100644 (file)
@@ -158,6 +158,7 @@ static void tipc_sk_remove(struct tipc_sock *tsk);
 static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz);
 static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz);
 static void tipc_sk_push_backlog(struct tipc_sock *tsk, bool nagle_ack);
+static int tipc_wait_for_connect(struct socket *sock, long *timeo_p);
 
 static const struct proto_ops packet_ops;
 static const struct proto_ops stream_ops;
@@ -1515,8 +1516,13 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
                rc = 0;
        }
 
-       if (unlikely(syn && !rc))
+       if (unlikely(syn && !rc)) {
                tipc_set_sk_state(sk, TIPC_CONNECTING);
+               if (dlen && timeout) {
+                       timeout = msecs_to_jiffies(timeout);
+                       tipc_wait_for_connect(sock, &timeout);
+               }
+       }
 
        return rc ? rc : dlen;
 }
@@ -1564,7 +1570,7 @@ static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dlen)
                return -EMSGSIZE;
 
        /* Handle implicit connection setup */
-       if (unlikely(dest)) {
+       if (unlikely(dest && sk->sk_state == TIPC_OPEN)) {
                rc = __tipc_sendmsg(sock, m, dlen);
                if (dlen && dlen == rc) {
                        tsk->peer_caps = tipc_node_get_capabilities(net, dnode);
@@ -2646,7 +2652,7 @@ static int tipc_listen(struct socket *sock, int len)
 static int tipc_wait_for_accept(struct socket *sock, long timeo)
 {
        struct sock *sk = sock->sk;
-       DEFINE_WAIT(wait);
+       DEFINE_WAIT_FUNC(wait, woken_wake_function);
        int err;
 
        /* True wake-one mechanism for incoming connections: only
@@ -2655,12 +2661,12 @@ static int tipc_wait_for_accept(struct socket *sock, long timeo)
         * anymore, the common case will execute the loop only once.
        */
        for (;;) {
-               prepare_to_wait_exclusive(sk_sleep(sk), &wait,
-                                         TASK_INTERRUPTIBLE);
                if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
+                       add_wait_queue(sk_sleep(sk), &wait);
                        release_sock(sk);
-                       timeo = schedule_timeout(timeo);
+                       timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
                        lock_sock(sk);
+                       remove_wait_queue(sk_sleep(sk), &wait);
                }
                err = 0;
                if (!skb_queue_empty(&sk->sk_receive_queue))
@@ -2672,7 +2678,6 @@ static int tipc_wait_for_accept(struct socket *sock, long timeo)
                if (signal_pending(current))
                        break;
        }
-       finish_wait(sk_sleep(sk), &wait);
        return err;
 }
 
@@ -2689,9 +2694,10 @@ static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
                       bool kern)
 {
        struct sock *new_sk, *sk = sock->sk;
-       struct sk_buff *buf;
        struct tipc_sock *new_tsock;
+       struct msghdr m = {NULL,};
        struct tipc_msg *msg;
+       struct sk_buff *buf;
        long timeo;
        int res;
 
@@ -2737,19 +2743,17 @@ static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
        }
 
        /*
-        * Respond to 'SYN-' by discarding it & returning 'ACK'-.
-        * Respond to 'SYN+' by queuing it on new socket.
+        * Respond to 'SYN-' by discarding it & returning 'ACK'.
+        * Respond to 'SYN+' by queuing it on new socket & returning 'ACK'.
         */
        if (!msg_data_sz(msg)) {
-               struct msghdr m = {NULL,};
-
                tsk_advance_rx_queue(sk);
-               __tipc_sendstream(new_sock, &m, 0);
        } else {
                __skb_dequeue(&sk->sk_receive_queue);
                __skb_queue_head(&new_sk->sk_receive_queue, buf);
                skb_set_owner_r(buf, new_sk);
        }
+       __tipc_sendstream(new_sock, &m, 0);
        release_sock(new_sk);
 exit:
        release_sock(sk);
index 23c92ad..ba7ced9 100644 (file)
@@ -1526,6 +1526,53 @@ out:
        return err;
 }
 
+static void unix_peek_fds(struct scm_cookie *scm, struct sk_buff *skb)
+{
+       scm->fp = scm_fp_dup(UNIXCB(skb).fp);
+
+       /*
+        * Garbage collection of unix sockets starts by selecting a set of
+        * candidate sockets which have reference only from being in flight
+        * (total_refs == inflight_refs).  This condition is checked once during
+        * the candidate collection phase, and candidates are marked as such, so
+        * that non-candidates can later be ignored.  While inflight_refs is
+        * protected by unix_gc_lock, total_refs (file count) is not, hence this
+        * is an instantaneous decision.
+        *
+        * Once a candidate, however, the socket must not be reinstalled into a
+        * file descriptor while the garbage collection is in progress.
+        *
+        * If the above conditions are met, then the directed graph of
+        * candidates (*) does not change while unix_gc_lock is held.
+        *
+        * Any operations that changes the file count through file descriptors
+        * (dup, close, sendmsg) does not change the graph since candidates are
+        * not installed in fds.
+        *
+        * Dequeing a candidate via recvmsg would install it into an fd, but
+        * that takes unix_gc_lock to decrement the inflight count, so it's
+        * serialized with garbage collection.
+        *
+        * MSG_PEEK is special in that it does not change the inflight count,
+        * yet does install the socket into an fd.  The following lock/unlock
+        * pair is to ensure serialization with garbage collection.  It must be
+        * done between incrementing the file count and installing the file into
+        * an fd.
+        *
+        * If garbage collection starts after the barrier provided by the
+        * lock/unlock, then it will see the elevated refcount and not mark this
+        * as a candidate.  If a garbage collection is already in progress
+        * before the file count was incremented, then the lock/unlock pair will
+        * ensure that garbage collection is finished before progressing to
+        * installing the fd.
+        *
+        * (*) A -> B where B is on the queue of A or B is on the queue of C
+        * which is on the queue of listening socket A.
+        */
+       spin_lock(&unix_gc_lock);
+       spin_unlock(&unix_gc_lock);
+}
+
 static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
 {
        int err = 0;
@@ -2175,7 +2222,7 @@ static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
                sk_peek_offset_fwd(sk, size);
 
                if (UNIXCB(skb).fp)
-                       scm.fp = scm_fp_dup(UNIXCB(skb).fp);
+                       unix_peek_fds(&scm, skb);
        }
        err = (flags & MSG_TRUNC) ? skb->len - skip : size;
 
@@ -2418,7 +2465,7 @@ unlock:
                        /* It is questionable, see note in unix_dgram_recvmsg.
                         */
                        if (UNIXCB(skb).fp)
-                               scm.fp = scm_fp_dup(UNIXCB(skb).fp);
+                               unix_peek_fds(&scm, skb);
 
                        sk_peek_offset_fwd(sk, chunk);
 
index e0c2c99..4f7c99d 100644 (file)
@@ -357,11 +357,14 @@ static void virtio_vsock_event_fill(struct virtio_vsock *vsock)
 
 static void virtio_vsock_reset_sock(struct sock *sk)
 {
-       lock_sock(sk);
+       /* vmci_transport.c doesn't take sk_lock here either.  At least we're
+        * under vsock_table_lock so the sock cannot disappear while we're
+        * executing.
+        */
+
        sk->sk_state = TCP_CLOSE;
        sk->sk_err = ECONNRESET;
        sk_error_report(sk);
-       release_sock(sk);
 }
 
 static void virtio_vsock_update_guest_cid(struct virtio_vsock *vsock)
index 169ba8b..081e7ae 100644 (file)
@@ -1079,6 +1079,9 @@ virtio_transport_recv_connected(struct sock *sk,
                virtio_transport_recv_enqueue(vsk, pkt);
                sk->sk_data_ready(sk);
                return err;
+       case VIRTIO_VSOCK_OP_CREDIT_REQUEST:
+               virtio_transport_send_credit_update(vsk);
+               break;
        case VIRTIO_VSOCK_OP_CREDIT_UPDATE:
                sk->sk_write_space(sk);
                break;
index 50eb405..16c88be 100644 (file)
@@ -2351,7 +2351,10 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev,
                        goto nla_put_failure;
 
                for (band = state->band_start;
-                    band < NUM_NL80211_BANDS; band++) {
+                    band < (state->split ?
+                               NUM_NL80211_BANDS :
+                               NL80211_BAND_60GHZ + 1);
+                    band++) {
                        struct ieee80211_supported_band *sband;
 
                        /* omit higher bands for ancient software */
index f03c7ac..7897b14 100644 (file)
@@ -1754,16 +1754,14 @@ cfg80211_bss_update(struct cfg80211_registered_device *rdev,
                         * be grouped with this beacon for updates ...
                         */
                        if (!cfg80211_combine_bsses(rdev, new)) {
-                               kfree(new);
+                               bss_ref_put(rdev, new);
                                goto drop;
                        }
                }
 
                if (rdev->bss_entries >= bss_entries_limit &&
                    !cfg80211_bss_expire_oldest(rdev)) {
-                       if (!list_empty(&new->hidden_list))
-                               list_del(&new->hidden_list);
-                       kfree(new);
+                       bss_ref_put(rdev, new);
                        goto drop;
                }
 
index a20aec9..2bf2693 100644 (file)
@@ -298,8 +298,16 @@ static int xfrm_xlate64(struct sk_buff *dst, const struct nlmsghdr *nlh_src)
        len = nlmsg_attrlen(nlh_src, xfrm_msg_min[type]);
 
        nla_for_each_attr(nla, attrs, len, remaining) {
-               int err = xfrm_xlate64_attr(dst, nla);
+               int err;
 
+               switch (type) {
+               case XFRM_MSG_NEWSPDINFO:
+                       err = xfrm_nla_cpy(dst, nla, nla_len(nla));
+                       break;
+               default:
+                       err = xfrm_xlate64_attr(dst, nla);
+                       break;
+               }
                if (err)
                        return err;
        }
@@ -341,7 +349,8 @@ static int xfrm_alloc_compat(struct sk_buff *skb, const struct nlmsghdr *nlh_src
 
 /* Calculates len of translated 64-bit message. */
 static size_t xfrm_user_rcv_calculate_len64(const struct nlmsghdr *src,
-                                           struct nlattr *attrs[XFRMA_MAX+1])
+                                           struct nlattr *attrs[XFRMA_MAX + 1],
+                                           int maxtype)
 {
        size_t len = nlmsg_len(src);
 
@@ -358,10 +367,20 @@ static size_t xfrm_user_rcv_calculate_len64(const struct nlmsghdr *src,
        case XFRM_MSG_POLEXPIRE:
                len += 8;
                break;
+       case XFRM_MSG_NEWSPDINFO:
+               /* attirbutes are xfrm_spdattr_type_t, not xfrm_attr_type_t */
+               return len;
        default:
                break;
        }
 
+       /* Unexpected for anything, but XFRM_MSG_NEWSPDINFO, please
+        * correct both 64=>32-bit and 32=>64-bit translators to copy
+        * new attributes.
+        */
+       if (WARN_ON_ONCE(maxtype))
+               return len;
+
        if (attrs[XFRMA_SA])
                len += 4;
        if (attrs[XFRMA_POLICY])
@@ -440,7 +459,8 @@ static int xfrm_xlate32_attr(void *dst, const struct nlattr *nla,
 
 static int xfrm_xlate32(struct nlmsghdr *dst, const struct nlmsghdr *src,
                        struct nlattr *attrs[XFRMA_MAX+1],
-                       size_t size, u8 type, struct netlink_ext_ack *extack)
+                       size_t size, u8 type, int maxtype,
+                       struct netlink_ext_ack *extack)
 {
        size_t pos;
        int i;
@@ -520,6 +540,25 @@ static int xfrm_xlate32(struct nlmsghdr *dst, const struct nlmsghdr *src,
        }
        pos = dst->nlmsg_len;
 
+       if (maxtype) {
+               /* attirbutes are xfrm_spdattr_type_t, not xfrm_attr_type_t */
+               WARN_ON_ONCE(src->nlmsg_type != XFRM_MSG_NEWSPDINFO);
+
+               for (i = 1; i <= maxtype; i++) {
+                       int err;
+
+                       if (!attrs[i])
+                               continue;
+
+                       /* just copy - no need for translation */
+                       err = xfrm_attr_cpy32(dst, &pos, attrs[i], size,
+                                       nla_len(attrs[i]), nla_len(attrs[i]));
+                       if (err)
+                               return err;
+               }
+               return 0;
+       }
+
        for (i = 1; i < XFRMA_MAX + 1; i++) {
                int err;
 
@@ -564,7 +603,7 @@ static struct nlmsghdr *xfrm_user_rcv_msg_compat(const struct nlmsghdr *h32,
        if (err < 0)
                return ERR_PTR(err);
 
-       len = xfrm_user_rcv_calculate_len64(h32, attrs);
+       len = xfrm_user_rcv_calculate_len64(h32, attrs, maxtype);
        /* The message doesn't need translation */
        if (len == nlmsg_len(h32))
                return NULL;
@@ -574,7 +613,7 @@ static struct nlmsghdr *xfrm_user_rcv_msg_compat(const struct nlmsghdr *h32,
        if (!h64)
                return ERR_PTR(-ENOMEM);
 
-       err = xfrm_xlate32(h64, h32, attrs, len, type, extack);
+       err = xfrm_xlate32(h64, h32, attrs, len, type, maxtype, extack);
        if (err < 0) {
                kvfree(h64);
                return ERR_PTR(err);
index 2e8afe0..cb40ff0 100644 (file)
@@ -241,7 +241,7 @@ static void ipcomp_free_tfms(struct crypto_comp * __percpu *tfms)
                        break;
        }
 
-       WARN_ON(!pos);
+       WARN_ON(list_entry_is_head(pos, &ipcomp_tfms_list, list));
 
        if (--pos->users)
                return;
index 827d842..7f881f5 100644 (file)
@@ -155,7 +155,6 @@ static struct xfrm_policy_afinfo const __rcu *xfrm_policy_afinfo[AF_INET6 + 1]
                                                __read_mostly;
 
 static struct kmem_cache *xfrm_dst_cache __ro_after_init;
-static __read_mostly seqcount_mutex_t xfrm_policy_hash_generation;
 
 static struct rhashtable xfrm_policy_inexact_table;
 static const struct rhashtable_params xfrm_pol_inexact_params;
@@ -585,7 +584,7 @@ static void xfrm_bydst_resize(struct net *net, int dir)
                return;
 
        spin_lock_bh(&net->xfrm.xfrm_policy_lock);
-       write_seqcount_begin(&xfrm_policy_hash_generation);
+       write_seqcount_begin(&net->xfrm.xfrm_policy_hash_generation);
 
        odst = rcu_dereference_protected(net->xfrm.policy_bydst[dir].table,
                                lockdep_is_held(&net->xfrm.xfrm_policy_lock));
@@ -596,7 +595,7 @@ static void xfrm_bydst_resize(struct net *net, int dir)
        rcu_assign_pointer(net->xfrm.policy_bydst[dir].table, ndst);
        net->xfrm.policy_bydst[dir].hmask = nhashmask;
 
-       write_seqcount_end(&xfrm_policy_hash_generation);
+       write_seqcount_end(&net->xfrm.xfrm_policy_hash_generation);
        spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
 
        synchronize_rcu();
@@ -1245,7 +1244,7 @@ static void xfrm_hash_rebuild(struct work_struct *work)
        } while (read_seqretry(&net->xfrm.policy_hthresh.lock, seq));
 
        spin_lock_bh(&net->xfrm.xfrm_policy_lock);
-       write_seqcount_begin(&xfrm_policy_hash_generation);
+       write_seqcount_begin(&net->xfrm.xfrm_policy_hash_generation);
 
        /* make sure that we can insert the indirect policies again before
         * we start with destructive action.
@@ -1354,7 +1353,7 @@ static void xfrm_hash_rebuild(struct work_struct *work)
 
 out_unlock:
        __xfrm_policy_inexact_flush(net);
-       write_seqcount_end(&xfrm_policy_hash_generation);
+       write_seqcount_end(&net->xfrm.xfrm_policy_hash_generation);
        spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
 
        mutex_unlock(&hash_resize_mutex);
@@ -2091,15 +2090,12 @@ static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
        if (unlikely(!daddr || !saddr))
                return NULL;
 
- retry:
-       sequence = read_seqcount_begin(&xfrm_policy_hash_generation);
        rcu_read_lock();
-
-       chain = policy_hash_direct(net, daddr, saddr, family, dir);
-       if (read_seqcount_retry(&xfrm_policy_hash_generation, sequence)) {
-               rcu_read_unlock();
-               goto retry;
-       }
+ retry:
+       do {
+               sequence = read_seqcount_begin(&net->xfrm.xfrm_policy_hash_generation);
+               chain = policy_hash_direct(net, daddr, saddr, family, dir);
+       } while (read_seqcount_retry(&net->xfrm.xfrm_policy_hash_generation, sequence));
 
        ret = NULL;
        hlist_for_each_entry_rcu(pol, chain, bydst) {
@@ -2130,15 +2126,11 @@ static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
        }
 
 skip_inexact:
-       if (read_seqcount_retry(&xfrm_policy_hash_generation, sequence)) {
-               rcu_read_unlock();
+       if (read_seqcount_retry(&net->xfrm.xfrm_policy_hash_generation, sequence))
                goto retry;
-       }
 
-       if (ret && !xfrm_pol_hold_rcu(ret)) {
-               rcu_read_unlock();
+       if (ret && !xfrm_pol_hold_rcu(ret))
                goto retry;
-       }
 fail:
        rcu_read_unlock();
 
@@ -4089,6 +4081,7 @@ static int __net_init xfrm_net_init(struct net *net)
        /* Initialize the per-net locks here */
        spin_lock_init(&net->xfrm.xfrm_state_lock);
        spin_lock_init(&net->xfrm.xfrm_policy_lock);
+       seqcount_spinlock_init(&net->xfrm.xfrm_policy_hash_generation, &net->xfrm.xfrm_policy_lock);
        mutex_init(&net->xfrm.xfrm_cfg_mutex);
 
        rv = xfrm_statistics_init(net);
@@ -4133,7 +4126,6 @@ void __init xfrm_init(void)
 {
        register_pernet_subsys(&xfrm_net_ops);
        xfrm_dev_init();
-       seqcount_mutex_init(&xfrm_policy_hash_generation, &hash_resize_mutex);
        xfrm_input_init();
 
 #ifdef CONFIG_XFRM_ESPINTCP
index b47d613..7aff641 100644 (file)
@@ -2811,6 +2811,16 @@ static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
 
        err = link->doit(skb, nlh, attrs);
 
+       /* We need to free skb allocated in xfrm_alloc_compat() before
+        * returning from this function, because consume_skb() won't take
+        * care of frag_list since netlink destructor sets
+        * sbk->head to NULL. (see netlink_skb_destructor())
+        */
+       if (skb_has_frag_list(skb)) {
+               kfree_skb(skb_shinfo(skb)->frag_list);
+               skb_shinfo(skb)->frag_list = NULL;
+       }
+
 err:
        kvfree(nlh64);
        return err;
index 9c7fbd4..0e7bab3 100755 (executable)
@@ -14,9 +14,9 @@ if [ $? -ne 0 ]; then
 fi
 
 cat <<EOF |
-asm-generic/atomic-instrumented.h
-asm-generic/atomic-long.h
-linux/atomic-arch-fallback.h
+linux/atomic/atomic-instrumented.h
+linux/atomic/atomic-long.h
+linux/atomic/atomic-arch-fallback.h
 EOF
 while read header; do
        OLDSUM="$(tail -n 1 ${LINUXDIR}/include/${header})"
index 59c0052..ef76408 100755 (executable)
@@ -1,8 +1,8 @@
 cat <<EOF
 static __always_inline ${ret}
-${arch}${atomic}_${pfx}${name}${sfx}_acquire(${params})
+arch_${atomic}_${pfx}${name}${sfx}_acquire(${params})
 {
-       ${ret} ret = ${arch}${atomic}_${pfx}${name}${sfx}_relaxed(${args});
+       ${ret} ret = arch_${atomic}_${pfx}${name}${sfx}_relaxed(${args});
        __atomic_acquire_fence();
        return ret;
 }
index a66635b..15caa2e 100755 (executable)
@@ -1,6 +1,6 @@
 cat <<EOF
 /**
- * ${arch}${atomic}_add_negative - add and test if negative
+ * arch_${atomic}_add_negative - add and test if negative
  * @i: integer value to add
  * @v: pointer of type ${atomic}_t
  *
@@ -9,8 +9,8 @@ cat <<EOF
  * result is greater than or equal to zero.
  */
 static __always_inline bool
-${arch}${atomic}_add_negative(${int} i, ${atomic}_t *v)
+arch_${atomic}_add_negative(${int} i, ${atomic}_t *v)
 {
-       return ${arch}${atomic}_add_return(i, v) < 0;
+       return arch_${atomic}_add_return(i, v) < 0;
 }
 EOF
index 2ff598a..9e5159c 100755 (executable)
@@ -1,6 +1,6 @@
 cat << EOF
 /**
- * ${arch}${atomic}_add_unless - add unless the number is already a given value
+ * arch_${atomic}_add_unless - add unless the number is already a given value
  * @v: pointer of type ${atomic}_t
  * @a: the amount to add to v...
  * @u: ...unless v is equal to u.
@@ -9,8 +9,8 @@ cat << EOF
  * Returns true if the addition was done.
  */
 static __always_inline bool
-${arch}${atomic}_add_unless(${atomic}_t *v, ${int} a, ${int} u)
+arch_${atomic}_add_unless(${atomic}_t *v, ${int} a, ${int} u)
 {
-       return ${arch}${atomic}_fetch_add_unless(v, a, u) != u;
+       return arch_${atomic}_fetch_add_unless(v, a, u) != u;
 }
 EOF
index 3f18663..5a42f54 100755 (executable)
@@ -1,7 +1,7 @@
 cat <<EOF
 static __always_inline ${ret}
-${arch}${atomic}_${pfx}andnot${sfx}${order}(${int} i, ${atomic}_t *v)
+arch_${atomic}_${pfx}andnot${sfx}${order}(${int} i, ${atomic}_t *v)
 {
-       ${retstmt}${arch}${atomic}_${pfx}and${sfx}${order}(~i, v);
+       ${retstmt}arch_${atomic}_${pfx}and${sfx}${order}(~i, v);
 }
 EOF
index e2e01f0..8c144c8 100755 (executable)
@@ -1,7 +1,7 @@
 cat <<EOF
 static __always_inline ${ret}
-${arch}${atomic}_${pfx}dec${sfx}${order}(${atomic}_t *v)
+arch_${atomic}_${pfx}dec${sfx}${order}(${atomic}_t *v)
 {
-       ${retstmt}${arch}${atomic}_${pfx}sub${sfx}${order}(1, v);
+       ${retstmt}arch_${atomic}_${pfx}sub${sfx}${order}(1, v);
 }
 EOF
index e8a5e49..8549f35 100755 (executable)
@@ -1,6 +1,6 @@
 cat <<EOF
 /**
- * ${arch}${atomic}_dec_and_test - decrement and test
+ * arch_${atomic}_dec_and_test - decrement and test
  * @v: pointer of type ${atomic}_t
  *
  * Atomically decrements @v by 1 and
@@ -8,8 +8,8 @@ cat <<EOF
  * cases.
  */
 static __always_inline bool
-${arch}${atomic}_dec_and_test(${atomic}_t *v)
+arch_${atomic}_dec_and_test(${atomic}_t *v)
 {
-       return ${arch}${atomic}_dec_return(v) == 0;
+       return arch_${atomic}_dec_return(v) == 0;
 }
 EOF
index 527adec..86bdced 100755 (executable)
@@ -1,14 +1,14 @@
 cat <<EOF
 static __always_inline ${ret}
-${arch}${atomic}_dec_if_positive(${atomic}_t *v)
+arch_${atomic}_dec_if_positive(${atomic}_t *v)
 {
-       ${int} dec, c = ${arch}${atomic}_read(v);
+       ${int} dec, c = arch_${atomic}_read(v);
 
        do {
                dec = c - 1;
                if (unlikely(dec < 0))
                        break;
-       } while (!${arch}${atomic}_try_cmpxchg(v, &c, dec));
+       } while (!arch_${atomic}_try_cmpxchg(v, &c, dec));
 
        return dec;
 }
index dcab684..c531d5a 100755 (executable)
@@ -1,13 +1,13 @@
 cat <<EOF
 static __always_inline bool
-${arch}${atomic}_dec_unless_positive(${atomic}_t *v)
+arch_${atomic}_dec_unless_positive(${atomic}_t *v)
 {
-       ${int} c = ${arch}${atomic}_read(v);
+       ${int} c = arch_${atomic}_read(v);
 
        do {
                if (unlikely(c > 0))
                        return false;
-       } while (!${arch}${atomic}_try_cmpxchg(v, &c, c - 1));
+       } while (!arch_${atomic}_try_cmpxchg(v, &c, c - 1));
 
        return true;
 }
index 3764fc8..07757d8 100755 (executable)
@@ -1,10 +1,10 @@
 cat <<EOF
 static __always_inline ${ret}
-${arch}${atomic}_${pfx}${name}${sfx}(${params})
+arch_${atomic}_${pfx}${name}${sfx}(${params})
 {
        ${ret} ret;
        __atomic_pre_full_fence();
-       ret = ${arch}${atomic}_${pfx}${name}${sfx}_relaxed(${args});
+       ret = arch_${atomic}_${pfx}${name}${sfx}_relaxed(${args});
        __atomic_post_full_fence();
        return ret;
 }
index 0e0b9ae..68ce13c 100755 (executable)
@@ -1,6 +1,6 @@
 cat << EOF
 /**
- * ${arch}${atomic}_fetch_add_unless - add unless the number is already a given value
+ * arch_${atomic}_fetch_add_unless - add unless the number is already a given value
  * @v: pointer of type ${atomic}_t
  * @a: the amount to add to v...
  * @u: ...unless v is equal to u.
@@ -9,14 +9,14 @@ cat << EOF
  * Returns original value of @v
  */
 static __always_inline ${int}
-${arch}${atomic}_fetch_add_unless(${atomic}_t *v, ${int} a, ${int} u)
+arch_${atomic}_fetch_add_unless(${atomic}_t *v, ${int} a, ${int} u)
 {
-       ${int} c = ${arch}${atomic}_read(v);
+       ${int} c = arch_${atomic}_read(v);
 
        do {
                if (unlikely(c == u))
                        break;
-       } while (!${arch}${atomic}_try_cmpxchg(v, &c, c + a));
+       } while (!arch_${atomic}_try_cmpxchg(v, &c, c + a));
 
        return c;
 }
index 15ec629..3c2c373 100755 (executable)
@@ -1,7 +1,7 @@
 cat <<EOF
 static __always_inline ${ret}
-${arch}${atomic}_${pfx}inc${sfx}${order}(${atomic}_t *v)
+arch_${atomic}_${pfx}inc${sfx}${order}(${atomic}_t *v)
 {
-       ${retstmt}${arch}${atomic}_${pfx}add${sfx}${order}(1, v);
+       ${retstmt}arch_${atomic}_${pfx}add${sfx}${order}(1, v);
 }
 EOF
index cecc832..0cf23fe 100755 (executable)
@@ -1,6 +1,6 @@
 cat <<EOF
 /**
- * ${arch}${atomic}_inc_and_test - increment and test
+ * arch_${atomic}_inc_and_test - increment and test
  * @v: pointer of type ${atomic}_t
  *
  * Atomically increments @v by 1
@@ -8,8 +8,8 @@ cat <<EOF
  * other cases.
  */
 static __always_inline bool
-${arch}${atomic}_inc_and_test(${atomic}_t *v)
+arch_${atomic}_inc_and_test(${atomic}_t *v)
 {
-       return ${arch}${atomic}_inc_return(v) == 0;
+       return arch_${atomic}_inc_return(v) == 0;
 }
 EOF
index 50f2d4d..ed8a1f5 100755 (executable)
@@ -1,14 +1,14 @@
 cat <<EOF
 /**
- * ${arch}${atomic}_inc_not_zero - increment unless the number is zero
+ * arch_${atomic}_inc_not_zero - increment unless the number is zero
  * @v: pointer of type ${atomic}_t
  *
  * Atomically increments @v by 1, if @v is non-zero.
  * Returns true if the increment was done.
  */
 static __always_inline bool
-${arch}${atomic}_inc_not_zero(${atomic}_t *v)
+arch_${atomic}_inc_not_zero(${atomic}_t *v)
 {
-       return ${arch}${atomic}_add_unless(v, 1, 0);
+       return arch_${atomic}_add_unless(v, 1, 0);
 }
 EOF
index 87629e0..95d8ce4 100755 (executable)
@@ -1,13 +1,13 @@
 cat <<EOF
 static __always_inline bool
-${arch}${atomic}_inc_unless_negative(${atomic}_t *v)
+arch_${atomic}_inc_unless_negative(${atomic}_t *v)
 {
-       ${int} c = ${arch}${atomic}_read(v);
+       ${int} c = arch_${atomic}_read(v);
 
        do {
                if (unlikely(c < 0))
                        return false;
-       } while (!${arch}${atomic}_try_cmpxchg(v, &c, c + 1));
+       } while (!arch_${atomic}_try_cmpxchg(v, &c, c + 1));
 
        return true;
 }
index 341a88d..803ba75 100755 (executable)
@@ -1,6 +1,6 @@
 cat <<EOF
 static __always_inline ${ret}
-${arch}${atomic}_read_acquire(const ${atomic}_t *v)
+arch_${atomic}_read_acquire(const ${atomic}_t *v)
 {
        return smp_load_acquire(&(v)->counter);
 }
index f8906d5..b46feb5 100755 (executable)
@@ -1,8 +1,8 @@
 cat <<EOF
 static __always_inline ${ret}
-${arch}${atomic}_${pfx}${name}${sfx}_release(${params})
+arch_${atomic}_${pfx}${name}${sfx}_release(${params})
 {
        __atomic_release_fence();
-       ${retstmt}${arch}${atomic}_${pfx}${name}${sfx}_relaxed(${args});
+       ${retstmt}arch_${atomic}_${pfx}${name}${sfx}_relaxed(${args});
 }
 EOF
index 7606827..86ede75 100755 (executable)
@@ -1,6 +1,6 @@
 cat <<EOF
 static __always_inline void
-${arch}${atomic}_set_release(${atomic}_t *v, ${int} i)
+arch_${atomic}_set_release(${atomic}_t *v, ${int} i)
 {
        smp_store_release(&(v)->counter, i);
 }
index c580f4c..260f373 100755 (executable)
@@ -1,6 +1,6 @@
 cat <<EOF
 /**
- * ${arch}${atomic}_sub_and_test - subtract value from variable and test result
+ * arch_${atomic}_sub_and_test - subtract value from variable and test result
  * @i: integer value to subtract
  * @v: pointer of type ${atomic}_t
  *
@@ -9,8 +9,8 @@ cat <<EOF
  * other cases.
  */
 static __always_inline bool
-${arch}${atomic}_sub_and_test(${int} i, ${atomic}_t *v)
+arch_${atomic}_sub_and_test(${int} i, ${atomic}_t *v)
 {
-       return ${arch}${atomic}_sub_return(i, v) == 0;
+       return arch_${atomic}_sub_return(i, v) == 0;
 }
 EOF
index 06db0f7..890f850 100755 (executable)
@@ -1,9 +1,9 @@
 cat <<EOF
 static __always_inline bool
-${arch}${atomic}_try_cmpxchg${order}(${atomic}_t *v, ${int} *old, ${int} new)
+arch_${atomic}_try_cmpxchg${order}(${atomic}_t *v, ${int} *old, ${int} new)
 {
        ${int} r, o = *old;
-       r = ${arch}${atomic}_cmpxchg${order}(v, o, new);
+       r = arch_${atomic}_cmpxchg${order}(v, o, new);
        if (unlikely(r != o))
                *old = r;
        return likely(r == o);
index 317a6ce..8e2da71 100755 (executable)
@@ -2,11 +2,10 @@
 # SPDX-License-Identifier: GPL-2.0
 
 ATOMICDIR=$(dirname $0)
-ARCH=$2
 
 . ${ATOMICDIR}/atomic-tbl.sh
 
-#gen_template_fallback(template, meta, pfx, name, sfx, order, arch, atomic, int, args...)
+#gen_template_fallback(template, meta, pfx, name, sfx, order, atomic, int, args...)
 gen_template_fallback()
 {
        local template="$1"; shift
@@ -15,11 +14,10 @@ gen_template_fallback()
        local name="$1"; shift
        local sfx="$1"; shift
        local order="$1"; shift
-       local arch="$1"; shift
        local atomic="$1"; shift
        local int="$1"; shift
 
-       local atomicname="${arch}${atomic}_${pfx}${name}${sfx}${order}"
+       local atomicname="arch_${atomic}_${pfx}${name}${sfx}${order}"
 
        local ret="$(gen_ret_type "${meta}" "${int}")"
        local retstmt="$(gen_ret_stmt "${meta}")"
@@ -34,7 +32,7 @@ gen_template_fallback()
        fi
 }
 
-#gen_proto_fallback(meta, pfx, name, sfx, order, arch, atomic, int, args...)
+#gen_proto_fallback(meta, pfx, name, sfx, order, atomic, int, args...)
 gen_proto_fallback()
 {
        local meta="$1"; shift
@@ -65,44 +63,26 @@ gen_proto_order_variant()
        local name="$1"; shift
        local sfx="$1"; shift
        local order="$1"; shift
-       local arch="$1"
-       local atomic="$2"
+       local atomic="$1"
 
-       local basename="${arch}${atomic}_${pfx}${name}${sfx}"
+       local basename="arch_${atomic}_${pfx}${name}${sfx}"
 
-       printf "#define arch_${basename}${order} ${basename}${order}\n"
+       printf "#define ${basename}${order} ${basename}${order}\n"
 }
 
-#gen_proto_order_variants(meta, pfx, name, sfx, arch, atomic, int, args...)
+#gen_proto_order_variants(meta, pfx, name, sfx, atomic, int, args...)
 gen_proto_order_variants()
 {
        local meta="$1"; shift
        local pfx="$1"; shift
        local name="$1"; shift
        local sfx="$1"; shift
-       local arch="$1"
-       local atomic="$2"
+       local atomic="$1"
 
-       local basename="${arch}${atomic}_${pfx}${name}${sfx}"
+       local basename="arch_${atomic}_${pfx}${name}${sfx}"
 
        local template="$(find_fallback_template "${pfx}" "${name}" "${sfx}" "${order}")"
 
-       if [ -z "$arch" ]; then
-               gen_proto_order_variant "${meta}" "${pfx}" "${name}" "${sfx}" "" "$@"
-
-               if meta_has_acquire "${meta}"; then
-                       gen_proto_order_variant "${meta}" "${pfx}" "${name}" "${sfx}" "_acquire" "$@"
-               fi
-               if meta_has_release "${meta}"; then
-                       gen_proto_order_variant "${meta}" "${pfx}" "${name}" "${sfx}" "_release" "$@"
-               fi
-               if meta_has_relaxed "${meta}"; then
-                       gen_proto_order_variant "${meta}" "${pfx}" "${name}" "${sfx}" "_relaxed" "$@"
-               fi
-
-               echo ""
-       fi
-
        # If we don't have relaxed atomics, then we don't bother with ordering fallbacks
        # read_acquire and set_release need to be templated, though
        if ! meta_has_relaxed "${meta}"; then
@@ -128,7 +108,7 @@ gen_proto_order_variants()
        gen_basic_fallbacks "${basename}"
 
        if [ ! -z "${template}" ]; then
-               printf "#endif /* ${arch}${atomic}_${pfx}${name}${sfx} */\n\n"
+               printf "#endif /* ${basename} */\n\n"
                gen_proto_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "" "$@"
                gen_proto_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "_acquire" "$@"
                gen_proto_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "_release" "$@"
@@ -187,38 +167,38 @@ gen_try_cmpxchg_fallback()
        local order="$1"; shift;
 
 cat <<EOF
-#ifndef ${ARCH}try_cmpxchg${order}
-#define ${ARCH}try_cmpxchg${order}(_ptr, _oldp, _new) \\
+#ifndef arch_try_cmpxchg${order}
+#define arch_try_cmpxchg${order}(_ptr, _oldp, _new) \\
 ({ \\
        typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \\
-       ___r = ${ARCH}cmpxchg${order}((_ptr), ___o, (_new)); \\
+       ___r = arch_cmpxchg${order}((_ptr), ___o, (_new)); \\
        if (unlikely(___r != ___o)) \\
                *___op = ___r; \\
        likely(___r == ___o); \\
 })
-#endif /* ${ARCH}try_cmpxchg${order} */
+#endif /* arch_try_cmpxchg${order} */
 
 EOF
 }
 
 gen_try_cmpxchg_fallbacks()
 {
-       printf "#ifndef ${ARCH}try_cmpxchg_relaxed\n"
-       printf "#ifdef ${ARCH}try_cmpxchg\n"
+       printf "#ifndef arch_try_cmpxchg_relaxed\n"
+       printf "#ifdef arch_try_cmpxchg\n"
 
-       gen_basic_fallbacks "${ARCH}try_cmpxchg"
+       gen_basic_fallbacks "arch_try_cmpxchg"
 
-       printf "#endif /* ${ARCH}try_cmpxchg */\n\n"
+       printf "#endif /* arch_try_cmpxchg */\n\n"
 
        for order in "" "_acquire" "_release" "_relaxed"; do
                gen_try_cmpxchg_fallback "${order}"
        done
 
-       printf "#else /* ${ARCH}try_cmpxchg_relaxed */\n"
+       printf "#else /* arch_try_cmpxchg_relaxed */\n"
 
-       gen_order_fallbacks "${ARCH}try_cmpxchg"
+       gen_order_fallbacks "arch_try_cmpxchg"
 
-       printf "#endif /* ${ARCH}try_cmpxchg_relaxed */\n\n"
+       printf "#endif /* arch_try_cmpxchg_relaxed */\n\n"
 }
 
 cat << EOF
@@ -234,14 +214,14 @@ cat << EOF
 
 EOF
 
-for xchg in "${ARCH}xchg" "${ARCH}cmpxchg" "${ARCH}cmpxchg64"; do
+for xchg in "arch_xchg" "arch_cmpxchg" "arch_cmpxchg64"; do
        gen_xchg_fallbacks "${xchg}"
 done
 
 gen_try_cmpxchg_fallbacks
 
 grep '^[a-z]' "$1" | while read name meta args; do
-       gen_proto "${meta}" "${name}" "${ARCH}" "atomic" "int" ${args}
+       gen_proto "${meta}" "${name}" "atomic" "int" ${args}
 done
 
 cat <<EOF
@@ -252,7 +232,7 @@ cat <<EOF
 EOF
 
 grep '^[a-z]' "$1" | while read name meta args; do
-       gen_proto "${meta}" "${name}" "${ARCH}" "atomic64" "s64" ${args}
+       gen_proto "${meta}" "${name}" "atomic64" "s64" ${args}
 done
 
 cat <<EOF
index b0c45ae..035ceb4 100755 (executable)
@@ -121,8 +121,8 @@ cat << EOF
  * arch_ variants (i.e. arch_atomic_read()/arch_atomic_cmpxchg()) to avoid
  * double instrumentation.
  */
-#ifndef _ASM_GENERIC_ATOMIC_INSTRUMENTED_H
-#define _ASM_GENERIC_ATOMIC_INSTRUMENTED_H
+#ifndef _LINUX_ATOMIC_INSTRUMENTED_H
+#define _LINUX_ATOMIC_INSTRUMENTED_H
 
 #include <linux/build_bug.h>
 #include <linux/compiler.h>
@@ -138,6 +138,11 @@ grep '^[a-z]' "$1" | while read name meta args; do
        gen_proto "${meta}" "${name}" "atomic64" "s64" ${args}
 done
 
+grep '^[a-z]' "$1" | while read name meta args; do
+       gen_proto "${meta}" "${name}" "atomic_long" "long" ${args}
+done
+
+
 for xchg in "xchg" "cmpxchg" "cmpxchg64" "try_cmpxchg"; do
        for order in "" "_acquire" "_release" "_relaxed"; do
                gen_xchg "${xchg}${order}" ""
@@ -158,5 +163,5 @@ gen_xchg "cmpxchg_double_local" "2 * "
 
 cat <<EOF
 
-#endif /* _ASM_GENERIC_ATOMIC_INSTRUMENTED_H */
+#endif /* _LINUX_ATOMIC_INSTRUMENTED_H */
 EOF
index e318d3f..eda89ce 100755 (executable)
@@ -47,9 +47,9 @@ gen_proto_order_variant()
 
 cat <<EOF
 static __always_inline ${ret}
-atomic_long_${name}(${params})
+arch_atomic_long_${name}(${params})
 {
-       ${retstmt}${atomic}_${name}(${argscast});
+       ${retstmt}arch_${atomic}_${name}(${argscast});
 }
 
 EOF
@@ -61,8 +61,8 @@ cat << EOF
 // Generated by $0
 // DO NOT MODIFY THIS FILE DIRECTLY
 
-#ifndef _ASM_GENERIC_ATOMIC_LONG_H
-#define _ASM_GENERIC_ATOMIC_LONG_H
+#ifndef _LINUX_ATOMIC_LONG_H
+#define _LINUX_ATOMIC_LONG_H
 
 #include <linux/compiler.h>
 #include <asm/types.h>
@@ -98,5 +98,5 @@ done
 
 cat <<EOF
 #endif /* CONFIG_64BIT */
-#endif /* _ASM_GENERIC_ATOMIC_LONG_H */
+#endif /* _LINUX_ATOMIC_LONG_H */
 EOF
index f776a57..5b98a83 100755 (executable)
@@ -8,9 +8,9 @@ ATOMICTBL=${ATOMICDIR}/atomics.tbl
 LINUXDIR=${ATOMICDIR}/../..
 
 cat <<EOF |
-gen-atomic-instrumented.sh      asm-generic/atomic-instrumented.h
-gen-atomic-long.sh              asm-generic/atomic-long.h
-gen-atomic-fallback.sh          linux/atomic-arch-fallback.h           arch_
+gen-atomic-instrumented.sh      linux/atomic/atomic-instrumented.h
+gen-atomic-long.sh              linux/atomic/atomic-long.h
+gen-atomic-fallback.sh          linux/atomic/atomic-arch-fallback.h
 EOF
 while read script header args; do
        /bin/sh ${ATOMICDIR}/${script} ${ATOMICTBL} ${args} > ${LINUXDIR}/include/${header}
index f67b125..94cd49e 100755 (executable)
@@ -1,10 +1,10 @@
 #! /usr/bin/env perl
 # SPDX-License-Identifier: GPL-2.0
 #
-# checkversion find uses of LINUX_VERSION_CODE or KERNEL_VERSION
-# without including <linux/version.h>, or cases of
-# including <linux/version.h> that don't need it.
-# Copyright (C) 2003, Randy Dunlap <rdunlap@xenotime.net>
+# checkversion finds uses of all macros in <linux/version.h>
+# where the source files do not #include <linux/version.h>; or cases
+# of including <linux/version.h> where it is not needed.
+# Copyright (C) 2003, Randy Dunlap <rdunlap@infradead.org>
 
 use strict;
 
@@ -13,7 +13,8 @@ $| = 1;
 my $debugging;
 
 foreach my $file (@ARGV) {
-    next if $file =~ "include/linux/version\.h";
+    next if $file =~ "include/generated/uapi/linux/version\.h";
+    next if $file =~ "usr/include/linux/version\.h";
     # Open this file.
     open( my $f, '<', $file )
       or die "Can't open $file: $!\n";
@@ -41,8 +42,11 @@ foreach my $file (@ARGV) {
            $iLinuxVersion      = $. if m/^\s*#\s*include\s*<linux\/version\.h>/o;
        }
 
-       # Look for uses: LINUX_VERSION_CODE, KERNEL_VERSION, UTS_RELEASE
-       if (($_ =~ /LINUX_VERSION_CODE/) || ($_ =~ /\WKERNEL_VERSION/)) {
+       # Look for uses: LINUX_VERSION_CODE, KERNEL_VERSION,
+       # LINUX_VERSION_MAJOR, LINUX_VERSION_PATCHLEVEL, LINUX_VERSION_SUBLEVEL
+       if (($_ =~ /LINUX_VERSION_CODE/) || ($_ =~ /\WKERNEL_VERSION/) ||
+           ($_ =~ /LINUX_VERSION_MAJOR/) || ($_ =~ /LINUX_VERSION_PATCHLEVEL/) ||
+           ($_ =~ /LINUX_VERSION_SUBLEVEL/)) {
            $fUseVersion = 1;
             last if $iLinuxVersion;
         }
index c17e480..8f6b13a 100755 (executable)
@@ -173,39 +173,6 @@ my $mcount_regex;  # Find the call site to mcount (return offset)
 my $mcount_adjust;     # Address adjustment to mcount offset
 my $alignment;         # The .align value to use for $mcount_section
 my $section_type;      # Section header plus possible alignment command
-my $can_use_local = 0;         # If we can use local function references
-
-# Shut up recordmcount if user has older objcopy
-my $quiet_recordmcount = ".tmp_quiet_recordmcount";
-my $print_warning = 1;
-$print_warning = 0 if ( -f $quiet_recordmcount);
-
-##
-# check_objcopy - whether objcopy supports --globalize-symbols
-#
-#  --globalize-symbols came out in 2.17, we must test the version
-#  of objcopy, and if it is less than 2.17, then we can not
-#  record local functions.
-sub check_objcopy
-{
-    open (IN, "$objcopy --version |") or die "error running $objcopy";
-    while (<IN>) {
-       if (/objcopy.*\s(\d+)\.(\d+)/) {
-           $can_use_local = 1 if ($1 > 2 || ($1 == 2 && $2 >= 17));
-           last;
-       }
-    }
-    close (IN);
-
-    if (!$can_use_local && $print_warning) {
-       print STDERR "WARNING: could not find objcopy version or version " .
-           "is less than 2.17.\n" .
-           "\tLocal function references are disabled.\n";
-       open (QUIET, ">$quiet_recordmcount");
-       printf QUIET "Disables the warning from recordmcount.pl\n";
-       close QUIET;
-    }
-}
 
 if ($arch =~ /(x86(_64)?)|(i386)/) {
     if ($bits == 64) {
@@ -434,8 +401,6 @@ if ($filename =~ m,^(.*)(\.\S),) {
 my $mcount_s = $dirname . "/.tmp_mc_" . $prefix . ".s";
 my $mcount_o = $dirname . "/.tmp_mc_" . $prefix . ".o";
 
-check_objcopy();
-
 #
 # Step 1: find all the local (static functions) and weak symbols.
 #         't' is local, 'w/W' is weak
@@ -473,11 +438,6 @@ sub update_funcs
 
     # is this function static? If so, note this fact.
     if (defined $locals{$ref_func}) {
-
-       # only use locals if objcopy supports globalize-symbols
-       if (!$can_use_local) {
-           return;
-       }
        $convert{$ref_func} = 1;
     }
 
index 74f8aad..7011fbe 100755 (executable)
@@ -17,7 +17,7 @@ Usage:
        $ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
        Wait some times but not too much, the script is a bit slow.
        Break the pipe (Ctrl + Z)
-       $ scripts/draw_functrace.py < raw_trace_func > draw_functrace
+       $ scripts/tracing/draw_functrace.py < ~/raw_trace_func > draw_functrace
        Then you have your drawn trace in draw_functrace
 """
 
@@ -103,10 +103,10 @@ def parseLine(line):
        line = line.strip()
        if line.startswith("#"):
                raise CommentLineException
-       m = re.match("[^]]+?\\] +([0-9.]+): (\\w+) <-(\\w+)", line)
+       m = re.match("[^]]+?\\] +([a-z.]+) +([0-9.]+): (\\w+) <-(\\w+)", line)
        if m is None:
                raise BrokenLineException
-       return (m.group(1), m.group(2), m.group(3))
+       return (m.group(2), m.group(3), m.group(4))
 
 
 def main():
index 18f01f3..d98260f 100644 (file)
@@ -55,7 +55,7 @@ int __init parse_efi_signature_list(
                memcpy(&list, data, sizeof(list));
                pr_devel("LIST[%04x] guid=%pUl ls=%x hs=%x ss=%x\n",
                         offs,
-                        list.signature_type.b, list.signature_list_size,
+                        &list.signature_type, list.signature_list_size,
                         list.signature_header_size, list.signature_size);
 
                lsize = list.signature_list_size;
index 09533cb..9ffa9e9 100644 (file)
@@ -58,10 +58,11 @@ const char *const lockdown_reasons[LOCKDOWN_CONFIDENTIALITY_MAX+1] = {
        [LOCKDOWN_MMIOTRACE] = "unsafe mmio",
        [LOCKDOWN_DEBUGFS] = "debugfs access",
        [LOCKDOWN_XMON_WR] = "xmon write access",
+       [LOCKDOWN_BPF_WRITE_USER] = "use of bpf to write user RAM",
        [LOCKDOWN_INTEGRITY_MAX] = "integrity",
        [LOCKDOWN_KCORE] = "/proc/kcore access",
        [LOCKDOWN_KPROBES] = "use of kprobes",
-       [LOCKDOWN_BPF_READ] = "use of bpf to read kernel RAM",
+       [LOCKDOWN_BPF_READ_KERNEL] = "use of bpf to read kernel RAM",
        [LOCKDOWN_PERF] = "unsafe use of perf",
        [LOCKDOWN_TRACEFS] = "use of tracefs",
        [LOCKDOWN_XMON_RW] = "xmon read and write access",
index defc5ef..0ae1b71 100644 (file)
@@ -874,7 +874,7 @@ int policydb_load_isids(struct policydb *p, struct sidtab *s)
        rc = sidtab_init(s);
        if (rc) {
                pr_err("SELinux:  out of memory on SID table init\n");
-               goto out;
+               return rc;
        }
 
        head = p->ocontexts[OCON_ISID];
@@ -885,7 +885,7 @@ int policydb_load_isids(struct policydb *p, struct sidtab *s)
                if (sid == SECSID_NULL) {
                        pr_err("SELinux:  SID 0 was assigned a context.\n");
                        sidtab_destroy(s);
-                       goto out;
+                       return -EINVAL;
                }
 
                /* Ignore initial SIDs unused by this kernel. */
@@ -897,12 +897,10 @@ int policydb_load_isids(struct policydb *p, struct sidtab *s)
                        pr_err("SELinux:  unable to load initial SID %s.\n",
                               name);
                        sidtab_destroy(s);
-                       goto out;
+                       return rc;
                }
        }
-       rc = 0;
-out:
-       return rc;
+       return 0;
 }
 
 int policydb_class_isvalid(struct policydb *p, unsigned int class)
index 83b79ed..439a358 100644 (file)
@@ -215,7 +215,7 @@ static int snd_dma_continuous_mmap(struct snd_dma_buffer *dmab,
                                   struct vm_area_struct *area)
 {
        return remap_pfn_range(area, area->vm_start,
-                              dmab->addr >> PAGE_SHIFT,
+                              page_to_pfn(virt_to_page(dmab->area)),
                               area->vm_end - area->vm_start,
                               area->vm_page_prot);
 }
index 6a2971a..71323d8 100644 (file)
@@ -246,12 +246,15 @@ static bool hw_support_mmap(struct snd_pcm_substream *substream)
        if (!(substream->runtime->hw.info & SNDRV_PCM_INFO_MMAP))
                return false;
 
-       if (substream->ops->mmap)
+       if (substream->ops->mmap || substream->ops->page)
                return true;
 
        switch (substream->dma_buffer.dev.type) {
        case SNDRV_DMA_TYPE_UNKNOWN:
-               return false;
+               /* we can't know the device, so just assume that the driver does
+                * everything right
+                */
+               return true;
        case SNDRV_DMA_TYPE_CONTINUOUS:
        case SNDRV_DMA_TYPE_VMALLOC:
                return true;
index b9c2ce2..84d7863 100644 (file)
@@ -514,10 +514,11 @@ static int check_and_subscribe_port(struct snd_seq_client *client,
        return err;
 }
 
-static void delete_and_unsubscribe_port(struct snd_seq_client *client,
-                                       struct snd_seq_client_port *port,
-                                       struct snd_seq_subscribers *subs,
-                                       bool is_src, bool ack)
+/* called with grp->list_mutex held */
+static void __delete_and_unsubscribe_port(struct snd_seq_client *client,
+                                         struct snd_seq_client_port *port,
+                                         struct snd_seq_subscribers *subs,
+                                         bool is_src, bool ack)
 {
        struct snd_seq_port_subs_info *grp;
        struct list_head *list;
@@ -525,7 +526,6 @@ static void delete_and_unsubscribe_port(struct snd_seq_client *client,
 
        grp = is_src ? &port->c_src : &port->c_dest;
        list = is_src ? &subs->src_list : &subs->dest_list;
-       down_write(&grp->list_mutex);
        write_lock_irq(&grp->list_lock);
        empty = list_empty(list);
        if (!empty)
@@ -535,6 +535,18 @@ static void delete_and_unsubscribe_port(struct snd_seq_client *client,
 
        if (!empty)
                unsubscribe_port(client, port, grp, &subs->info, ack);
+}
+
+static void delete_and_unsubscribe_port(struct snd_seq_client *client,
+                                       struct snd_seq_client_port *port,
+                                       struct snd_seq_subscribers *subs,
+                                       bool is_src, bool ack)
+{
+       struct snd_seq_port_subs_info *grp;
+
+       grp = is_src ? &port->c_src : &port->c_dest;
+       down_write(&grp->list_mutex);
+       __delete_and_unsubscribe_port(client, port, subs, is_src, ack);
        up_write(&grp->list_mutex);
 }
 
@@ -590,27 +602,30 @@ int snd_seq_port_disconnect(struct snd_seq_client *connector,
                            struct snd_seq_client_port *dest_port,
                            struct snd_seq_port_subscribe *info)
 {
-       struct snd_seq_port_subs_info *src = &src_port->c_src;
+       struct snd_seq_port_subs_info *dest = &dest_port->c_dest;
        struct snd_seq_subscribers *subs;
        int err = -ENOENT;
 
-       down_write(&src->list_mutex);
+       /* always start from deleting the dest port for avoiding concurrent
+        * deletions
+        */
+       down_write(&dest->list_mutex);
        /* look for the connection */
-       list_for_each_entry(subs, &src->list_head, src_list) {
+       list_for_each_entry(subs, &dest->list_head, dest_list) {
                if (match_subs_info(info, &subs->info)) {
-                       atomic_dec(&subs->ref_count); /* mark as not ready */
+                       __delete_and_unsubscribe_port(dest_client, dest_port,
+                                                     subs, false,
+                                                     connector->number != dest_client->number);
                        err = 0;
                        break;
                }
        }
-       up_write(&src->list_mutex);
+       up_write(&dest->list_mutex);
        if (err < 0)
                return err;
 
        delete_and_unsubscribe_port(src_client, src_port, subs, true,
                                    connector->number != src_client->number);
-       delete_and_unsubscribe_port(dest_client, dest_port, subs, false,
-                                   connector->number != dest_client->number);
        kfree(subs);
        return 0;
 }
index 0ef242f..fff18b5 100644 (file)
@@ -153,7 +153,7 @@ static int init_stream(struct snd_oxfw *oxfw, struct amdtp_stream *stream)
        struct cmp_connection *conn;
        enum cmp_direction c_dir;
        enum amdtp_stream_direction s_dir;
-       unsigned int flags = CIP_UNAWARE_SYT;
+       unsigned int flags = 0;
        int err;
 
        if (!(oxfw->quirks & SND_OXFW_QUIRK_BLOCKING_TRANSMISSION))
@@ -161,6 +161,13 @@ static int init_stream(struct snd_oxfw *oxfw, struct amdtp_stream *stream)
        else
                flags |= CIP_BLOCKING;
 
+       // OXFW 970/971 has no function to generate playback timing according to the sequence
+       // of value in syt field, thus the packet should include NO_INFO value in the field.
+       // However, some models just ignore data blocks in packet with NO_INFO for audio data
+       // processing.
+       if (!(oxfw->quirks & SND_OXFW_QUIRK_IGNORE_NO_INFO_PACKET))
+               flags |= CIP_UNAWARE_SYT;
+
        if (stream == &oxfw->tx_stream) {
                conn = &oxfw->out_conn;
                c_dir = CMP_OUTPUT;
index 84971d7..cb5b5e3 100644 (file)
@@ -159,8 +159,10 @@ static int detect_quirks(struct snd_oxfw *oxfw, const struct ieee1394_device_id
                return snd_oxfw_scs1x_add(oxfw);
        }
 
-       if (entry->vendor_id == OUI_APOGEE && entry->model_id == MODEL_DUET_FW)
-               oxfw->quirks |= SND_OXFW_QUIRK_BLOCKING_TRANSMISSION;
+       if (entry->vendor_id == OUI_APOGEE && entry->model_id == MODEL_DUET_FW) {
+               oxfw->quirks |= SND_OXFW_QUIRK_BLOCKING_TRANSMISSION |
+                               SND_OXFW_QUIRK_IGNORE_NO_INFO_PACKET;
+       }
 
        /*
         * TASCAM FireOne has physical control and requires a pair of additional
index ee47abc..c13034f 100644 (file)
@@ -42,6 +42,11 @@ enum snd_oxfw_quirk {
        SND_OXFW_QUIRK_BLOCKING_TRANSMISSION = 0x04,
        // Stanton SCS1.d and SCS1.m support unique transaction.
        SND_OXFW_QUIRK_SCS_TRANSACTION = 0x08,
+       // Apogee Duet FireWire ignores data blocks in packet with NO_INFO for audio data
+       // processing, while output level meter moves. Any value in syt field of packet takes
+       // the device to process audio data even if the value is invalid in a point of
+       // IEC 61883-1/6.
+       SND_OXFW_QUIRK_IGNORE_NO_INFO_PACKET = 0x10,
 };
 
 /* This is an arbitrary number for convinience. */
index e97d005..481d8f8 100644 (file)
@@ -3460,7 +3460,7 @@ static int cap_put_caller(struct snd_kcontrol *kcontrol,
        struct hda_gen_spec *spec = codec->spec;
        const struct hda_input_mux *imux;
        struct nid_path *path;
-       int i, adc_idx, err = 0;
+       int i, adc_idx, ret, err = 0;
 
        imux = &spec->input_mux;
        adc_idx = kcontrol->id.index;
@@ -3470,9 +3470,13 @@ static int cap_put_caller(struct snd_kcontrol *kcontrol,
                if (!path || !path->ctls[type])
                        continue;
                kcontrol->private_value = path->ctls[type];
-               err = func(kcontrol, ucontrol);
-               if (err < 0)
+               ret = func(kcontrol, ucontrol);
+               if (ret < 0) {
+                       err = ret;
                        break;
+               }
+               if (ret > 0)
+                       err = 1;
        }
        mutex_unlock(&codec->control_mutex);
        if (err >= 0 && spec->cap_sync_hook)
index 0322b28..0062c18 100644 (file)
@@ -883,10 +883,11 @@ static unsigned int azx_get_pos_skl(struct azx *chip, struct azx_dev *azx_dev)
        return azx_get_pos_posbuf(chip, azx_dev);
 }
 
-static void azx_shutdown_chip(struct azx *chip)
+static void __azx_shutdown_chip(struct azx *chip, bool skip_link_reset)
 {
        azx_stop_chip(chip);
-       azx_enter_link_reset(chip);
+       if (!skip_link_reset)
+               azx_enter_link_reset(chip);
        azx_clear_irq_pending(chip);
        display_power(chip, false);
 }
@@ -895,6 +896,11 @@ static void azx_shutdown_chip(struct azx *chip)
 static DEFINE_MUTEX(card_list_lock);
 static LIST_HEAD(card_list);
 
+static void azx_shutdown_chip(struct azx *chip)
+{
+       __azx_shutdown_chip(chip, false);
+}
+
 static void azx_add_card_list(struct azx *chip)
 {
        struct hda_intel *hda = container_of(chip, struct hda_intel, chip);
@@ -2385,7 +2391,7 @@ static void azx_shutdown(struct pci_dev *pci)
                return;
        chip = card->private_data;
        if (chip && chip->running)
-               azx_shutdown_chip(chip);
+               __azx_shutdown_chip(chip, true);
 }
 
 /* PCI IDs */
index caaf0e8..7ad689f 100644 (file)
@@ -6658,6 +6658,7 @@ enum {
        ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP,
        ALC623_FIXUP_LENOVO_THINKSTATION_P340,
        ALC255_FIXUP_ACER_HEADPHONE_AND_MIC,
+       ALC236_FIXUP_HP_LIMIT_INT_MIC_BOOST,
 };
 
 static const struct hda_fixup alc269_fixups[] = {
@@ -8242,6 +8243,12 @@ static const struct hda_fixup alc269_fixups[] = {
                .chained = true,
                .chain_id = ALC255_FIXUP_XIAOMI_HEADSET_MIC
        },
+       [ALC236_FIXUP_HP_LIMIT_INT_MIC_BOOST] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc269_fixup_limit_int_mic_boost,
+               .chained = true,
+               .chain_id = ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF,
+       },
 };
 
 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -8274,9 +8281,11 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1025, 0x1290, "Acer Veriton Z4860G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
        SND_PCI_QUIRK(0x1025, 0x1291, "Acer Veriton Z4660G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
        SND_PCI_QUIRK(0x1025, 0x129c, "Acer SWIFT SF314-55", ALC256_FIXUP_ACER_HEADSET_MIC),
+       SND_PCI_QUIRK(0x1025, 0x1300, "Acer SWIFT SF314-56", ALC256_FIXUP_ACER_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1025, 0x1308, "Acer Aspire Z24-890", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
        SND_PCI_QUIRK(0x1025, 0x132a, "Acer TravelMate B114-21", ALC233_FIXUP_ACER_HEADSET_MIC),
        SND_PCI_QUIRK(0x1025, 0x1330, "Acer TravelMate X514-51T", ALC255_FIXUP_ACER_HEADSET_MIC),
+       SND_PCI_QUIRK(0x1025, 0x142b, "Acer Swift SF314-42", ALC255_FIXUP_ACER_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1025, 0x1430, "Acer TravelMate B311R-31", ALC256_FIXUP_ACER_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1025, 0x1466, "Acer Aspire A515-56", ALC255_FIXUP_ACER_HEADPHONE_AND_MIC),
        SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z),
@@ -8330,6 +8339,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x0a2e, "Dell", ALC236_FIXUP_DELL_AIO_HEADSET_MIC),
        SND_PCI_QUIRK(0x1028, 0x0a30, "Dell", ALC236_FIXUP_DELL_AIO_HEADSET_MIC),
        SND_PCI_QUIRK(0x1028, 0x0a58, "Dell", ALC255_FIXUP_DELL_HEADSET_MIC),
+       SND_PCI_QUIRK(0x1028, 0x0a61, "Dell XPS 15 9510", ALC289_FIXUP_DUAL_SPK),
        SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
@@ -8429,13 +8439,14 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x103c, 0x87f4, "HP", ALC287_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x87f5, "HP", ALC287_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x87f7, "HP Spectre x360 14", ALC245_FIXUP_HP_X360_AMP),
+       SND_PCI_QUIRK(0x103c, 0x8805, "HP ProBook 650 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x880d, "HP EliteBook 830 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x8846, "HP EliteBook 850 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x8847, "HP EliteBook x360 830 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x884b, "HP EliteBook 840 Aero G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x884c, "HP EliteBook 840 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
-       SND_PCI_QUIRK(0x103c, 0x8862, "HP ProBook 445 G8 Notebook PC", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
-       SND_PCI_QUIRK(0x103c, 0x8863, "HP ProBook 445 G8 Notebook PC", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+       SND_PCI_QUIRK(0x103c, 0x8862, "HP ProBook 445 G8 Notebook PC", ALC236_FIXUP_HP_LIMIT_INT_MIC_BOOST),
+       SND_PCI_QUIRK(0x103c, 0x8863, "HP ProBook 445 G8 Notebook PC", ALC236_FIXUP_HP_LIMIT_INT_MIC_BOOST),
        SND_PCI_QUIRK(0x103c, 0x886d, "HP ZBook Fury 17.3 Inch G8 Mobile Workstation PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
        SND_PCI_QUIRK(0x103c, 0x8870, "HP ZBook Fury 15.6 Inch G8 Mobile Workstation PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
        SND_PCI_QUIRK(0x103c, 0x8873, "HP ZBook Studio 15.6 Inch G8 Mobile Workstation PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
@@ -8463,6 +8474,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
        SND_PCI_QUIRK(0x1043, 0x1740, "ASUS UX430UA", ALC295_FIXUP_ASUS_DACS),
        SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_DUAL_SPK),
+       SND_PCI_QUIRK(0x1043, 0x1662, "ASUS GV301QH", ALC294_FIXUP_ASUS_DUAL_SPK),
        SND_PCI_QUIRK(0x1043, 0x1881, "ASUS Zephyrus S/M", ALC294_FIXUP_ASUS_GX502_PINS),
        SND_PCI_QUIRK(0x1043, 0x18b1, "Asus MJ401TA", ALC256_FIXUP_ASUS_HEADSET_MIC),
        SND_PCI_QUIRK(0x1043, 0x18f1, "Asus FX505DT", ALC256_FIXUP_ASUS_HEADSET_MIC),
index a5c1a2c..773a136 100644 (file)
@@ -1041,6 +1041,7 @@ static const struct hda_fixup via_fixups[] = {
 };
 
 static const struct snd_pci_quirk vt2002p_fixups[] = {
+       SND_PCI_QUIRK(0x1043, 0x13f7, "Asus B23E", VIA_FIXUP_POWER_SAVE),
        SND_PCI_QUIRK(0x1043, 0x1487, "Asus G75", VIA_FIXUP_ASUS_G75),
        SND_PCI_QUIRK(0x1043, 0x8532, "Asus X202E", VIA_FIXUP_INTMIC_BOOST),
        SND_PCI_QUIRK_VENDOR(0x1558, "Clevo", VIA_FIXUP_POWER_SAVE),
index 8a13462..5dcf77a 100644 (file)
@@ -36,6 +36,7 @@ config SND_SOC_COMPRESS
 
 config SND_SOC_TOPOLOGY
        bool
+       select SND_DYNAMIC_MINORS
 
 config SND_SOC_TOPOLOGY_KUNIT_TEST
        tristate "KUnit tests for SoC topology"
index 9449fb4..3c60c5f 100644 (file)
@@ -525,6 +525,7 @@ static struct snd_soc_dai_link cz_dai_7219_98357[] = {
                                | SND_SOC_DAIFMT_CBM_CFM,
                .init = cz_da7219_init,
                .dpcm_playback = 1,
+               .stop_dma_first = 1,
                .ops = &cz_da7219_play_ops,
                SND_SOC_DAILINK_REG(designware1, dlgs, platform),
        },
@@ -534,6 +535,7 @@ static struct snd_soc_dai_link cz_dai_7219_98357[] = {
                .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF
                                | SND_SOC_DAIFMT_CBM_CFM,
                .dpcm_capture = 1,
+               .stop_dma_first = 1,
                .ops = &cz_da7219_cap_ops,
                SND_SOC_DAILINK_REG(designware2, dlgs, platform),
        },
@@ -543,6 +545,7 @@ static struct snd_soc_dai_link cz_dai_7219_98357[] = {
                .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF
                                | SND_SOC_DAIFMT_CBM_CFM,
                .dpcm_playback = 1,
+               .stop_dma_first = 1,
                .ops = &cz_max_play_ops,
                SND_SOC_DAILINK_REG(designware3, mx, platform),
        },
@@ -553,6 +556,7 @@ static struct snd_soc_dai_link cz_dai_7219_98357[] = {
                .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF
                                | SND_SOC_DAIFMT_CBM_CFM,
                .dpcm_capture = 1,
+               .stop_dma_first = 1,
                .ops = &cz_dmic0_cap_ops,
                SND_SOC_DAILINK_REG(designware3, adau, platform),
        },
@@ -563,6 +567,7 @@ static struct snd_soc_dai_link cz_dai_7219_98357[] = {
                .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF
                                | SND_SOC_DAIFMT_CBM_CFM,
                .dpcm_capture = 1,
+               .stop_dma_first = 1,
                .ops = &cz_dmic1_cap_ops,
                SND_SOC_DAILINK_REG(designware2, adau, platform),
        },
index 143155a..cc1ce6f 100644 (file)
@@ -969,7 +969,7 @@ static int acp_dma_hw_params(struct snd_soc_component *component,
 
        acp_set_sram_bank_state(rtd->acp_mmio, 0, true);
        /* Save for runtime private data */
-       rtd->dma_addr = substream->dma_buffer.addr;
+       rtd->dma_addr = runtime->dma_addr;
        rtd->order = get_order(size);
 
        /* Fill the page table entries in ACP SRAM */
index 8148b0d..597d7c4 100644 (file)
@@ -286,7 +286,7 @@ static int acp3x_dma_hw_params(struct snd_soc_component *component,
                pr_err("pinfo failed\n");
        }
        size = params_buffer_bytes(params);
-       rtd->dma_addr = substream->dma_buffer.addr;
+       rtd->dma_addr = substream->runtime->dma_addr;
        rtd->num_pages = (PAGE_ALIGN(size) >> PAGE_SHIFT);
        config_acp3x_dma(rtd, substream->stream);
        return 0;
index bd20622..0391c28 100644 (file)
@@ -242,7 +242,7 @@ static int acp_pdm_dma_hw_params(struct snd_soc_component *component,
                return -EINVAL;
        size = params_buffer_bytes(params);
        period_bytes = params_period_bytes(params);
-       rtd->dma_addr = substream->dma_buffer.addr;
+       rtd->dma_addr = substream->runtime->dma_addr;
        rtd->num_pages = (PAGE_ALIGN(size) >> PAGE_SHIFT);
        config_acp_dma(rtd, substream->stream);
        init_pdm_ring_buffer(MEM_WINDOW_START, size, period_bytes,
index 19438da..7b8040e 100644 (file)
@@ -382,6 +382,8 @@ static const struct dev_pm_ops rn_acp_pm = {
        .runtime_resume =  snd_rn_acp_resume,
        .suspend = snd_rn_acp_suspend,
        .resume =       snd_rn_acp_resume,
+       .restore =      snd_rn_acp_resume,
+       .poweroff =     snd_rn_acp_suspend,
 };
 
 static void snd_rn_acp_remove(struct pci_dev *pci)
index a3b784e..db16071 100644 (file)
@@ -1559,6 +1559,7 @@ config SND_SOC_WCD934X
 config SND_SOC_WCD938X
        depends on SND_SOC_WCD938X_SDW
        tristate
+       depends on SOUNDWIRE || !SOUNDWIRE
 
 config SND_SOC_WCD938X_SDW
        tristate "WCD9380/WCD9385 Codec - SDW"
index de8b83d..7bb38c3 100644 (file)
@@ -583,7 +583,10 @@ obj-$(CONFIG_SND_SOC_WCD_MBHC)     += snd-soc-wcd-mbhc.o
 obj-$(CONFIG_SND_SOC_WCD9335)  += snd-soc-wcd9335.o
 obj-$(CONFIG_SND_SOC_WCD934X)  += snd-soc-wcd934x.o
 obj-$(CONFIG_SND_SOC_WCD938X)  += snd-soc-wcd938x.o
-obj-$(CONFIG_SND_SOC_WCD938X_SDW) += snd-soc-wcd938x-sdw.o
+ifdef CONFIG_SND_SOC_WCD938X_SDW
+# avoid link failure by forcing sdw code built-in when needed
+obj-$(CONFIG_SND_SOC_WCD938X) += snd-soc-wcd938x-sdw.o
+endif
 obj-$(CONFIG_SND_SOC_WL1273)   += snd-soc-wl1273.o
 obj-$(CONFIG_SND_SOC_WM0010)   += snd-soc-wm0010.o
 obj-$(CONFIG_SND_SOC_WM1250_EV1) += snd-soc-wm1250-ev1.o
index eff013f..99c022b 100644 (file)
@@ -405,7 +405,7 @@ static const struct regmap_config cs42l42_regmap = {
        .use_single_write = true,
 };
 
-static DECLARE_TLV_DB_SCALE(adc_tlv, -9600, 100, false);
+static DECLARE_TLV_DB_SCALE(adc_tlv, -9700, 100, true);
 static DECLARE_TLV_DB_SCALE(mixer_tlv, -6300, 100, true);
 
 static const char * const cs42l42_hpf_freq_text[] = {
@@ -425,34 +425,23 @@ static SOC_ENUM_SINGLE_DECL(cs42l42_wnf3_freq_enum, CS42L42_ADC_WNF_HPF_CTL,
                            CS42L42_ADC_WNF_CF_SHIFT,
                            cs42l42_wnf3_freq_text);
 
-static const char * const cs42l42_wnf05_freq_text[] = {
-       "280Hz", "315Hz", "350Hz", "385Hz",
-       "420Hz", "455Hz", "490Hz", "525Hz"
-};
-
-static SOC_ENUM_SINGLE_DECL(cs42l42_wnf05_freq_enum, CS42L42_ADC_WNF_HPF_CTL,
-                           CS42L42_ADC_WNF_CF_SHIFT,
-                           cs42l42_wnf05_freq_text);
-
 static const struct snd_kcontrol_new cs42l42_snd_controls[] = {
        /* ADC Volume and Filter Controls */
        SOC_SINGLE("ADC Notch Switch", CS42L42_ADC_CTL,
-                               CS42L42_ADC_NOTCH_DIS_SHIFT, true, false),
+                               CS42L42_ADC_NOTCH_DIS_SHIFT, true, true),
        SOC_SINGLE("ADC Weak Force Switch", CS42L42_ADC_CTL,
                                CS42L42_ADC_FORCE_WEAK_VCM_SHIFT, true, false),
        SOC_SINGLE("ADC Invert Switch", CS42L42_ADC_CTL,
                                CS42L42_ADC_INV_SHIFT, true, false),
        SOC_SINGLE("ADC Boost Switch", CS42L42_ADC_CTL,
                                CS42L42_ADC_DIG_BOOST_SHIFT, true, false),
-       SOC_SINGLE_SX_TLV("ADC Volume", CS42L42_ADC_VOLUME,
-                               CS42L42_ADC_VOL_SHIFT, 0xA0, 0x6C, adc_tlv),
+       SOC_SINGLE_S8_TLV("ADC Volume", CS42L42_ADC_VOLUME, -97, 12, adc_tlv),
        SOC_SINGLE("ADC WNF Switch", CS42L42_ADC_WNF_HPF_CTL,
                                CS42L42_ADC_WNF_EN_SHIFT, true, false),
        SOC_SINGLE("ADC HPF Switch", CS42L42_ADC_WNF_HPF_CTL,
                                CS42L42_ADC_HPF_EN_SHIFT, true, false),
        SOC_ENUM("HPF Corner Freq", cs42l42_hpf_freq_enum),
        SOC_ENUM("WNF 3dB Freq", cs42l42_wnf3_freq_enum),
-       SOC_ENUM("WNF 05dB Freq", cs42l42_wnf05_freq_enum),
 
        /* DAC Volume and Filter Controls */
        SOC_SINGLE("DACA Invert Switch", CS42L42_DAC_CTL1,
@@ -471,8 +460,8 @@ static const struct snd_soc_dapm_widget cs42l42_dapm_widgets[] = {
        SND_SOC_DAPM_OUTPUT("HP"),
        SND_SOC_DAPM_DAC("DAC", NULL, CS42L42_PWR_CTL1, CS42L42_HP_PDN_SHIFT, 1),
        SND_SOC_DAPM_MIXER("MIXER", CS42L42_PWR_CTL1, CS42L42_MIXER_PDN_SHIFT, 1, NULL, 0),
-       SND_SOC_DAPM_AIF_IN("SDIN1", NULL, 0, CS42L42_ASP_RX_DAI0_EN, CS42L42_ASP_RX0_CH1_SHIFT, 0),
-       SND_SOC_DAPM_AIF_IN("SDIN2", NULL, 1, CS42L42_ASP_RX_DAI0_EN, CS42L42_ASP_RX0_CH2_SHIFT, 0),
+       SND_SOC_DAPM_AIF_IN("SDIN1", NULL, 0, SND_SOC_NOPM, 0, 0),
+       SND_SOC_DAPM_AIF_IN("SDIN2", NULL, 1, SND_SOC_NOPM, 0, 0),
 
        /* Playback Requirements */
        SND_SOC_DAPM_SUPPLY("ASP DAI0", CS42L42_PWR_CTL1, CS42L42_ASP_DAI_PDN_SHIFT, 1, NULL, 0),
@@ -630,6 +619,8 @@ static int cs42l42_pll_config(struct snd_soc_component *component)
 
        for (i = 0; i < ARRAY_SIZE(pll_ratio_table); i++) {
                if (pll_ratio_table[i].sclk == clk) {
+                       cs42l42->pll_config = i;
+
                        /* Configure the internal sample rate */
                        snd_soc_component_update_bits(component, CS42L42_MCLK_CTL,
                                        CS42L42_INTERNAL_FS_MASK,
@@ -638,14 +629,9 @@ static int cs42l42_pll_config(struct snd_soc_component *component)
                                        (pll_ratio_table[i].mclk_int !=
                                        24000000)) <<
                                        CS42L42_INTERNAL_FS_SHIFT);
-                       /* Set the MCLK src (PLL or SCLK) and the divide
-                        * ratio
-                        */
+
                        snd_soc_component_update_bits(component, CS42L42_MCLK_SRC_SEL,
-                                       CS42L42_MCLK_SRC_SEL_MASK |
                                        CS42L42_MCLKDIV_MASK,
-                                       (pll_ratio_table[i].mclk_src_sel
-                                       << CS42L42_MCLK_SRC_SEL_SHIFT) |
                                        (pll_ratio_table[i].mclk_div <<
                                        CS42L42_MCLKDIV_SHIFT));
                        /* Set up the LRCLK */
@@ -681,15 +667,6 @@ static int cs42l42_pll_config(struct snd_soc_component *component)
                                        CS42L42_FSYNC_PULSE_WIDTH_MASK,
                                        CS42L42_FRAC1_VAL(fsync - 1) <<
                                        CS42L42_FSYNC_PULSE_WIDTH_SHIFT);
-                       snd_soc_component_update_bits(component,
-                                       CS42L42_ASP_FRM_CFG,
-                                       CS42L42_ASP_5050_MASK,
-                                       CS42L42_ASP_5050_MASK);
-                       /* Set the frame delay to 1.0 SCLK clocks */
-                       snd_soc_component_update_bits(component, CS42L42_ASP_FRM_CFG,
-                                       CS42L42_ASP_FSD_MASK,
-                                       CS42L42_ASP_FSD_1_0 <<
-                                       CS42L42_ASP_FSD_SHIFT);
                        /* Set the sample rates (96k or lower) */
                        snd_soc_component_update_bits(component, CS42L42_FS_RATE_EN,
                                        CS42L42_FS_EN_MASK,
@@ -789,7 +766,18 @@ static int cs42l42_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
        /* interface format */
        switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
        case SND_SOC_DAIFMT_I2S:
-       case SND_SOC_DAIFMT_LEFT_J:
+               /*
+                * 5050 mode, frame starts on falling edge of LRCLK,
+                * frame delayed by 1.0 SCLKs
+                */
+               snd_soc_component_update_bits(component,
+                                             CS42L42_ASP_FRM_CFG,
+                                             CS42L42_ASP_STP_MASK |
+                                             CS42L42_ASP_5050_MASK |
+                                             CS42L42_ASP_FSD_MASK,
+                                             CS42L42_ASP_5050_MASK |
+                                             (CS42L42_ASP_FSD_1_0 <<
+                                               CS42L42_ASP_FSD_SHIFT));
                break;
        default:
                return -EINVAL;
@@ -819,6 +807,25 @@ static int cs42l42_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
        return 0;
 }
 
+static int cs42l42_dai_startup(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
+{
+       struct snd_soc_component *component = dai->component;
+       struct cs42l42_private *cs42l42 = snd_soc_component_get_drvdata(component);
+
+       /*
+        * Sample rates < 44.1 kHz would produce an out-of-range SCLK with
+        * a standard I2S frame. If the machine driver sets SCLK it must be
+        * legal.
+        */
+       if (cs42l42->sclk)
+               return 0;
+
+       /* Machine driver has not set a SCLK, limit bottom end to 44.1 kHz */
+       return snd_pcm_hw_constraint_minmax(substream->runtime,
+                                           SNDRV_PCM_HW_PARAM_RATE,
+                                           44100, 192000);
+}
+
 static int cs42l42_pcm_hw_params(struct snd_pcm_substream *substream,
                                struct snd_pcm_hw_params *params,
                                struct snd_soc_dai *dai)
@@ -832,6 +839,10 @@ static int cs42l42_pcm_hw_params(struct snd_pcm_substream *substream,
        cs42l42->srate = params_rate(params);
        cs42l42->bclk = snd_soc_params_to_bclk(params);
 
+       /* I2S frame always has 2 channels even for mono audio */
+       if (channels == 1)
+               cs42l42->bclk *= 2;
+
        switch(substream->stream) {
        case SNDRV_PCM_STREAM_CAPTURE:
                if (channels == 2) {
@@ -855,6 +866,17 @@ static int cs42l42_pcm_hw_params(struct snd_pcm_substream *substream,
                snd_soc_component_update_bits(component, CS42L42_ASP_RX_DAI0_CH2_AP_RES,
                                                         CS42L42_ASP_RX_CH_AP_MASK |
                                                         CS42L42_ASP_RX_CH_RES_MASK, val);
+
+               /* Channel B comes from the last active channel */
+               snd_soc_component_update_bits(component, CS42L42_SP_RX_CH_SEL,
+                                             CS42L42_SP_RX_CHB_SEL_MASK,
+                                             (channels - 1) << CS42L42_SP_RX_CHB_SEL_SHIFT);
+
+               /* Both LRCLK slots must be enabled */
+               snd_soc_component_update_bits(component, CS42L42_ASP_RX_DAI0_EN,
+                                             CS42L42_ASP_RX0_CH_EN_MASK,
+                                             BIT(CS42L42_ASP_RX0_CH1_SHIFT) |
+                                             BIT(CS42L42_ASP_RX0_CH2_SHIFT));
                break;
        default:
                break;
@@ -900,13 +922,21 @@ static int cs42l42_mute_stream(struct snd_soc_dai *dai, int mute, int stream)
                         */
                        regmap_multi_reg_write(cs42l42->regmap, cs42l42_to_osc_seq,
                                               ARRAY_SIZE(cs42l42_to_osc_seq));
+
+                       /* Must disconnect PLL before stopping it */
+                       snd_soc_component_update_bits(component,
+                                                     CS42L42_MCLK_SRC_SEL,
+                                                     CS42L42_MCLK_SRC_SEL_MASK,
+                                                     0);
+                       usleep_range(100, 200);
+
                        snd_soc_component_update_bits(component, CS42L42_PLL_CTL1,
                                                      CS42L42_PLL_START_MASK, 0);
                }
        } else {
                if (!cs42l42->stream_use) {
                        /* SCLK must be running before codec unmute */
-                       if ((cs42l42->bclk < 11289600) && (cs42l42->sclk < 11289600)) {
+                       if (pll_ratio_table[cs42l42->pll_config].mclk_src_sel) {
                                snd_soc_component_update_bits(component, CS42L42_PLL_CTL1,
                                                              CS42L42_PLL_START_MASK, 1);
 
@@ -927,6 +957,12 @@ static int cs42l42_mute_stream(struct snd_soc_dai *dai, int mute, int stream)
                                                               CS42L42_PLL_LOCK_TIMEOUT_US);
                                if (ret < 0)
                                        dev_warn(component->dev, "PLL failed to lock: %d\n", ret);
+
+                               /* PLL must be running to drive glitchless switch logic */
+                               snd_soc_component_update_bits(component,
+                                                             CS42L42_MCLK_SRC_SEL,
+                                                             CS42L42_MCLK_SRC_SEL_MASK,
+                                                             CS42L42_MCLK_SRC_SEL_MASK);
                        }
 
                        /* Mark SCLK as present, turn off internal oscillator */
@@ -960,8 +996,8 @@ static int cs42l42_mute_stream(struct snd_soc_dai *dai, int mute, int stream)
                         SNDRV_PCM_FMTBIT_S24_LE |\
                         SNDRV_PCM_FMTBIT_S32_LE )
 
-
 static const struct snd_soc_dai_ops cs42l42_ops = {
+       .startup        = cs42l42_dai_startup,
        .hw_params      = cs42l42_pcm_hw_params,
        .set_fmt        = cs42l42_set_dai_fmt,
        .set_sysclk     = cs42l42_set_sysclk,
index 206b3c8..8734f68 100644 (file)
 
 /* Page 0x25 Audio Port Registers */
 #define CS42L42_SP_RX_CH_SEL           (CS42L42_PAGE_25 + 0x01)
+#define CS42L42_SP_RX_CHB_SEL_SHIFT    2
+#define CS42L42_SP_RX_CHB_SEL_MASK     (3 << CS42L42_SP_RX_CHB_SEL_SHIFT)
 
 #define CS42L42_SP_RX_ISOC_CTL         (CS42L42_PAGE_25 + 0x02)
 #define CS42L42_SP_RX_RSYNC_SHIFT      6
@@ -775,6 +777,7 @@ struct  cs42l42_private {
        struct gpio_desc *reset_gpio;
        struct completion pdn_done;
        struct snd_soc_jack *jack;
+       int pll_config;
        int bclk;
        u32 sclk;
        u32 srate;
index 15bd833..db88be4 100644 (file)
@@ -828,36 +828,6 @@ static void nau8824_int_status_clear_all(struct regmap *regmap)
        }
 }
 
-static void nau8824_dapm_disable_pin(struct nau8824 *nau8824, const char *pin)
-{
-       struct snd_soc_dapm_context *dapm = nau8824->dapm;
-       const char *prefix = dapm->component->name_prefix;
-       char prefixed_pin[80];
-
-       if (prefix) {
-               snprintf(prefixed_pin, sizeof(prefixed_pin), "%s %s",
-                        prefix, pin);
-               snd_soc_dapm_disable_pin(dapm, prefixed_pin);
-       } else {
-               snd_soc_dapm_disable_pin(dapm, pin);
-       }
-}
-
-static void nau8824_dapm_enable_pin(struct nau8824 *nau8824, const char *pin)
-{
-       struct snd_soc_dapm_context *dapm = nau8824->dapm;
-       const char *prefix = dapm->component->name_prefix;
-       char prefixed_pin[80];
-
-       if (prefix) {
-               snprintf(prefixed_pin, sizeof(prefixed_pin), "%s %s",
-                        prefix, pin);
-               snd_soc_dapm_force_enable_pin(dapm, prefixed_pin);
-       } else {
-               snd_soc_dapm_force_enable_pin(dapm, pin);
-       }
-}
-
 static void nau8824_eject_jack(struct nau8824 *nau8824)
 {
        struct snd_soc_dapm_context *dapm = nau8824->dapm;
@@ -866,8 +836,8 @@ static void nau8824_eject_jack(struct nau8824 *nau8824)
        /* Clear all interruption status */
        nau8824_int_status_clear_all(regmap);
 
-       nau8824_dapm_disable_pin(nau8824, "SAR");
-       nau8824_dapm_disable_pin(nau8824, "MICBIAS");
+       snd_soc_dapm_disable_pin(dapm, "SAR");
+       snd_soc_dapm_disable_pin(dapm, "MICBIAS");
        snd_soc_dapm_sync(dapm);
 
        /* Enable the insertion interruption, disable the ejection
@@ -897,8 +867,8 @@ static void nau8824_jdet_work(struct work_struct *work)
        struct regmap *regmap = nau8824->regmap;
        int adc_value, event = 0, event_mask = 0;
 
-       nau8824_dapm_enable_pin(nau8824, "MICBIAS");
-       nau8824_dapm_enable_pin(nau8824, "SAR");
+       snd_soc_dapm_enable_pin(dapm, "MICBIAS");
+       snd_soc_dapm_enable_pin(dapm, "SAR");
        snd_soc_dapm_sync(dapm);
 
        msleep(100);
@@ -909,8 +879,8 @@ static void nau8824_jdet_work(struct work_struct *work)
        if (adc_value < HEADSET_SARADC_THD) {
                event |= SND_JACK_HEADPHONE;
 
-               nau8824_dapm_disable_pin(nau8824, "SAR");
-               nau8824_dapm_disable_pin(nau8824, "MICBIAS");
+               snd_soc_dapm_disable_pin(dapm, "SAR");
+               snd_soc_dapm_disable_pin(dapm, "MICBIAS");
                snd_soc_dapm_sync(dapm);
        } else {
                event |= SND_JACK_HEADSET;
index abcd6f4..51ecaa2 100644 (file)
@@ -44,6 +44,7 @@ static const struct reg_sequence patch_list[] = {
        {RT5682_I2C_CTRL, 0x000f},
        {RT5682_PLL2_INTERNAL, 0x8266},
        {RT5682_SAR_IL_CMD_3, 0x8365},
+       {RT5682_SAR_IL_CMD_6, 0x0180},
 };
 
 void rt5682_apply_patch_list(struct rt5682_priv *rt5682, struct device *dev)
index b504d63..52d2c96 100644 (file)
@@ -35,6 +35,9 @@
 
 #include "tlv320aic31xx.h"
 
+static int aic31xx_set_jack(struct snd_soc_component *component,
+                            struct snd_soc_jack *jack, void *data);
+
 static const struct reg_default aic31xx_reg_defaults[] = {
        { AIC31XX_CLKMUX, 0x00 },
        { AIC31XX_PLLPR, 0x11 },
@@ -1256,6 +1259,13 @@ static int aic31xx_power_on(struct snd_soc_component *component)
                return ret;
        }
 
+       /*
+        * The jack detection configuration is in the same register
+        * that is used to report jack detect status so is volatile
+        * and not covered by the cache sync, restore it separately.
+        */
+       aic31xx_set_jack(component, aic31xx->jack, NULL);
+
        return 0;
 }
 
index dcd8aeb..2e9175b 100644 (file)
@@ -682,11 +682,20 @@ static int aic32x4_set_dosr(struct snd_soc_component *component, u16 dosr)
 static int aic32x4_set_processing_blocks(struct snd_soc_component *component,
                                                u8 r_block, u8 p_block)
 {
-       if (r_block > 18 || p_block > 25)
-               return -EINVAL;
+       struct aic32x4_priv *aic32x4 = snd_soc_component_get_drvdata(component);
+
+       if (aic32x4->type == AIC32X4_TYPE_TAS2505) {
+               if (r_block || p_block > 3)
+                       return -EINVAL;
 
-       snd_soc_component_write(component, AIC32X4_ADCSPB, r_block);
-       snd_soc_component_write(component, AIC32X4_DACSPB, p_block);
+               snd_soc_component_write(component, AIC32X4_DACSPB, p_block);
+       } else { /* AIC32x4 */
+               if (r_block > 18 || p_block > 25)
+                       return -EINVAL;
+
+               snd_soc_component_write(component, AIC32X4_ADCSPB, r_block);
+               snd_soc_component_write(component, AIC32X4_DACSPB, p_block);
+       }
 
        return 0;
 }
@@ -695,6 +704,7 @@ static int aic32x4_setup_clocks(struct snd_soc_component *component,
                                unsigned int sample_rate, unsigned int channels,
                                unsigned int bit_depth)
 {
+       struct aic32x4_priv *aic32x4 = snd_soc_component_get_drvdata(component);
        u8 aosr;
        u16 dosr;
        u8 adc_resource_class, dac_resource_class;
@@ -721,19 +731,28 @@ static int aic32x4_setup_clocks(struct snd_soc_component *component,
                adc_resource_class = 6;
                dac_resource_class = 8;
                dosr_increment = 8;
-               aic32x4_set_processing_blocks(component, 1, 1);
+               if (aic32x4->type == AIC32X4_TYPE_TAS2505)
+                       aic32x4_set_processing_blocks(component, 0, 1);
+               else
+                       aic32x4_set_processing_blocks(component, 1, 1);
        } else if (sample_rate <= 96000) {
                aosr = 64;
                adc_resource_class = 6;
                dac_resource_class = 8;
                dosr_increment = 4;
-               aic32x4_set_processing_blocks(component, 1, 9);
+               if (aic32x4->type == AIC32X4_TYPE_TAS2505)
+                       aic32x4_set_processing_blocks(component, 0, 1);
+               else
+                       aic32x4_set_processing_blocks(component, 1, 9);
        } else if (sample_rate == 192000) {
                aosr = 32;
                adc_resource_class = 3;
                dac_resource_class = 4;
                dosr_increment = 2;
-               aic32x4_set_processing_blocks(component, 13, 19);
+               if (aic32x4->type == AIC32X4_TYPE_TAS2505)
+                       aic32x4_set_processing_blocks(component, 0, 1);
+               else
+                       aic32x4_set_processing_blocks(component, 13, 19);
        } else {
                dev_err(component->dev, "Sampling rate not supported\n");
                return -EINVAL;
index 549d982..fe15cbc 100644 (file)
@@ -747,7 +747,6 @@ static void wm_adsp2_init_debugfs(struct wm_adsp *dsp,
 static void wm_adsp2_cleanup_debugfs(struct wm_adsp *dsp)
 {
        wm_adsp_debugfs_clear(dsp);
-       debugfs_remove_recursive(dsp->debugfs_root);
 }
 #else
 static inline void wm_adsp2_init_debugfs(struct wm_adsp *dsp,
index 4124aa2..905c796 100644 (file)
@@ -127,7 +127,7 @@ static void sst_fill_alloc_params(struct snd_pcm_substream *substream,
        snd_pcm_uframes_t period_size;
        ssize_t periodbytes;
        ssize_t buffer_bytes = snd_pcm_lib_buffer_bytes(substream);
-       u32 buffer_addr = virt_to_phys(substream->dma_buffer.area);
+       u32 buffer_addr = virt_to_phys(substream->runtime->dma_area);
 
        channels = substream->runtime->channels;
        period_size = substream->runtime->period_size;
@@ -233,7 +233,6 @@ static int sst_platform_alloc_stream(struct snd_pcm_substream *substream,
        /* set codec params and inform SST driver the same */
        sst_fill_pcm_params(substream, &param);
        sst_fill_alloc_params(substream, &alloc_params);
-       substream->runtime->dma_area = substream->dma_buffer.area;
        str_params.sparams = param;
        str_params.aparams = alloc_params;
        str_params.codec = SST_CODEC_TYPE_PCM;
index 896251d..b7b3b0b 100644 (file)
@@ -404,7 +404,7 @@ static int audio_probe(struct platform_device *pdev)
                return -ENOMEM;
 
        /* By default dais[0] is configured for max98373 */
-       if (!strcmp(pdev->name, "sof_da7219_max98360a")) {
+       if (!strcmp(pdev->name, "sof_da7219_mx98360a")) {
                dais[0] = (struct snd_soc_dai_link) {
                        .name = "SSP1-Codec",
                        .id = 0,
index c2a5933..700a185 100644 (file)
@@ -104,8 +104,6 @@ static int kirkwood_dma_open(struct snd_soc_component *component,
        int err;
        struct snd_pcm_runtime *runtime = substream->runtime;
        struct kirkwood_dma_data *priv = kirkwood_priv(substream);
-       const struct mbus_dram_target_info *dram;
-       unsigned long addr;
 
        snd_soc_set_runtime_hwparams(substream, &kirkwood_dma_snd_hw);
 
@@ -142,20 +140,14 @@ static int kirkwood_dma_open(struct snd_soc_component *component,
                writel((unsigned int)-1, priv->io + KIRKWOOD_ERR_MASK);
        }
 
-       dram = mv_mbus_dram_info();
-       addr = substream->dma_buffer.addr;
        if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
                if (priv->substream_play)
                        return -EBUSY;
                priv->substream_play = substream;
-               kirkwood_dma_conf_mbus_windows(priv->io,
-                       KIRKWOOD_PLAYBACK_WIN, addr, dram);
        } else {
                if (priv->substream_rec)
                        return -EBUSY;
                priv->substream_rec = substream;
-               kirkwood_dma_conf_mbus_windows(priv->io,
-                       KIRKWOOD_RECORD_WIN, addr, dram);
        }
 
        return 0;
@@ -182,6 +174,23 @@ static int kirkwood_dma_close(struct snd_soc_component *component,
        return 0;
 }
 
+static int kirkwood_dma_hw_params(struct snd_soc_component *component,
+                                 struct snd_pcm_substream *substream,
+                                 struct snd_pcm_hw_params *params)
+{
+       struct kirkwood_dma_data *priv = kirkwood_priv(substream);
+       const struct mbus_dram_target_info *dram = mv_mbus_dram_info();
+       unsigned long addr = substream->runtime->dma_addr;
+
+       if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+               kirkwood_dma_conf_mbus_windows(priv->io,
+                       KIRKWOOD_PLAYBACK_WIN, addr, dram);
+       else
+               kirkwood_dma_conf_mbus_windows(priv->io,
+                       KIRKWOOD_RECORD_WIN, addr, dram);
+       return 0;
+}
+
 static int kirkwood_dma_prepare(struct snd_soc_component *component,
                                struct snd_pcm_substream *substream)
 {
@@ -246,6 +255,7 @@ const struct snd_soc_component_driver kirkwood_soc_component = {
        .name           = DRV_NAME,
        .open           = kirkwood_dma_open,
        .close          = kirkwood_dma_close,
+       .hw_params      = kirkwood_dma_hw_params,
        .prepare        = kirkwood_dma_prepare,
        .pointer        = kirkwood_dma_pointer,
        .pcm_construct  = kirkwood_dma_new,
index 3a5e84e..c8dfd0d 100644 (file)
@@ -148,86 +148,75 @@ int snd_soc_component_set_bias_level(struct snd_soc_component *component,
        return soc_component_ret(component, ret);
 }
 
-static int soc_component_pin(struct snd_soc_component *component,
-                            const char *pin,
-                            int (*pin_func)(struct snd_soc_dapm_context *dapm,
-                                            const char *pin))
-{
-       struct snd_soc_dapm_context *dapm =
-               snd_soc_component_get_dapm(component);
-       char *full_name;
-       int ret;
-
-       if (!component->name_prefix) {
-               ret = pin_func(dapm, pin);
-               goto end;
-       }
-
-       full_name = kasprintf(GFP_KERNEL, "%s %s", component->name_prefix, pin);
-       if (!full_name) {
-               ret = -ENOMEM;
-               goto end;
-       }
-
-       ret = pin_func(dapm, full_name);
-       kfree(full_name);
-end:
-       return soc_component_ret(component, ret);
-}
-
 int snd_soc_component_enable_pin(struct snd_soc_component *component,
                                 const char *pin)
 {
-       return soc_component_pin(component, pin, snd_soc_dapm_enable_pin);
+       struct snd_soc_dapm_context *dapm =
+               snd_soc_component_get_dapm(component);
+       return snd_soc_dapm_enable_pin(dapm, pin);
 }
 EXPORT_SYMBOL_GPL(snd_soc_component_enable_pin);
 
 int snd_soc_component_enable_pin_unlocked(struct snd_soc_component *component,
                                          const char *pin)
 {
-       return soc_component_pin(component, pin, snd_soc_dapm_enable_pin_unlocked);
+       struct snd_soc_dapm_context *dapm =
+               snd_soc_component_get_dapm(component);
+       return snd_soc_dapm_enable_pin_unlocked(dapm, pin);
 }
 EXPORT_SYMBOL_GPL(snd_soc_component_enable_pin_unlocked);
 
 int snd_soc_component_disable_pin(struct snd_soc_component *component,
                                  const char *pin)
 {
-       return soc_component_pin(component, pin, snd_soc_dapm_disable_pin);
+       struct snd_soc_dapm_context *dapm =
+               snd_soc_component_get_dapm(component);
+       return snd_soc_dapm_disable_pin(dapm, pin);
 }
 EXPORT_SYMBOL_GPL(snd_soc_component_disable_pin);
 
 int snd_soc_component_disable_pin_unlocked(struct snd_soc_component *component,
                                           const char *pin)
 {
-       return soc_component_pin(component, pin, snd_soc_dapm_disable_pin_unlocked);
+       struct snd_soc_dapm_context *dapm = 
+               snd_soc_component_get_dapm(component);
+       return snd_soc_dapm_disable_pin_unlocked(dapm, pin);
 }
 EXPORT_SYMBOL_GPL(snd_soc_component_disable_pin_unlocked);
 
 int snd_soc_component_nc_pin(struct snd_soc_component *component,
                             const char *pin)
 {
-       return soc_component_pin(component, pin, snd_soc_dapm_nc_pin);
+       struct snd_soc_dapm_context *dapm =
+               snd_soc_component_get_dapm(component);
+       return snd_soc_dapm_nc_pin(dapm, pin);
 }
 EXPORT_SYMBOL_GPL(snd_soc_component_nc_pin);
 
 int snd_soc_component_nc_pin_unlocked(struct snd_soc_component *component,
                                      const char *pin)
 {
-       return soc_component_pin(component, pin, snd_soc_dapm_nc_pin_unlocked);
+       struct snd_soc_dapm_context *dapm =
+               snd_soc_component_get_dapm(component);
+       return snd_soc_dapm_nc_pin_unlocked(dapm, pin);
 }
 EXPORT_SYMBOL_GPL(snd_soc_component_nc_pin_unlocked);
 
 int snd_soc_component_get_pin_status(struct snd_soc_component *component,
                                     const char *pin)
 {
-       return soc_component_pin(component, pin, snd_soc_dapm_get_pin_status);
+       struct snd_soc_dapm_context *dapm =
+               snd_soc_component_get_dapm(component);
+       return snd_soc_dapm_get_pin_status(dapm, pin);
 }
 EXPORT_SYMBOL_GPL(snd_soc_component_get_pin_status);
 
 int snd_soc_component_force_enable_pin(struct snd_soc_component *component,
                                       const char *pin)
 {
-       return soc_component_pin(component, pin, snd_soc_dapm_force_enable_pin);
+       struct snd_soc_dapm_context *dapm =
+               snd_soc_component_get_dapm(component);
+       return snd_soc_dapm_force_enable_pin(dapm, pin);
 }
 EXPORT_SYMBOL_GPL(snd_soc_component_force_enable_pin);
 
@@ -235,7 +224,9 @@ int snd_soc_component_force_enable_pin_unlocked(
        struct snd_soc_component *component,
        const char *pin)
 {
-       return soc_component_pin(component, pin, snd_soc_dapm_force_enable_pin_unlocked);
+       struct snd_soc_dapm_context *dapm =
+               snd_soc_component_get_dapm(component);
+       return snd_soc_dapm_force_enable_pin_unlocked(dapm, pin);
 }
 EXPORT_SYMBOL_GPL(snd_soc_component_force_enable_pin_unlocked);
 
index 4bce89b..4447f51 100644 (file)
@@ -278,6 +278,8 @@ config SND_SOC_SOF_HDA
 
 config SND_SOC_SOF_INTEL_SOUNDWIRE_LINK_BASELINE
        tristate
+       select SOUNDWIRE_INTEL if SND_SOC_SOF_INTEL_SOUNDWIRE
+       select SND_INTEL_SOUNDWIRE_ACPI if SND_SOC_SOF_INTEL_SOUNDWIRE
 
 config SND_SOC_SOF_INTEL_SOUNDWIRE
        tristate "SOF support for SoundWire"
@@ -285,8 +287,6 @@ config SND_SOC_SOF_INTEL_SOUNDWIRE
        depends on SND_SOC_SOF_INTEL_SOUNDWIRE_LINK_BASELINE
        depends on ACPI && SOUNDWIRE
        depends on !(SOUNDWIRE=m && SND_SOC_SOF_INTEL_SOUNDWIRE_LINK_BASELINE=y)
-       select SOUNDWIRE_INTEL
-       select SND_INTEL_SOUNDWIRE_ACPI
        help
          This adds support for SoundWire with Sound Open Firmware
          for Intel(R) platforms.
index c91aa95..acfeca4 100644 (file)
@@ -107,8 +107,8 @@ void hda_dsp_ipc_get_reply(struct snd_sof_dev *sdev)
        } else {
                /* reply correct size ? */
                if (reply.hdr.size != msg->reply_size &&
-                       /* getter payload is never known upfront */
-                       !(reply.hdr.cmd & SOF_IPC_GLB_PROBE)) {
+                   /* getter payload is never known upfront */
+                   ((reply.hdr.cmd & SOF_GLB_TYPE_MASK) != SOF_IPC_GLB_PROBE)) {
                        dev_err(sdev->dev, "error: reply expected %zu got %u bytes\n",
                                msg->reply_size, reply.hdr.size);
                        ret = -EINVAL;
index e1e368f..891e6e1 100644 (file)
@@ -187,12 +187,16 @@ static int hda_sdw_probe(struct snd_sof_dev *sdev)
 int hda_sdw_startup(struct snd_sof_dev *sdev)
 {
        struct sof_intel_hda_dev *hdev;
+       struct snd_sof_pdata *pdata = sdev->pdata;
 
        hdev = sdev->pdata->hw_pdata;
 
        if (!hdev->sdw)
                return 0;
 
+       if (pdata->machine && !pdata->machine->mach_params.link_mask)
+               return 0;
+
        return sdw_intel_startup(hdev->sdw);
 }
 
@@ -1002,6 +1006,14 @@ static int hda_generic_machine_select(struct snd_sof_dev *sdev)
                        hda_mach->mach_params.dmic_num = dmic_num;
                        pdata->machine = hda_mach;
                        pdata->tplg_filename = tplg_filename;
+
+                       if (codec_num == 2) {
+                               /*
+                                * Prevent SoundWire links from starting when an external
+                                * HDaudio codec is used
+                                */
+                               hda_mach->mach_params.link_mask = 0;
+                       }
                }
        }
 
index 3c1628a..3d9736e 100644 (file)
@@ -198,7 +198,7 @@ static int uniphier_aiodma_mmap(struct snd_soc_component *component,
        vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 
        return remap_pfn_range(vma, vma->vm_start,
-                              substream->dma_buffer.addr >> PAGE_SHIFT,
+                              substream->runtime->dma_addr >> PAGE_SHIFT,
                               vma->vm_end - vma->vm_start, vma->vm_page_prot);
 }
 
index 1d59fb6..91afea9 100644 (file)
@@ -452,8 +452,8 @@ static int xlnx_formatter_pcm_hw_params(struct snd_soc_component *component,
 
        stream_data->buffer_size = size;
 
-       low = lower_32_bits(substream->dma_buffer.addr);
-       high = upper_32_bits(substream->dma_buffer.addr);
+       low = lower_32_bits(runtime->dma_addr);
+       high = upper_32_bits(runtime->dma_addr);
        writel(low, stream_data->mmio + XLNX_AUD_BUFF_ADDR_LSB);
        writel(high, stream_data->mmio + XLNX_AUD_BUFF_ADDR_MSB);
 
index 2f6a624..a1f8c3a 100644 (file)
@@ -907,7 +907,7 @@ static void usb_audio_disconnect(struct usb_interface *intf)
                }
        }
 
-       if (chip->quirk_type & QUIRK_SETUP_DISABLE_AUTOSUSPEND)
+       if (chip->quirk_type == QUIRK_SETUP_DISABLE_AUTOSUSPEND)
                usb_enable_autosuspend(interface_to_usbdev(intf));
 
        chip->num_interfaces--;
index 52de522..14456f6 100644 (file)
@@ -324,6 +324,12 @@ static int __uac_clock_find_source(struct snd_usb_audio *chip,
                                              sources[ret - 1],
                                              visited, validate);
                if (ret > 0) {
+                       /*
+                        * For Samsung USBC Headset (AKG), setting clock selector again
+                        * will result in incorrect default clock setting problems
+                        */
+                       if (chip->usb_id == USB_ID(0x04e8, 0xa051))
+                               return ret;
                        err = uac_clock_selector_set_val(chip, entity_id, cur);
                        if (err < 0)
                                return err;
index f4cdaf1..9b713b4 100644 (file)
@@ -1816,6 +1816,15 @@ static void get_connector_control_name(struct usb_mixer_interface *mixer,
                strlcat(name, " - Output Jack", name_size);
 }
 
+/* get connector value to "wake up" the USB audio */
+static int connector_mixer_resume(struct usb_mixer_elem_list *list)
+{
+       struct usb_mixer_elem_info *cval = mixer_elem_list_to_info(list);
+
+       get_connector_value(cval, NULL, NULL);
+       return 0;
+}
+
 /* Build a mixer control for a UAC connector control (jack-detect) */
 static void build_connector_control(struct usb_mixer_interface *mixer,
                                    const struct usbmix_name_map *imap,
@@ -1833,6 +1842,10 @@ static void build_connector_control(struct usb_mixer_interface *mixer,
        if (!cval)
                return;
        snd_usb_mixer_elem_init_std(&cval->head, mixer, term->id);
+
+       /* set up a specific resume callback */
+       cval->head.resume = connector_mixer_resume;
+
        /*
         * UAC2: The first byte from reading the UAC2_TE_CONNECTOR control returns the
         * number of channels connected.
@@ -3642,23 +3655,15 @@ static int restore_mixer_value(struct usb_mixer_elem_list *list)
        return 0;
 }
 
-static int default_mixer_resume(struct usb_mixer_elem_list *list)
-{
-       struct usb_mixer_elem_info *cval = mixer_elem_list_to_info(list);
-
-       /* get connector value to "wake up" the USB audio */
-       if (cval->val_type == USB_MIXER_BOOLEAN && cval->channels == 1)
-               get_connector_value(cval, NULL, NULL);
-
-       return 0;
-}
-
 static int default_mixer_reset_resume(struct usb_mixer_elem_list *list)
 {
-       int err = default_mixer_resume(list);
+       int err;
 
-       if (err < 0)
-               return err;
+       if (list->resume) {
+               err = list->resume(list);
+               if (err < 0)
+                       return err;
+       }
        return restore_mixer_value(list);
 }
 
@@ -3697,7 +3702,7 @@ void snd_usb_mixer_elem_init_std(struct usb_mixer_elem_list *list,
        list->id = unitid;
        list->dump = snd_usb_mixer_dump_cval;
 #ifdef CONFIG_PM
-       list->resume = default_mixer_resume;
+       list->resume = NULL;
        list->reset_resume = default_mixer_reset_resume;
 #endif
 }
index f9d698a..3d5848d 100644 (file)
@@ -228,7 +228,7 @@ enum {
 };
 
 static const char *const scarlett2_dim_mute_names[SCARLETT2_DIM_MUTE_COUNT] = {
-       "Mute", "Dim"
+       "Mute Playback Switch", "Dim Playback Switch"
 };
 
 /* Description of each hardware port type:
@@ -1856,9 +1856,15 @@ static int scarlett2_mute_ctl_get(struct snd_kcontrol *kctl,
                                        struct snd_ctl_elem_value *ucontrol)
 {
        struct usb_mixer_elem_info *elem = kctl->private_data;
-       struct scarlett2_data *private = elem->head.mixer->private_data;
+       struct usb_mixer_interface *mixer = elem->head.mixer;
+       struct scarlett2_data *private = mixer->private_data;
        int index = line_out_remap(private, elem->control);
 
+       mutex_lock(&private->data_mutex);
+       if (private->vol_updated)
+               scarlett2_update_volumes(mixer);
+       mutex_unlock(&private->data_mutex);
+
        ucontrol->value.integer.value[0] = private->mute_switch[index];
        return 0;
 }
@@ -1955,10 +1961,12 @@ static void scarlett2_vol_ctl_set_writable(struct usb_mixer_interface *mixer,
                        ~SNDRV_CTL_ELEM_ACCESS_WRITE;
        }
 
-       /* Notify of write bit change */
-       snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_INFO,
+       /* Notify of write bit and possible value change */
+       snd_ctl_notify(card,
+                      SNDRV_CTL_EVENT_MASK_VALUE | SNDRV_CTL_EVENT_MASK_INFO,
                       &private->vol_ctls[index]->id);
-       snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_INFO,
+       snd_ctl_notify(card,
+                      SNDRV_CTL_EVENT_MASK_VALUE | SNDRV_CTL_EVENT_MASK_INFO,
                       &private->mute_ctls[index]->id);
 }
 
@@ -2530,14 +2538,18 @@ static int scarlett2_add_direct_monitor_ctl(struct usb_mixer_interface *mixer)
 {
        struct scarlett2_data *private = mixer->private_data;
        const struct scarlett2_device_info *info = private->info;
+       const char *s;
 
        if (!info->direct_monitor)
                return 0;
 
+       s = info->direct_monitor == 1
+             ? "Direct Monitor Playback Switch"
+             : "Direct Monitor Playback Enum";
+
        return scarlett2_add_new_ctl(
                mixer, &scarlett2_direct_monitor_ctl[info->direct_monitor - 1],
-               0, 1, "Direct Monitor Playback Switch",
-               &private->direct_monitor_ctl);
+               0, 1, s, &private->direct_monitor_ctl);
 }
 
 /*** Speaker Switching Control ***/
@@ -2589,7 +2601,9 @@ static int scarlett2_speaker_switch_enable(struct usb_mixer_interface *mixer)
 
                /* disable the line out SW/HW switch */
                scarlett2_sw_hw_ctl_ro(private, i);
-               snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_INFO,
+               snd_ctl_notify(card,
+                              SNDRV_CTL_EVENT_MASK_VALUE |
+                                SNDRV_CTL_EVENT_MASK_INFO,
                               &private->sw_hw_ctls[i]->id);
        }
 
@@ -2913,7 +2927,7 @@ static int scarlett2_dim_mute_ctl_put(struct snd_kcontrol *kctl,
                        if (private->vol_sw_hw_switch[line_index]) {
                                private->mute_switch[line_index] = val;
                                snd_ctl_notify(mixer->chip->card,
-                                              SNDRV_CTL_EVENT_MASK_INFO,
+                                              SNDRV_CTL_EVENT_MASK_VALUE,
                                               &private->mute_ctls[i]->id);
                        }
                }
@@ -3455,7 +3469,7 @@ static int scarlett2_add_msd_ctl(struct usb_mixer_interface *mixer)
 
        /* Add MSD control */
        return scarlett2_add_new_ctl(mixer, &scarlett2_msd_ctl,
-                                    0, 1, "MSD Mode", NULL);
+                                    0, 1, "MSD Mode Switch", NULL);
 }
 
 /*** Cleanup/Suspend Callbacks ***/
index e7accd8..326d1b0 100644 (file)
@@ -1899,6 +1899,7 @@ static const struct registration_quirk registration_quirks[] = {
        REG_QUIRK_ENTRY(0x0951, 0x16ea, 2),     /* Kingston HyperX Cloud Flight S */
        REG_QUIRK_ENTRY(0x0ecb, 0x1f46, 2),     /* JBL Quantum 600 */
        REG_QUIRK_ENTRY(0x0ecb, 0x2039, 2),     /* JBL Quantum 400 */
+       REG_QUIRK_ENTRY(0x0ecb, 0x203c, 2),     /* JBL Quantum 600 */
        REG_QUIRK_ENTRY(0x0ecb, 0x203e, 2),     /* JBL Quantum 800 */
        { 0 }                                   /* terminator */
 };
index 8b7a983..3430667 100644 (file)
@@ -1031,7 +1031,7 @@ struct sys_stat_struct {
  *     scall32-o32.S in the kernel sources.
  *   - the system call is performed by calling "syscall"
  *   - syscall return comes in v0, and register a3 needs to be checked to know
- *     if an error occured, in which case errno is in v0.
+ *     if an error occurred, in which case errno is in v0.
  *   - the arguments are cast to long and assigned into the target registers
  *     which are then simply passed as registers to the asm code, so that we
  *     don't have to experience issues with register constraints.
@@ -2243,6 +2243,19 @@ unsigned int sleep(unsigned int seconds)
                return 0;
 }
 
+static __attribute__((unused))
+int msleep(unsigned int msecs)
+{
+       struct timeval my_timeval = { msecs / 1000, (msecs % 1000) * 1000 };
+
+       if (sys_select(0, 0, 0, 0, &my_timeval) < 0)
+               return (my_timeval.tv_sec * 1000) +
+                       (my_timeval.tv_usec / 1000) +
+                       !!(my_timeval.tv_usec % 1000);
+       else
+               return 0;
+}
+
 static __attribute__((unused))
 int stat(const char *path, struct stat *buf)
 {
index 8146181..d9bd6f5 100644 (file)
@@ -131,8 +131,7 @@ static int copy_file(struct io_uring *ring, off_t insize)
        writes = reads = offset = 0;
 
        while (insize || write_left) {
-               unsigned long had_reads;
-               int got_comp;
+               int had_reads, got_comp;
 
                /*
                 * Queue up as many reads as we can
@@ -174,8 +173,13 @@ static int copy_file(struct io_uring *ring, off_t insize)
                        if (!got_comp) {
                                ret = io_uring_wait_cqe(ring, &cqe);
                                got_comp = 1;
-                       } else
+                       } else {
                                ret = io_uring_peek_cqe(ring, &cqe);
+                               if (ret == -EAGAIN) {
+                                       cqe = NULL;
+                                       ret = 0;
+                               }
+                       }
                        if (ret < 0) {
                                fprintf(stderr, "io_uring_peek_cqe: %s\n",
                                                        strerror(-ret));
@@ -194,7 +198,7 @@ static int copy_file(struct io_uring *ring, off_t insize)
                                fprintf(stderr, "cqe failed: %s\n",
                                                strerror(-cqe->res));
                                return 1;
-                       } else if ((size_t) cqe->res != data->iov.iov_len) {
+                       } else if (cqe->res != data->iov.iov_len) {
                                /* Short read/write, adjust and requeue */
                                data->iov.iov_base += cqe->res;
                                data->iov.iov_len -= cqe->res;
@@ -221,6 +225,25 @@ static int copy_file(struct io_uring *ring, off_t insize)
                }
        }
 
+       /* wait out pending writes */
+       while (writes) {
+               struct io_data *data;
+
+               ret = io_uring_wait_cqe(ring, &cqe);
+               if (ret) {
+                       fprintf(stderr, "wait_cqe=%d\n", ret);
+                       return 1;
+               }
+               if (cqe->res < 0) {
+                       fprintf(stderr, "write res=%d\n", cqe->res);
+                       return 1;
+               }
+               data = io_uring_cqe_get_data(cqe);
+               free(data);
+               writes--;
+               io_uring_cqe_seen(ring, cqe);
+       }
+
        return 0;
 }
 
index b46760b..7ff3d5c 100644 (file)
@@ -804,6 +804,7 @@ static struct btf *btf_new(const void *data, __u32 size, struct btf *base_btf)
        btf->nr_types = 0;
        btf->start_id = 1;
        btf->start_str_off = 0;
+       btf->fd = -1;
 
        if (base_btf) {
                btf->base_btf = base_btf;
@@ -832,8 +833,6 @@ static struct btf *btf_new(const void *data, __u32 size, struct btf *base_btf)
        if (err)
                goto done;
 
-       btf->fd = -1;
-
 done:
        if (err) {
                btf__free(btf);
index ecaae29..cd8c703 100644 (file)
@@ -75,6 +75,9 @@ probe_load(enum bpf_prog_type prog_type, const struct bpf_insn *insns,
        case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
                xattr.expected_attach_type = BPF_CGROUP_INET4_CONNECT;
                break;
+       case BPF_PROG_TYPE_CGROUP_SOCKOPT:
+               xattr.expected_attach_type = BPF_CGROUP_GETSOCKOPT;
+               break;
        case BPF_PROG_TYPE_SK_LOOKUP:
                xattr.expected_attach_type = BPF_SK_LOOKUP;
                break;
@@ -104,7 +107,6 @@ probe_load(enum bpf_prog_type prog_type, const struct bpf_insn *insns,
        case BPF_PROG_TYPE_SK_REUSEPORT:
        case BPF_PROG_TYPE_FLOW_DISSECTOR:
        case BPF_PROG_TYPE_CGROUP_SYSCTL:
-       case BPF_PROG_TYPE_CGROUP_SOCKOPT:
        case BPF_PROG_TYPE_TRACING:
        case BPF_PROG_TYPE_STRUCT_OPS:
        case BPF_PROG_TYPE_EXT:
index 22f8326..bc1f648 100644 (file)
@@ -2434,6 +2434,22 @@ static int cs_etm__process_event(struct perf_session *session,
        return 0;
 }
 
+static void dump_queued_data(struct cs_etm_auxtrace *etm,
+                            struct perf_record_auxtrace *event)
+{
+       struct auxtrace_buffer *buf;
+       unsigned int i;
+       /*
+        * Find all buffers with same reference in the queues and dump them.
+        * This is because the queues can contain multiple entries of the same
+        * buffer that were split on aux records.
+        */
+       for (i = 0; i < etm->queues.nr_queues; ++i)
+               list_for_each_entry(buf, &etm->queues.queue_array[i].head, list)
+                       if (buf->reference == event->reference)
+                               cs_etm__dump_event(etm, buf);
+}
+
 static int cs_etm__process_auxtrace_event(struct perf_session *session,
                                          union perf_event *event,
                                          struct perf_tool *tool __maybe_unused)
@@ -2466,7 +2482,8 @@ static int cs_etm__process_auxtrace_event(struct perf_session *session,
                                cs_etm__dump_event(etm, buffer);
                                auxtrace_buffer__put_data(buffer);
                        }
-       }
+       } else if (dump_trace)
+               dump_queued_data(etm, &event->auxtrace);
 
        return 0;
 }
@@ -3042,7 +3059,6 @@ int cs_etm__process_auxtrace_info(union perf_event *event,
 
        if (dump_trace) {
                cs_etm__print_auxtrace_info(auxtrace_info->priv, num_cpu);
-               return 0;
        }
 
        err = cs_etm__synth_events(etm, session);
index 72e7f36..8af693d 100644 (file)
@@ -192,8 +192,6 @@ struct map *map__new(struct machine *machine, u64 start, u64 len,
                        if (!(prot & PROT_EXEC))
                                dso__set_loaded(dso);
                }
-
-               nsinfo__put(dso->nsinfo);
                dso->nsinfo = nsi;
 
                if (build_id__is_defined(bid))
index a1bd700..fc683bc 100644 (file)
@@ -742,9 +742,13 @@ struct pmu_events_map *__weak pmu_events_map__find(void)
        return perf_pmu__find_map(NULL);
 }
 
-static bool perf_pmu__valid_suffix(char *pmu_name, char *tok)
+/*
+ * Suffix must be in form tok_{digits}, or tok{digits}, or same as pmu_name
+ * to be valid.
+ */
+static bool perf_pmu__valid_suffix(const char *pmu_name, char *tok)
 {
-       char *p;
+       const char *p;
 
        if (strncmp(pmu_name, tok, strlen(tok)))
                return false;
@@ -753,12 +757,16 @@ static bool perf_pmu__valid_suffix(char *pmu_name, char *tok)
        if (*p == 0)
                return true;
 
-       if (*p != '_')
-               return false;
+       if (*p == '_')
+               ++p;
 
-       ++p;
-       if (*p == 0 || !isdigit(*p))
-               return false;
+       /* Ensure we end in a number */
+       while (1) {
+               if (!isdigit(*p))
+                       return false;
+               if (*(++p) == 0)
+                       break;
+       }
 
        return true;
 }
@@ -789,12 +797,19 @@ bool pmu_uncore_alias_match(const char *pmu_name, const char *name)
         *          match "socket" in "socketX_pmunameY" and then "pmuname" in
         *          "pmunameY".
         */
-       for (; tok; name += strlen(tok), tok = strtok_r(NULL, ",", &tmp)) {
+       while (1) {
+               char *next_tok = strtok_r(NULL, ",", &tmp);
+
                name = strstr(name, tok);
-               if (!name || !perf_pmu__valid_suffix((char *)name, tok)) {
+               if (!name ||
+                   (!next_tok && !perf_pmu__valid_suffix(name, tok))) {
                        res = false;
                        goto out;
                }
+               if (!next_tok)
+                       break;
+               tok = next_tok;
+               name += strlen(tok);
        }
 
        res = true;
index 54f367c..b1bff5f 100644 (file)
@@ -434,7 +434,7 @@ static int nd_intel_test_finish_query(struct nfit_test *t,
                dev_dbg(dev, "%s: transition out verify\n", __func__);
                fw->state = FW_STATE_UPDATED;
                fw->missed_activate = false;
-               /* fall through */
+               fallthrough;
        case FW_STATE_UPDATED:
                nd_cmd->status = 0;
                /* bogus test version */
index 2c8935b..ee45432 100644 (file)
        .result = ACCEPT,
        .retval = 2,
 },
+{
+       "dead code: zero extension",
+       .insns = {
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -4),
+       BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
+       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -4),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .retval = 0,
+},
index a3e593d..2debba4 100644 (file)
@@ -1,3 +1,232 @@
+{
+       "map access: known scalar += value_ptr unknown vs const",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+                   offsetof(struct __sk_buff, len)),
+       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3),
+       BPF_LD_MAP_FD(BPF_REG_1, 0),
+       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
+       BPF_LD_MAP_FD(BPF_REG_1, 0),
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+       BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_0, 0),
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_4, 1, 4),
+       BPF_MOV64_IMM(BPF_REG_1, 6),
+       BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
+       BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x7),
+       BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+       BPF_MOV64_IMM(BPF_REG_1, 3),
+       BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
+       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+       BPF_MOV64_IMM(BPF_REG_0, 1),
+       BPF_EXIT_INSN(),
+       },
+       .fixup_map_hash_16b = { 5 },
+       .fixup_map_array_48b = { 8 },
+       .result_unpriv = REJECT,
+       .errstr_unpriv = "R1 tried to add from different maps, paths or scalars",
+       .result = ACCEPT,
+       .retval = 1,
+},
+{
+       "map access: known scalar += value_ptr const vs unknown",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+                   offsetof(struct __sk_buff, len)),
+       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3),
+       BPF_LD_MAP_FD(BPF_REG_1, 0),
+       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
+       BPF_LD_MAP_FD(BPF_REG_1, 0),
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+       BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_0, 0),
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_4, 1, 2),
+       BPF_MOV64_IMM(BPF_REG_1, 3),
+       BPF_JMP_IMM(BPF_JA, 0, 0, 3),
+       BPF_MOV64_IMM(BPF_REG_1, 6),
+       BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
+       BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x7),
+       BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
+       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+       BPF_MOV64_IMM(BPF_REG_0, 1),
+       BPF_EXIT_INSN(),
+       },
+       .fixup_map_hash_16b = { 5 },
+       .fixup_map_array_48b = { 8 },
+       .result_unpriv = REJECT,
+       .errstr_unpriv = "R1 tried to add from different maps, paths or scalars",
+       .result = ACCEPT,
+       .retval = 1,
+},
+{
+       "map access: known scalar += value_ptr const vs const (ne)",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+                   offsetof(struct __sk_buff, len)),
+       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3),
+       BPF_LD_MAP_FD(BPF_REG_1, 0),
+       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
+       BPF_LD_MAP_FD(BPF_REG_1, 0),
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+       BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_0, 0),
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_4, 1, 2),
+       BPF_MOV64_IMM(BPF_REG_1, 3),
+       BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+       BPF_MOV64_IMM(BPF_REG_1, 5),
+       BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
+       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+       BPF_MOV64_IMM(BPF_REG_0, 1),
+       BPF_EXIT_INSN(),
+       },
+       .fixup_map_hash_16b = { 5 },
+       .fixup_map_array_48b = { 8 },
+       .result_unpriv = REJECT,
+       .errstr_unpriv = "R1 tried to add from different maps, paths or scalars",
+       .result = ACCEPT,
+       .retval = 1,
+},
+{
+       "map access: known scalar += value_ptr const vs const (eq)",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+                   offsetof(struct __sk_buff, len)),
+       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3),
+       BPF_LD_MAP_FD(BPF_REG_1, 0),
+       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
+       BPF_LD_MAP_FD(BPF_REG_1, 0),
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+       BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_0, 0),
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_4, 1, 2),
+       BPF_MOV64_IMM(BPF_REG_1, 5),
+       BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+       BPF_MOV64_IMM(BPF_REG_1, 5),
+       BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
+       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+       BPF_MOV64_IMM(BPF_REG_0, 1),
+       BPF_EXIT_INSN(),
+       },
+       .fixup_map_hash_16b = { 5 },
+       .fixup_map_array_48b = { 8 },
+       .result = ACCEPT,
+       .retval = 1,
+},
+{
+       "map access: known scalar += value_ptr unknown vs unknown (eq)",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+                   offsetof(struct __sk_buff, len)),
+       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3),
+       BPF_LD_MAP_FD(BPF_REG_1, 0),
+       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
+       BPF_LD_MAP_FD(BPF_REG_1, 0),
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
+       BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_0, 0),
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_4, 1, 4),
+       BPF_MOV64_IMM(BPF_REG_1, 6),
+       BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
+       BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x7),
+       BPF_JMP_IMM(BPF_JA, 0, 0, 3),
+       BPF_MOV64_IMM(BPF_REG_1, 6),
+       BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
+       BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x7),
+       BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
+       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+       BPF_MOV64_IMM(BPF_REG_0, 1),
+       BPF_EXIT_INSN(),
+       },
+       .fixup_map_hash_16b = { 5 },
+       .fixup_map_array_48b = { 8 },
+       .result = ACCEPT,
+       .retval = 1,
+},
+{
+       "map access: known scalar += value_ptr unknown vs unknown (lt)",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+                   offsetof(struct __sk_buff, len)),
+       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3),
+       BPF_LD_MAP_FD(BPF_REG_1, 0),
+       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
+       BPF_LD_MAP_FD(BPF_REG_1, 0),
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
+       BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_0, 0),
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_4, 1, 4),
+       BPF_MOV64_IMM(BPF_REG_1, 6),
+       BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
+       BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x3),
+       BPF_JMP_IMM(BPF_JA, 0, 0, 3),
+       BPF_MOV64_IMM(BPF_REG_1, 6),
+       BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
+       BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x7),
+       BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
+       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+       BPF_MOV64_IMM(BPF_REG_0, 1),
+       BPF_EXIT_INSN(),
+       },
+       .fixup_map_hash_16b = { 5 },
+       .fixup_map_array_48b = { 8 },
+       .result_unpriv = REJECT,
+       .errstr_unpriv = "R1 tried to add from different maps, paths or scalars",
+       .result = ACCEPT,
+       .retval = 1,
+},
+{
+       "map access: known scalar += value_ptr unknown vs unknown (gt)",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+                   offsetof(struct __sk_buff, len)),
+       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3),
+       BPF_LD_MAP_FD(BPF_REG_1, 0),
+       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
+       BPF_LD_MAP_FD(BPF_REG_1, 0),
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
+       BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_0, 0),
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_4, 1, 4),
+       BPF_MOV64_IMM(BPF_REG_1, 6),
+       BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
+       BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x7),
+       BPF_JMP_IMM(BPF_JA, 0, 0, 3),
+       BPF_MOV64_IMM(BPF_REG_1, 6),
+       BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
+       BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x3),
+       BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
+       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+       BPF_MOV64_IMM(BPF_REG_0, 1),
+       BPF_EXIT_INSN(),
+       },
+       .fixup_map_hash_16b = { 5 },
+       .fixup_map_array_48b = { 8 },
+       .result_unpriv = REJECT,
+       .errstr_unpriv = "R1 tried to add from different maps, paths or scalars",
+       .result = ACCEPT,
+       .retval = 1,
+},
 {
        "map access: known scalar += value_ptr from different maps",
        .insns = {
index 06a351b..0709af0 100644 (file)
@@ -38,6 +38,7 @@
 /x86_64/xen_vmcall_test
 /x86_64/xss_msr_test
 /x86_64/vmx_pmu_msrs_test
+/access_tracking_perf_test
 /demand_paging_test
 /dirty_log_test
 /dirty_log_perf_test
index b853be2..5832f51 100644 (file)
@@ -71,6 +71,7 @@ TEST_GEN_PROGS_x86_64 += x86_64/tsc_msrs_test
 TEST_GEN_PROGS_x86_64 += x86_64/vmx_pmu_msrs_test
 TEST_GEN_PROGS_x86_64 += x86_64/xen_shinfo_test
 TEST_GEN_PROGS_x86_64 += x86_64/xen_vmcall_test
+TEST_GEN_PROGS_x86_64 += access_tracking_perf_test
 TEST_GEN_PROGS_x86_64 += demand_paging_test
 TEST_GEN_PROGS_x86_64 += dirty_log_test
 TEST_GEN_PROGS_x86_64 += dirty_log_perf_test
index a16c8f0..cc89818 100644 (file)
@@ -1019,7 +1019,8 @@ static __u64 sve_rejects_set[] = {
 #define VREGS_SUBLIST \
        { "vregs", .regs = vregs, .regs_n = ARRAY_SIZE(vregs), }
 #define PMU_SUBLIST \
-       { "pmu", .regs = pmu_regs, .regs_n = ARRAY_SIZE(pmu_regs), }
+       { "pmu", .capability = KVM_CAP_ARM_PMU_V3, .feature = KVM_ARM_VCPU_PMU_V3, \
+         .regs = pmu_regs, .regs_n = ARRAY_SIZE(pmu_regs), }
 #define SVE_SUBLIST \
        { "sve", .capability = KVM_CAP_ARM_SVE, .feature = KVM_ARM_VCPU_SVE, .finalize = true, \
          .regs = sve_regs, .regs_n = ARRAY_SIZE(sve_regs), \
diff --git a/tools/testing/selftests/kvm/access_tracking_perf_test.c b/tools/testing/selftests/kvm/access_tracking_perf_test.c
new file mode 100644 (file)
index 0000000..e2baa18
--- /dev/null
@@ -0,0 +1,429 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * access_tracking_perf_test
+ *
+ * Copyright (C) 2021, Google, Inc.
+ *
+ * This test measures the performance effects of KVM's access tracking.
+ * Access tracking is driven by the MMU notifiers test_young, clear_young, and
+ * clear_flush_young. These notifiers do not have a direct userspace API,
+ * however the clear_young notifier can be triggered by marking a pages as idle
+ * in /sys/kernel/mm/page_idle/bitmap. This test leverages that mechanism to
+ * enable access tracking on guest memory.
+ *
+ * To measure performance this test runs a VM with a configurable number of
+ * vCPUs that each touch every page in disjoint regions of memory. Performance
+ * is measured in the time it takes all vCPUs to finish touching their
+ * predefined region.
+ *
+ * Note that a deterministic correctness test of access tracking is not possible
+ * by using page_idle as it exists today. This is for a few reasons:
+ *
+ * 1. page_idle only issues clear_young notifiers, which lack a TLB flush. This
+ *    means subsequent guest accesses are not guaranteed to see page table
+ *    updates made by KVM until some time in the future.
+ *
+ * 2. page_idle only operates on LRU pages. Newly allocated pages are not
+ *    immediately added to LRU lists. Instead they are held in a "pagevec",
+ *    which is drained to LRU lists some time in the future. There is no
+ *    userspace API to force this drain to occur.
+ *
+ * These limitations are worked around in this test by using a large enough
+ * region of memory for each vCPU such that the number of translations cached in
+ * the TLB and the number of pages held in pagevecs are a small fraction of the
+ * overall workload. And if either of those conditions are not true this test
+ * will fail rather than silently passing.
+ */
+#include <inttypes.h>
+#include <limits.h>
+#include <pthread.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+
+#include "kvm_util.h"
+#include "test_util.h"
+#include "perf_test_util.h"
+#include "guest_modes.h"
+
+/* Global variable used to synchronize all of the vCPU threads. */
+static int iteration = -1;
+
+/* Defines what vCPU threads should do during a given iteration. */
+static enum {
+       /* Run the vCPU to access all its memory. */
+       ITERATION_ACCESS_MEMORY,
+       /* Mark the vCPU's memory idle in page_idle. */
+       ITERATION_MARK_IDLE,
+} iteration_work;
+
+/* Set to true when vCPU threads should exit. */
+static bool done;
+
+/* The iteration that was last completed by each vCPU. */
+static int vcpu_last_completed_iteration[KVM_MAX_VCPUS];
+
+/* Whether to overlap the regions of memory vCPUs access. */
+static bool overlap_memory_access;
+
+struct test_params {
+       /* The backing source for the region of memory. */
+       enum vm_mem_backing_src_type backing_src;
+
+       /* The amount of memory to allocate for each vCPU. */
+       uint64_t vcpu_memory_bytes;
+
+       /* The number of vCPUs to create in the VM. */
+       int vcpus;
+};
+
+static uint64_t pread_uint64(int fd, const char *filename, uint64_t index)
+{
+       uint64_t value;
+       off_t offset = index * sizeof(value);
+
+       TEST_ASSERT(pread(fd, &value, sizeof(value), offset) == sizeof(value),
+                   "pread from %s offset 0x%" PRIx64 " failed!",
+                   filename, offset);
+
+       return value;
+
+}
+
+#define PAGEMAP_PRESENT (1ULL << 63)
+#define PAGEMAP_PFN_MASK ((1ULL << 55) - 1)
+
+static uint64_t lookup_pfn(int pagemap_fd, struct kvm_vm *vm, uint64_t gva)
+{
+       uint64_t hva = (uint64_t) addr_gva2hva(vm, gva);
+       uint64_t entry;
+       uint64_t pfn;
+
+       entry = pread_uint64(pagemap_fd, "pagemap", hva / getpagesize());
+       if (!(entry & PAGEMAP_PRESENT))
+               return 0;
+
+       pfn = entry & PAGEMAP_PFN_MASK;
+       if (!pfn) {
+               print_skip("Looking up PFNs requires CAP_SYS_ADMIN");
+               exit(KSFT_SKIP);
+       }
+
+       return pfn;
+}
+
+static bool is_page_idle(int page_idle_fd, uint64_t pfn)
+{
+       uint64_t bits = pread_uint64(page_idle_fd, "page_idle", pfn / 64);
+
+       return !!((bits >> (pfn % 64)) & 1);
+}
+
+static void mark_page_idle(int page_idle_fd, uint64_t pfn)
+{
+       uint64_t bits = 1ULL << (pfn % 64);
+
+       TEST_ASSERT(pwrite(page_idle_fd, &bits, 8, 8 * (pfn / 64)) == 8,
+                   "Set page_idle bits for PFN 0x%" PRIx64, pfn);
+}
+
+static void mark_vcpu_memory_idle(struct kvm_vm *vm, int vcpu_id)
+{
+       uint64_t base_gva = perf_test_args.vcpu_args[vcpu_id].gva;
+       uint64_t pages = perf_test_args.vcpu_args[vcpu_id].pages;
+       uint64_t page;
+       uint64_t still_idle = 0;
+       uint64_t no_pfn = 0;
+       int page_idle_fd;
+       int pagemap_fd;
+
+       /* If vCPUs are using an overlapping region, let vCPU 0 mark it idle. */
+       if (overlap_memory_access && vcpu_id)
+               return;
+
+       page_idle_fd = open("/sys/kernel/mm/page_idle/bitmap", O_RDWR);
+       TEST_ASSERT(page_idle_fd > 0, "Failed to open page_idle.");
+
+       pagemap_fd = open("/proc/self/pagemap", O_RDONLY);
+       TEST_ASSERT(pagemap_fd > 0, "Failed to open pagemap.");
+
+       for (page = 0; page < pages; page++) {
+               uint64_t gva = base_gva + page * perf_test_args.guest_page_size;
+               uint64_t pfn = lookup_pfn(pagemap_fd, vm, gva);
+
+               if (!pfn) {
+                       no_pfn++;
+                       continue;
+               }
+
+               if (is_page_idle(page_idle_fd, pfn)) {
+                       still_idle++;
+                       continue;
+               }
+
+               mark_page_idle(page_idle_fd, pfn);
+       }
+
+       /*
+        * Assumption: Less than 1% of pages are going to be swapped out from
+        * under us during this test.
+        */
+       TEST_ASSERT(no_pfn < pages / 100,
+                   "vCPU %d: No PFN for %" PRIu64 " out of %" PRIu64 " pages.",
+                   vcpu_id, no_pfn, pages);
+
+       /*
+        * Test that at least 90% of memory has been marked idle (the rest might
+        * not be marked idle because the pages have not yet made it to an LRU
+        * list or the translations are still cached in the TLB). 90% is
+        * arbitrary; high enough that we ensure most memory access went through
+        * access tracking but low enough as to not make the test too brittle
+        * over time and across architectures.
+        */
+       TEST_ASSERT(still_idle < pages / 10,
+                   "vCPU%d: Too many pages still idle (%"PRIu64 " out of %"
+                   PRIu64 ").\n",
+                   vcpu_id, still_idle, pages);
+
+       close(page_idle_fd);
+       close(pagemap_fd);
+}
+
+static void assert_ucall(struct kvm_vm *vm, uint32_t vcpu_id,
+                        uint64_t expected_ucall)
+{
+       struct ucall uc;
+       uint64_t actual_ucall = get_ucall(vm, vcpu_id, &uc);
+
+       TEST_ASSERT(expected_ucall == actual_ucall,
+                   "Guest exited unexpectedly (expected ucall %" PRIu64
+                   ", got %" PRIu64 ")",
+                   expected_ucall, actual_ucall);
+}
+
+static bool spin_wait_for_next_iteration(int *current_iteration)
+{
+       int last_iteration = *current_iteration;
+
+       do {
+               if (READ_ONCE(done))
+                       return false;
+
+               *current_iteration = READ_ONCE(iteration);
+       } while (last_iteration == *current_iteration);
+
+       return true;
+}
+
+static void *vcpu_thread_main(void *arg)
+{
+       struct perf_test_vcpu_args *vcpu_args = arg;
+       struct kvm_vm *vm = perf_test_args.vm;
+       int vcpu_id = vcpu_args->vcpu_id;
+       int current_iteration = -1;
+
+       vcpu_args_set(vm, vcpu_id, 1, vcpu_id);
+
+       while (spin_wait_for_next_iteration(&current_iteration)) {
+               switch (READ_ONCE(iteration_work)) {
+               case ITERATION_ACCESS_MEMORY:
+                       vcpu_run(vm, vcpu_id);
+                       assert_ucall(vm, vcpu_id, UCALL_SYNC);
+                       break;
+               case ITERATION_MARK_IDLE:
+                       mark_vcpu_memory_idle(vm, vcpu_id);
+                       break;
+               };
+
+               vcpu_last_completed_iteration[vcpu_id] = current_iteration;
+       }
+
+       return NULL;
+}
+
+static void spin_wait_for_vcpu(int vcpu_id, int target_iteration)
+{
+       while (READ_ONCE(vcpu_last_completed_iteration[vcpu_id]) !=
+              target_iteration) {
+               continue;
+       }
+}
+
+/* The type of memory accesses to perform in the VM. */
+enum access_type {
+       ACCESS_READ,
+       ACCESS_WRITE,
+};
+
+static void run_iteration(struct kvm_vm *vm, int vcpus, const char *description)
+{
+       struct timespec ts_start;
+       struct timespec ts_elapsed;
+       int next_iteration;
+       int vcpu_id;
+
+       /* Kick off the vCPUs by incrementing iteration. */
+       next_iteration = ++iteration;
+
+       clock_gettime(CLOCK_MONOTONIC, &ts_start);
+
+       /* Wait for all vCPUs to finish the iteration. */
+       for (vcpu_id = 0; vcpu_id < vcpus; vcpu_id++)
+               spin_wait_for_vcpu(vcpu_id, next_iteration);
+
+       ts_elapsed = timespec_elapsed(ts_start);
+       pr_info("%-30s: %ld.%09lds\n",
+               description, ts_elapsed.tv_sec, ts_elapsed.tv_nsec);
+}
+
+static void access_memory(struct kvm_vm *vm, int vcpus, enum access_type access,
+                         const char *description)
+{
+       perf_test_args.wr_fract = (access == ACCESS_READ) ? INT_MAX : 1;
+       sync_global_to_guest(vm, perf_test_args);
+       iteration_work = ITERATION_ACCESS_MEMORY;
+       run_iteration(vm, vcpus, description);
+}
+
+static void mark_memory_idle(struct kvm_vm *vm, int vcpus)
+{
+       /*
+        * Even though this parallelizes the work across vCPUs, this is still a
+        * very slow operation because page_idle forces the test to mark one pfn
+        * at a time and the clear_young notifier serializes on the KVM MMU
+        * lock.
+        */
+       pr_debug("Marking VM memory idle (slow)...\n");
+       iteration_work = ITERATION_MARK_IDLE;
+       run_iteration(vm, vcpus, "Mark memory idle");
+}
+
+static pthread_t *create_vcpu_threads(int vcpus)
+{
+       pthread_t *vcpu_threads;
+       int i;
+
+       vcpu_threads = malloc(vcpus * sizeof(vcpu_threads[0]));
+       TEST_ASSERT(vcpu_threads, "Failed to allocate vcpu_threads.");
+
+       for (i = 0; i < vcpus; i++) {
+               vcpu_last_completed_iteration[i] = iteration;
+               pthread_create(&vcpu_threads[i], NULL, vcpu_thread_main,
+                              &perf_test_args.vcpu_args[i]);
+       }
+
+       return vcpu_threads;
+}
+
+static void terminate_vcpu_threads(pthread_t *vcpu_threads, int vcpus)
+{
+       int i;
+
+       /* Set done to signal the vCPU threads to exit */
+       done = true;
+
+       for (i = 0; i < vcpus; i++)
+               pthread_join(vcpu_threads[i], NULL);
+}
+
+static void run_test(enum vm_guest_mode mode, void *arg)
+{
+       struct test_params *params = arg;
+       struct kvm_vm *vm;
+       pthread_t *vcpu_threads;
+       int vcpus = params->vcpus;
+
+       vm = perf_test_create_vm(mode, vcpus, params->vcpu_memory_bytes,
+                                params->backing_src);
+
+       perf_test_setup_vcpus(vm, vcpus, params->vcpu_memory_bytes,
+                             !overlap_memory_access);
+
+       vcpu_threads = create_vcpu_threads(vcpus);
+
+       pr_info("\n");
+       access_memory(vm, vcpus, ACCESS_WRITE, "Populating memory");
+
+       /* As a control, read and write to the populated memory first. */
+       access_memory(vm, vcpus, ACCESS_WRITE, "Writing to populated memory");
+       access_memory(vm, vcpus, ACCESS_READ, "Reading from populated memory");
+
+       /* Repeat on memory that has been marked as idle. */
+       mark_memory_idle(vm, vcpus);
+       access_memory(vm, vcpus, ACCESS_WRITE, "Writing to idle memory");
+       mark_memory_idle(vm, vcpus);
+       access_memory(vm, vcpus, ACCESS_READ, "Reading from idle memory");
+
+       terminate_vcpu_threads(vcpu_threads, vcpus);
+       free(vcpu_threads);
+       perf_test_destroy_vm(vm);
+}
+
+static void help(char *name)
+{
+       puts("");
+       printf("usage: %s [-h] [-m mode] [-b vcpu_bytes] [-v vcpus] [-o]  [-s mem_type]\n",
+              name);
+       puts("");
+       printf(" -h: Display this help message.");
+       guest_modes_help();
+       printf(" -b: specify the size of the memory region which should be\n"
+              "     dirtied by each vCPU. e.g. 10M or 3G.\n"
+              "     (default: 1G)\n");
+       printf(" -v: specify the number of vCPUs to run.\n");
+       printf(" -o: Overlap guest memory accesses instead of partitioning\n"
+              "     them into a separate region of memory for each vCPU.\n");
+       printf(" -s: specify the type of memory that should be used to\n"
+              "     back the guest data region.\n\n");
+       backing_src_help();
+       puts("");
+       exit(0);
+}
+
+int main(int argc, char *argv[])
+{
+       struct test_params params = {
+               .backing_src = VM_MEM_SRC_ANONYMOUS,
+               .vcpu_memory_bytes = DEFAULT_PER_VCPU_MEM_SIZE,
+               .vcpus = 1,
+       };
+       int page_idle_fd;
+       int opt;
+
+       guest_modes_append_default();
+
+       while ((opt = getopt(argc, argv, "hm:b:v:os:")) != -1) {
+               switch (opt) {
+               case 'm':
+                       guest_modes_cmdline(optarg);
+                       break;
+               case 'b':
+                       params.vcpu_memory_bytes = parse_size(optarg);
+                       break;
+               case 'v':
+                       params.vcpus = atoi(optarg);
+                       break;
+               case 'o':
+                       overlap_memory_access = true;
+                       break;
+               case 's':
+                       params.backing_src = parse_backing_src_type(optarg);
+                       break;
+               case 'h':
+               default:
+                       help(argv[0]);
+                       break;
+               }
+       }
+
+       page_idle_fd = open("/sys/kernel/mm/page_idle/bitmap", O_RDWR);
+       if (page_idle_fd < 0) {
+               print_skip("CONFIG_IDLE_PAGE_TRACKING is not enabled");
+               exit(KSFT_SKIP);
+       }
+       close(page_idle_fd);
+
+       for_each_guest_mode(run_test, &params);
+
+       return 0;
+}
index 04a2641..80cbd3a 100644 (file)
@@ -312,6 +312,7 @@ int main(int argc, char *argv[])
                        break;
                case 'o':
                        p.partition_vcpu_memory_access = false;
+                       break;
                case 's':
                        p.backing_src = parse_backing_src_type(optarg);
                        break;
index 412eaee..b669107 100644 (file)
 #define HV_X64_GUEST_DEBUGGING_AVAILABLE               BIT(1)
 #define HV_X64_PERF_MONITOR_AVAILABLE                  BIT(2)
 #define HV_X64_CPU_DYNAMIC_PARTITIONING_AVAILABLE      BIT(3)
-#define HV_X64_HYPERCALL_PARAMS_XMM_AVAILABLE          BIT(4)
+#define HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE           BIT(4)
 #define HV_X64_GUEST_IDLE_STATE_AVAILABLE              BIT(5)
 #define HV_FEATURE_FREQUENCY_MSRS_AVAILABLE            BIT(8)
 #define HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE           BIT(10)
 #define HV_STATUS_INVALID_CONNECTION_ID                18
 #define HV_STATUS_INSUFFICIENT_BUFFERS         19
 
+/* hypercall options */
+#define HV_HYPERCALL_FAST_BIT          BIT(16)
+
 #endif /* !SELFTEST_KVM_HYPERV_H */
index b0031f2..ecec308 100644 (file)
@@ -320,7 +320,7 @@ int main(int ac, char **av)
                run_delay = get_run_delay();
                pthread_create(&thread, &attr, do_steal_time, NULL);
                do
-                       pthread_yield();
+                       sched_yield();
                while (get_run_delay() - run_delay < MIN_RUN_DELAY_NS);
                pthread_join(thread, NULL);
                run_delay = get_run_delay() - run_delay;
index bab10ae..e0b2bb1 100644 (file)
@@ -215,7 +215,7 @@ int main(void)
        vcpu_set_hv_cpuid(vm, VCPU_ID);
 
        tsc_page_gva = vm_vaddr_alloc_page(vm);
-       memset(addr_gpa2hva(vm, tsc_page_gva), 0x0, getpagesize());
+       memset(addr_gva2hva(vm, tsc_page_gva), 0x0, getpagesize());
        TEST_ASSERT((addr_gva2gpa(vm, tsc_page_gva) & (getpagesize() - 1)) == 0,
                "TSC page has to be page aligned\n");
        vcpu_args_set(vm, VCPU_ID, 2, tsc_page_gva, addr_gva2gpa(vm, tsc_page_gva));
index af27c7e..91d88aa 100644 (file)
@@ -47,6 +47,7 @@ static void do_wrmsr(u32 idx, u64 val)
 }
 
 static int nr_gp;
+static int nr_ud;
 
 static inline u64 hypercall(u64 control, vm_vaddr_t input_address,
                            vm_vaddr_t output_address)
@@ -80,6 +81,12 @@ static void guest_gp_handler(struct ex_regs *regs)
                regs->rip = (uint64_t)&wrmsr_end;
 }
 
+static void guest_ud_handler(struct ex_regs *regs)
+{
+       nr_ud++;
+       regs->rip += 3;
+}
+
 struct msr_data {
        uint32_t idx;
        bool available;
@@ -90,6 +97,7 @@ struct msr_data {
 struct hcall_data {
        uint64_t control;
        uint64_t expect;
+       bool ud_expected;
 };
 
 static void guest_msr(struct msr_data *msr)
@@ -117,13 +125,26 @@ static void guest_msr(struct msr_data *msr)
 static void guest_hcall(vm_vaddr_t pgs_gpa, struct hcall_data *hcall)
 {
        int i = 0;
+       u64 res, input, output;
 
        wrmsr(HV_X64_MSR_GUEST_OS_ID, LINUX_OS_ID);
        wrmsr(HV_X64_MSR_HYPERCALL, pgs_gpa);
 
        while (hcall->control) {
-               GUEST_ASSERT(hypercall(hcall->control, pgs_gpa,
-                                      pgs_gpa + 4096) == hcall->expect);
+               nr_ud = 0;
+               if (!(hcall->control & HV_HYPERCALL_FAST_BIT)) {
+                       input = pgs_gpa;
+                       output = pgs_gpa + 4096;
+               } else {
+                       input = output = 0;
+               }
+
+               res = hypercall(hcall->control, input, output);
+               if (hcall->ud_expected)
+                       GUEST_ASSERT(nr_ud == 1);
+               else
+                       GUEST_ASSERT(res == hcall->expect);
+
                GUEST_SYNC(i++);
        }
 
@@ -552,8 +573,18 @@ static void guest_test_hcalls_access(struct kvm_vm *vm, struct hcall_data *hcall
                        recomm.ebx = 0xfff;
                        hcall->expect = HV_STATUS_SUCCESS;
                        break;
-
                case 17:
+                       /* XMM fast hypercall */
+                       hcall->control = HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE | HV_HYPERCALL_FAST_BIT;
+                       hcall->ud_expected = true;
+                       break;
+               case 18:
+                       feat.edx |= HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE;
+                       hcall->ud_expected = false;
+                       hcall->expect = HV_STATUS_SUCCESS;
+                       break;
+
+               case 19:
                        /* END */
                        hcall->control = 0;
                        break;
@@ -625,6 +656,10 @@ int main(void)
        /* Test hypercalls */
        vm = vm_create_default(VCPU_ID, 0, guest_hcall);
 
+       vm_init_descriptor_tables(vm);
+       vcpu_init_descriptor_tables(vm, VCPU_ID);
+       vm_install_exception_handler(vm, UD_VECTOR, guest_ud_handler);
+
        /* Hypercall input/output */
        hcall_page = vm_vaddr_alloc_pages(vm, 2);
        memset(addr_gva2hva(vm, hcall_page), 0x0, 2 * getpagesize());
index f23438d..3d7dde2 100644 (file)
@@ -484,13 +484,16 @@ enum desc_type {
        MONITOR_ACQUIRE,
        EXPIRE_STATE,
        EXPIRE_POLICY,
+       SPDINFO_ATTRS,
 };
 const char *desc_name[] = {
        "create tunnel",
        "alloc spi",
        "monitor acquire",
        "expire state",
-       "expire policy"
+       "expire policy",
+       "spdinfo attributes",
+       ""
 };
 struct xfrm_desc {
        enum desc_type  type;
@@ -1593,6 +1596,155 @@ out_close:
        return ret;
 }
 
+static int xfrm_spdinfo_set_thresh(int xfrm_sock, uint32_t *seq,
+               unsigned thresh4_l, unsigned thresh4_r,
+               unsigned thresh6_l, unsigned thresh6_r,
+               bool add_bad_attr)
+
+{
+       struct {
+               struct nlmsghdr         nh;
+               union {
+                       uint32_t        unused;
+                       int             error;
+               };
+               char                    attrbuf[MAX_PAYLOAD];
+       } req;
+       struct xfrmu_spdhthresh thresh;
+
+       memset(&req, 0, sizeof(req));
+       req.nh.nlmsg_len        = NLMSG_LENGTH(sizeof(req.unused));
+       req.nh.nlmsg_type       = XFRM_MSG_NEWSPDINFO;
+       req.nh.nlmsg_flags      = NLM_F_REQUEST | NLM_F_ACK;
+       req.nh.nlmsg_seq        = (*seq)++;
+
+       thresh.lbits = thresh4_l;
+       thresh.rbits = thresh4_r;
+       if (rtattr_pack(&req.nh, sizeof(req), XFRMA_SPD_IPV4_HTHRESH, &thresh, sizeof(thresh)))
+               return -1;
+
+       thresh.lbits = thresh6_l;
+       thresh.rbits = thresh6_r;
+       if (rtattr_pack(&req.nh, sizeof(req), XFRMA_SPD_IPV6_HTHRESH, &thresh, sizeof(thresh)))
+               return -1;
+
+       if (add_bad_attr) {
+               BUILD_BUG_ON(XFRMA_IF_ID <= XFRMA_SPD_MAX + 1);
+               if (rtattr_pack(&req.nh, sizeof(req), XFRMA_IF_ID, NULL, 0)) {
+                       pr_err("adding attribute failed: no space");
+                       return -1;
+               }
+       }
+
+       if (send(xfrm_sock, &req, req.nh.nlmsg_len, 0) < 0) {
+               pr_err("send()");
+               return -1;
+       }
+
+       if (recv(xfrm_sock, &req, sizeof(req), 0) < 0) {
+               pr_err("recv()");
+               return -1;
+       } else if (req.nh.nlmsg_type != NLMSG_ERROR) {
+               printk("expected NLMSG_ERROR, got %d", (int)req.nh.nlmsg_type);
+               return -1;
+       }
+
+       if (req.error) {
+               printk("NLMSG_ERROR: %d: %s", req.error, strerror(-req.error));
+               return -1;
+       }
+
+       return 0;
+}
+
+static int xfrm_spdinfo_attrs(int xfrm_sock, uint32_t *seq)
+{
+       struct {
+               struct nlmsghdr                 nh;
+               union {
+                       uint32_t        unused;
+                       int             error;
+               };
+               char                    attrbuf[MAX_PAYLOAD];
+       } req;
+
+       if (xfrm_spdinfo_set_thresh(xfrm_sock, seq, 32, 31, 120, 16, false)) {
+               pr_err("Can't set SPD HTHRESH");
+               return KSFT_FAIL;
+       }
+
+       memset(&req, 0, sizeof(req));
+
+       req.nh.nlmsg_len        = NLMSG_LENGTH(sizeof(req.unused));
+       req.nh.nlmsg_type       = XFRM_MSG_GETSPDINFO;
+       req.nh.nlmsg_flags      = NLM_F_REQUEST;
+       req.nh.nlmsg_seq        = (*seq)++;
+       if (send(xfrm_sock, &req, req.nh.nlmsg_len, 0) < 0) {
+               pr_err("send()");
+               return KSFT_FAIL;
+       }
+
+       if (recv(xfrm_sock, &req, sizeof(req), 0) < 0) {
+               pr_err("recv()");
+               return KSFT_FAIL;
+       } else if (req.nh.nlmsg_type == XFRM_MSG_NEWSPDINFO) {
+               size_t len = NLMSG_PAYLOAD(&req.nh, sizeof(req.unused));
+               struct rtattr *attr = (void *)req.attrbuf;
+               int got_thresh = 0;
+
+               for (; RTA_OK(attr, len); attr = RTA_NEXT(attr, len)) {
+                       if (attr->rta_type == XFRMA_SPD_IPV4_HTHRESH) {
+                               struct xfrmu_spdhthresh *t = RTA_DATA(attr);
+
+                               got_thresh++;
+                               if (t->lbits != 32 || t->rbits != 31) {
+                                       pr_err("thresh differ: %u, %u",
+                                                       t->lbits, t->rbits);
+                                       return KSFT_FAIL;
+                               }
+                       }
+                       if (attr->rta_type == XFRMA_SPD_IPV6_HTHRESH) {
+                               struct xfrmu_spdhthresh *t = RTA_DATA(attr);
+
+                               got_thresh++;
+                               if (t->lbits != 120 || t->rbits != 16) {
+                                       pr_err("thresh differ: %u, %u",
+                                                       t->lbits, t->rbits);
+                                       return KSFT_FAIL;
+                               }
+                       }
+               }
+               if (got_thresh != 2) {
+                       pr_err("only %d thresh returned by XFRM_MSG_GETSPDINFO", got_thresh);
+                       return KSFT_FAIL;
+               }
+       } else if (req.nh.nlmsg_type != NLMSG_ERROR) {
+               printk("expected NLMSG_ERROR, got %d", (int)req.nh.nlmsg_type);
+               return KSFT_FAIL;
+       } else {
+               printk("NLMSG_ERROR: %d: %s", req.error, strerror(-req.error));
+               return -1;
+       }
+
+       /* Restore the default */
+       if (xfrm_spdinfo_set_thresh(xfrm_sock, seq, 32, 32, 128, 128, false)) {
+               pr_err("Can't restore SPD HTHRESH");
+               return KSFT_FAIL;
+       }
+
+       /*
+        * At this moment xfrm uses nlmsg_parse_deprecated(), which
+        * implies NL_VALIDATE_LIBERAL - ignoring attributes with
+        * (type > maxtype). nla_parse_depricated_strict() would enforce
+        * it. Or even stricter nla_parse().
+        * Right now it's not expected to fail, but to be ignored.
+        */
+       if (xfrm_spdinfo_set_thresh(xfrm_sock, seq, 32, 32, 128, 128, true))
+               return KSFT_PASS;
+
+       return KSFT_PASS;
+}
+
 static int child_serv(int xfrm_sock, uint32_t *seq,
                unsigned int nr, int cmd_fd, void *buf, struct xfrm_desc *desc)
 {
@@ -1717,6 +1869,9 @@ static int child_f(unsigned int nr, int test_desc_fd, int cmd_fd, void *buf)
                case EXPIRE_POLICY:
                        ret = xfrm_expire_policy(xfrm_sock, &seq, nr, &desc);
                        break;
+               case SPDINFO_ATTRS:
+                       ret = xfrm_spdinfo_attrs(xfrm_sock, &seq);
+                       break;
                default:
                        printk("Unknown desc type %d", desc.type);
                        exit(KSFT_FAIL);
@@ -1994,8 +2149,10 @@ static int write_proto_plan(int fd, int proto)
  *   sizeof(xfrm_user_polexpire)  = 168  |  sizeof(xfrm_user_polexpire)  = 176
  *
  * Check the affected by the UABI difference structures.
+ * Also, check translation for xfrm_set_spdinfo: it has it's own attributes
+ * which needs to be correctly copied, but not translated.
  */
-const unsigned int compat_plan = 4;
+const unsigned int compat_plan = 5;
 static int write_compat_struct_tests(int test_desc_fd)
 {
        struct xfrm_desc desc = {};
@@ -2019,6 +2176,10 @@ static int write_compat_struct_tests(int test_desc_fd)
        if (__write_desc(test_desc_fd, &desc))
                return -1;
 
+       desc.type = SPDINFO_ATTRS;
+       if (__write_desc(test_desc_fd, &desc))
+               return -1;
+
        return 0;
 }
 
index 15d937b..fd1ffaa 100755 (executable)
@@ -68,16 +68,12 @@ do
        cpumask=`awk -v cpus="$cpus" -v me=$me -v n=$n 'BEGIN {
                srand(n + me + systime());
                ncpus = split(cpus, ca);
-               curcpu = ca[int(rand() * ncpus + 1)];
-               z = "";
-               for (i = 1; 4 * i <= curcpu; i++)
-                       z = z "0";
-               print "0x" 2 ^ (curcpu % 4) z;
+               print ca[int(rand() * ncpus + 1)];
        }' < /dev/null`
        n=$(($n+1))
-       if ! taskset -p $cpumask $$ > /dev/null 2>&1
+       if ! taskset -c -p $cpumask $$ > /dev/null 2>&1
        then
-               echo taskset failure: '"taskset -p ' $cpumask $$ '"'
+               echo taskset failure: '"taskset -c -p ' $cpumask $$ '"'
                exit 1
        fi
 
index e5cc6b2..1af5d6b 100755 (executable)
@@ -14,7 +14,7 @@ if test -z "$TORTURE_KCONFIG_KCSAN_ARG"
 then
        exit 0
 fi
-cat $1/*/console.log |
+find $1 -name console.log -exec cat {} \; |
        grep "BUG: KCSAN: " |
        sed -e 's/^\[[^]]*] //' |
        sort |
index d8c8483..5a0023d 100755 (executable)
@@ -142,7 +142,7 @@ then
        echo "Cannot copy from $oldrun to $rundir."
        usage
 fi
-rm -f "$rundir"/*/{console.log,console.log.diags,qemu_pid,qemu-retval,Warnings,kvm-test-1-run.sh.out,kvm-test-1-run-qemu.sh.out,vmlinux} "$rundir"/log
+rm -f "$rundir"/*/{console.log,console.log.diags,qemu_pid,qemu-pid,qemu-retval,Warnings,kvm-test-1-run.sh.out,kvm-test-1-run-qemu.sh.out,vmlinux} "$rundir"/log
 touch "$rundir/log"
 echo $scriptname $args | tee -a "$rundir/log"
 echo $oldrun > "$rundir/re-run"
@@ -179,6 +179,6 @@ if test -n "$dryrun"
 then
        echo ---- Dryrun complete, directory: $rundir | tee -a "$rundir/log"
 else
-       ( cd "$rundir"; sh $T/runbatches.sh )
+       ( cd "$rundir"; sh $T/runbatches.sh ) | tee -a "$rundir/log"
        kvm-end-run-stats.sh "$rundir" "$starttime"
 fi
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-assign-cpus.sh b/tools/testing/selftests/rcutorture/bin/kvm-assign-cpus.sh
new file mode 100755 (executable)
index 0000000..f99b2c1
--- /dev/null
@@ -0,0 +1,106 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Produce awk statements roughly depicting the system's CPU and cache
+# layout.  If the required information is not available, produce
+# error messages as awk comments.  Successful exit regardless.
+#
+# Usage: kvm-assign-cpus.sh /path/to/sysfs
+
+T=/tmp/kvm-assign-cpus.sh.$$
+trap 'rm -rf $T' 0 2
+mkdir $T
+
+sysfsdir=${1-/sys/devices/system/node}
+if ! cd "$sysfsdir" > $T/msg 2>&1
+then
+       sed -e 's/^/# /' < $T/msg
+       exit 0
+fi
+nodelist="`ls -d node*`"
+for i in node*
+do
+       if ! test -d $i/
+       then
+               echo "# Not a directory: $sysfsdir/node*"
+               exit 0
+       fi
+       for j in $i/cpu*/cache/index*
+       do
+               if ! test -d $j/
+               then
+                       echo "# Not a directory: $sysfsdir/$j"
+                       exit 0
+               else
+                       break
+               fi
+       done
+       indexlist="`ls -d $i/cpu* | grep 'cpu[0-9][0-9]*' | head -1 | sed -e 's,^.*$,ls -d &/cache/index*,' | sh | sed -e 's,^.*/,,'`"
+       break
+done
+for i in node*/cpu*/cache/index*/shared_cpu_list
+do
+       if ! test -f $i
+       then
+               echo "# Not a file: $sysfsdir/$i"
+               exit 0
+       else
+               break
+       fi
+done
+firstshared=
+for i in $indexlist
+do
+       rm -f $T/cpulist
+       for n in node*
+       do
+               f="$n/cpu*/cache/$i/shared_cpu_list"
+               if ! cat $f > $T/msg 2>&1
+               then
+                       sed -e 's/^/# /' < $T/msg
+                       exit 0
+               fi
+               cat $f >> $T/cpulist
+       done
+       if grep -q '[-,]' $T/cpulist
+       then
+               if test -z "$firstshared"
+               then
+                       firstshared="$i"
+               fi
+       fi
+done
+if test -z "$firstshared"
+then
+       splitindex="`echo $indexlist | sed -e 's/ .*$//'`"
+else
+       splitindex="$firstshared"
+fi
+nodenum=0
+for n in node*
+do
+       cat $n/cpu*/cache/$splitindex/shared_cpu_list | sort -u -k1n |
+       awk -v nodenum="$nodenum" '
+       BEGIN {
+               idx = 0;
+       }
+
+       {
+               nlists = split($0, cpulists, ",");
+               for (i = 1; i <= nlists; i++) {
+                       listsize = split(cpulists[i], cpus, "-");
+                       if (listsize == 1)
+                               cpus[2] = cpus[1];
+                       for (j = cpus[1]; j <= cpus[2]; j++) {
+                               print "cpu[" nodenum "][" idx "] = " j ";";
+                               idx++;
+                       }
+               }
+       }
+
+       END {
+               print "nodecpus[" nodenum "] = " idx ";";
+       }'
+       nodenum=`expr $nodenum + 1`
+done
+echo "numnodes = $nodenum;"
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-get-cpus-script.sh b/tools/testing/selftests/rcutorture/bin/kvm-get-cpus-script.sh
new file mode 100755 (executable)
index 0000000..20c7c53
--- /dev/null
@@ -0,0 +1,88 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Create an awk script that takes as input numbers of CPUs and outputs
+# lists of CPUs, one per line in both cases.
+#
+# Usage: kvm-get-cpus-script.sh /path/to/cpu/arrays /path/to/put/script [ /path/to/state ]
+#
+# The CPU arrays are output by kvm-assign-cpus.sh, and are valid awk
+# statements initializing the variables describing the system's topology.
+#
+# The optional state is input by this script (if the file exists and is
+# non-empty), and can also be output by this script.
+
+cpuarrays="${1-/sys/devices/system/node}"
+scriptfile="${2}"
+statefile="${3}"
+
+if ! test -f "$cpuarrays"
+then
+       echo "File not found: $cpuarrays" 1>&2
+       exit 1
+fi
+scriptdir="`dirname "$scriptfile"`"
+if ! test -d "$scriptdir" || ! test -x "$scriptdir" || ! test -w "$scriptdir"
+then
+       echo "Directory not usable for script output: $scriptdir"
+       exit 1
+fi
+
+cat << '___EOF___' > "$scriptfile"
+BEGIN {
+___EOF___
+cat "$cpuarrays" >> "$scriptfile"
+if test -r "$statefile"
+then
+       cat "$statefile" >> "$scriptfile"
+fi
+cat << '___EOF___' >> "$scriptfile"
+}
+
+# Do we have the system architecture to guide CPU affinity?
+function gotcpus()
+{
+       return numnodes != "";
+}
+
+# Return a comma-separated list of the next n CPUs.
+function nextcpus(n,  i, s)
+{
+       for (i = 0; i < n; i++) {
+               if (nodecpus[curnode] == "")
+                       curnode = 0;
+               if (cpu[curnode][curcpu[curnode]] == "")
+                       curcpu[curnode] = 0;
+               if (s != "")
+                       s = s ",";
+               s = s cpu[curnode][curcpu[curnode]];
+               curcpu[curnode]++;
+               curnode++
+       }
+       return s;
+}
+
+# Dump out the current node/CPU state so that a later invocation of this
+# script can continue where this one left off.  Of course, this only works
+# when a state file was specified and where there was valid sysfs state.
+# Returns 1 if the state was dumped, 0 otherwise.
+#
+# Dumping the state for one system configuration and loading it into
+# another isn't likely to do what you want, whatever that might be.
+function dumpcpustate(  i, fn)
+{
+___EOF___
+echo ' fn = "'"$statefile"'";' >> $scriptfile
+cat << '___EOF___' >> "$scriptfile"
+       if (fn != "" && gotcpus()) {
+               print "curnode = " curnode ";" > fn;
+               for (i = 0; i < numnodes; i++)
+                       if (curcpu[i] != "")
+                               print "curcpu[" i "] = " curcpu[i] ";" >> fn;
+               return 1;
+       }
+       if (fn != "")
+               print "# No CPU state to dump." > fn;
+       return 0;
+}
+___EOF___
index f3a7a5e..db2c0e2 100755 (executable)
@@ -25,7 +25,7 @@ then
        echo "$configfile -------"
 else
        title="$configfile ------- $ncs acquisitions/releases"
-       dur=`sed -e 's/^.* locktorture.shutdown_secs=//' -e 's/ .*$//' < $i/qemu-cmd 2> /dev/null`
+       dur=`grep -v '^#' $i/qemu-cmd | sed -e 's/^.* locktorture.shutdown_secs=//' -e 's/ .*$//' 2> /dev/null`
        if test -z "$dur"
        then
                :
index 671bfee..3afa5c6 100755 (executable)
@@ -25,7 +25,7 @@ if test -z "$nscfs"
 then
        echo "$configfile ------- "
 else
-       dur="`sed -e 's/^.* scftorture.shutdown_secs=//' -e 's/ .*$//' < $i/qemu-cmd 2> /dev/null`"
+       dur="`grep -v '^#' $i/qemu-cmd | sed -e 's/^.* scftorture.shutdown_secs=//' -e 's/ .*$//' 2> /dev/null`"
        if test -z "$dur"
        then
                rate=""
index e01b31b..0a54199 100755 (executable)
@@ -74,7 +74,10 @@ do
        done
        if test -f "$rd/kcsan.sum"
        then
-               if grep -q CONFIG_KCSAN=y $T
+               if ! test -f $T
+               then
+                       :
+               elif grep -q CONFIG_KCSAN=y $T
                then
                        echo "Compiler or architecture does not support KCSAN!"
                        echo Did you forget to switch your compiler with '--kmake-arg CC=<cc-that-supports-kcsan>'?
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-remote-noreap.sh b/tools/testing/selftests/rcutorture/bin/kvm-remote-noreap.sh
new file mode 100755 (executable)
index 0000000..014ce68
--- /dev/null
@@ -0,0 +1,30 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Periodically scan a directory tree to prevent files from being reaped
+# by systemd and friends on long runs.
+#
+# Usage: kvm-remote-noreap.sh pathname
+#
+# Copyright (C) 2021 Facebook, Inc.
+#
+# Authors: Paul E. McKenney <paulmck@kernel.org>
+
+pathname="$1"
+if test "$pathname" = ""
+then
+       echo Usage: kvm-remote-noreap.sh pathname
+       exit 1
+fi
+if ! test -d "$pathname"
+then
+       echo  Usage: kvm-remote-noreap.sh pathname
+       echo "       pathname must be a directory."
+       exit 2
+fi
+
+while test -d "$pathname"
+do
+       find "$pathname" -type f -exec touch -c {} \; > /dev/null 2>&1
+       sleep 30
+done
index 79e680e..03126eb 100755 (executable)
@@ -124,10 +124,12 @@ awk < "$rundir"/scenarios -v dest="$T/bin" -v rundir="$rundir" '
        n = $1;
        sub(/\./, "", n);
        fn = dest "/kvm-remote-" n ".sh"
+       print "kvm-remote-noreap.sh " rundir " &" > fn;
        scenarios = "";
        for (i = 2; i <= NF; i++)
                scenarios = scenarios " " $i;
-       print "kvm-test-1-run-batch.sh" scenarios > fn;
+       print "kvm-test-1-run-batch.sh" scenarios >> fn;
+       print "sync" >> fn;
        print "rm " rundir "/remote.run" >> fn;
 }'
 chmod +x $T/bin/kvm-remote-*.sh
@@ -172,11 +174,20 @@ checkremotefile () {
        do
                ssh $1 "test -f \"$2\""
                ret=$?
-               if test "$ret" -ne 255
+               if test "$ret" -eq 255
                then
+                       echo " ---" ssh failure to $1 checking for file $2, retry after $sleeptime seconds. `date`
+               elif test "$ret" -eq 0
+               then
+                       return 0
+               elif test "$ret" -eq 1
+               then
+                       echo " ---" File \"$2\" not found: ssh $1 test -f \"$2\"
+                       return 1
+               else
+                       echo " ---" Exit code $ret: ssh $1 test -f \"$2\", retry after $sleeptime seconds. `date`
                        return $ret
                fi
-               echo " ---" ssh failure to $1 checking for file $2, retry after $sleeptime seconds. `date`
                sleep $sleeptime
        done
 }
@@ -242,7 +253,8 @@ do
        do
                sleep 30
        done
-       ( cd "$oldrun"; ssh $i "cd $rundir; tar -czf - kvm-remote-*.sh.out */console.log */kvm-test-1-run*.sh.out */qemu_pid */qemu-retval; rm -rf $T > /dev/null 2>&1" | tar -xzf - )
+       echo " ---" Collecting results from $i `date`
+       ( cd "$oldrun"; ssh $i "cd $rundir; tar -czf - kvm-remote-*.sh.out */console.log */kvm-test-1-run*.sh.out */qemu[_-]pid */qemu-retval */qemu-affinity; rm -rf $T > /dev/null 2>&1" | tar -xzf - )
 done
 
 ( kvm-end-run-stats.sh "$oldrun" "$starttime"; echo $? > $T/exitcode ) | tee -a "$oldrun/remote-log"
index 7ea0809..1e29d65 100755 (executable)
@@ -50,10 +50,34 @@ grep '^#' $1/qemu-cmd | sed -e 's/^# //' > $T/qemu-cmd-settings
 echo ---- System running test: `uname -a`
 echo ---- Starting kernels. `date` | tee -a log
 $TORTURE_JITTER_START
+kvm-assign-cpus.sh /sys/devices/system/node > $T/cpuarray.awk
 for i in "$@"
 do
        echo ---- System running test: `uname -a` > $i/kvm-test-1-run-qemu.sh.out
        echo > $i/kvm-test-1-run-qemu.sh.out
+       export TORTURE_AFFINITY=
+       kvm-get-cpus-script.sh $T/cpuarray.awk $T/cpubatches.awk $T/cpustate
+       cat << '        ___EOF___' >> $T/cpubatches.awk
+       END {
+               affinitylist = "";
+               if (!gotcpus()) {
+                       print "echo No CPU-affinity information, so no taskset command.";
+               } else if (cpu_count !~ /^[0-9][0-9]*$/) {
+                       print "echo " scenario ": Bogus number of CPUs (old qemu-cmd?), so no taskset command.";
+               } else {
+                       affinitylist = nextcpus(cpu_count);
+                       if (!(affinitylist ~ /^[0-9,-][0-9,-]*$/))
+                               print "echo " scenario ": Bogus CPU-affinity information, so no taskset command.";
+                       else if (!dumpcpustate())
+                               print "echo " scenario ": Could not dump state, so no taskset command.";
+                       else
+                               print "export TORTURE_AFFINITY=" affinitylist;
+               }
+       }
+       ___EOF___
+       cpu_count="`grep '# TORTURE_CPU_COUNT=' $i/qemu-cmd | sed -e 's/^.*=//'`"
+       affinity_export="`awk -f $T/cpubatches.awk -v cpu_count="$cpu_count" -v scenario=$i < /dev/null`"
+       $affinity_export
        kvm-test-1-run-qemu.sh $i >> $i/kvm-test-1-run-qemu.sh.out 2>&1 &
 done
 for i in $runfiles
index 5b1aa2a..4428058 100755 (executable)
@@ -39,27 +39,34 @@ echo ' ---' `date`: Starting kernel, PID $$
 grep '^#' $resdir/qemu-cmd | sed -e 's/^# //' > $T/qemu-cmd-settings
 . $T/qemu-cmd-settings
 
-# Decorate qemu-cmd with redirection, backgrounding, and PID capture
-sed -e 's/$/ 2>\&1 \&/' < $resdir/qemu-cmd > $T/qemu-cmd
-echo 'echo $! > $resdir/qemu_pid' >> $T/qemu-cmd
+# Decorate qemu-cmd with affinity, redirection, backgrounding, and PID capture
+taskset_command=
+if test -n "$TORTURE_AFFINITY"
+then
+       taskset_command="taskset -c $TORTURE_AFFINITY "
+fi
+sed -e 's/^[^#].*$/'"$taskset_command"'& 2>\&1 \&/' < $resdir/qemu-cmd > $T/qemu-cmd
+echo 'qemu_pid=$!' >> $T/qemu-cmd
+echo 'echo $qemu_pid > $resdir/qemu-pid' >> $T/qemu-cmd
+echo 'taskset -c -p $qemu_pid > $resdir/qemu-affinity' >> $T/qemu-cmd
 
 # In case qemu refuses to run...
 echo "NOTE: $QEMU either did not run or was interactive" > $resdir/console.log
 
 # Attempt to run qemu
 kstarttime=`gawk 'BEGIN { print systime() }' < /dev/null`
-( . $T/qemu-cmd; wait `cat  $resdir/qemu_pid`; echo $? > $resdir/qemu-retval ) &
+( . $T/qemu-cmd; wait `cat  $resdir/qemu-pid`; echo $? > $resdir/qemu-retval ) &
 commandcompleted=0
 if test -z "$TORTURE_KCONFIG_GDB_ARG"
 then
        sleep 10 # Give qemu's pid a chance to reach the file
-       if test -s "$resdir/qemu_pid"
+       if test -s "$resdir/qemu-pid"
        then
-               qemu_pid=`cat "$resdir/qemu_pid"`
-               echo Monitoring qemu job at pid $qemu_pid
+               qemu_pid=`cat "$resdir/qemu-pid"`
+               echo Monitoring qemu job at pid $qemu_pid `date`
        else
                qemu_pid=""
-               echo Monitoring qemu job at yet-as-unknown pid
+               echo Monitoring qemu job at yet-as-unknown pid `date`
        fi
 fi
 if test -n "$TORTURE_KCONFIG_GDB_ARG"
@@ -82,9 +89,9 @@ then
 fi
 while :
 do
-       if test -z "$qemu_pid" -a -s "$resdir/qemu_pid"
+       if test -z "$qemu_pid" && test -s "$resdir/qemu-pid"
        then
-               qemu_pid=`cat "$resdir/qemu_pid"`
+               qemu_pid=`cat "$resdir/qemu-pid"`
        fi
        kruntime=`gawk 'BEGIN { print systime() - '"$kstarttime"' }' < /dev/null`
        if test -z "$qemu_pid" || kill -0 "$qemu_pid" > /dev/null 2>&1
@@ -115,22 +122,22 @@ do
                break
        fi
 done
-if test -z "$qemu_pid" -a -s "$resdir/qemu_pid"
+if test -z "$qemu_pid" && test -s "$resdir/qemu-pid"
 then
-       qemu_pid=`cat "$resdir/qemu_pid"`
+       qemu_pid=`cat "$resdir/qemu-pid"`
 fi
-if test $commandcompleted -eq 0 -a -n "$qemu_pid"
+if test $commandcompleted -eq 0 && test -n "$qemu_pid"
 then
        if ! test -f "$resdir/../STOP.1"
        then
-               echo Grace period for qemu job at pid $qemu_pid
+               echo Grace period for qemu job at pid $qemu_pid `date`
        fi
        oldline="`tail $resdir/console.log`"
        while :
        do
                if test -f "$resdir/../STOP.1"
                then
-                       echo "PID $qemu_pid killed due to run STOP.1 request" >> $resdir/Warnings 2>&1
+                       echo "PID $qemu_pid killed due to run STOP.1 request `date`" >> $resdir/Warnings 2>&1
                        kill -KILL $qemu_pid
                        break
                fi
@@ -152,13 +159,17 @@ then
                then
                        last_ts=0
                fi
-               if test "$newline" != "$oldline" -a "$last_ts" -lt $((seconds + $TORTURE_SHUTDOWN_GRACE))
+               if test "$newline" != "$oldline" && test "$last_ts" -lt $((seconds + $TORTURE_SHUTDOWN_GRACE)) && test "$last_ts" -gt "$TORTURE_SHUTDOWN_GRACE"
                then
                        must_continue=yes
+                       if test $kruntime -ge $((seconds + $TORTURE_SHUTDOWN_GRACE))
+                       then
+                               echo Continuing at console.log time $last_ts \"`tail -n 1 $resdir/console.log`\" `date`
+                       fi
                fi
-               if test $must_continue = no -a $kruntime -ge $((seconds + $TORTURE_SHUTDOWN_GRACE))
+               if test $must_continue = no && test $kruntime -ge $((seconds + $TORTURE_SHUTDOWN_GRACE))
                then
-                       echo "!!! PID $qemu_pid hung at $kruntime vs. $seconds seconds" >> $resdir/Warnings 2>&1
+                       echo "!!! PID $qemu_pid hung at $kruntime vs. $seconds seconds `date`" >> $resdir/Warnings 2>&1
                        kill -KILL $qemu_pid
                        break
                fi
@@ -172,5 +183,3 @@ fi
 
 # Tell the script that this run is done.
 rm -f $resdir/build.run
-
-parse-console.sh $resdir/console.log $title
index 420ed5c..f4c8055 100755 (executable)
@@ -205,6 +205,7 @@ echo "# TORTURE_KCONFIG_GDB_ARG=\"$TORTURE_KCONFIG_GDB_ARG\"" >> $resdir/qemu-cm
 echo "# TORTURE_JITTER_START=\"$TORTURE_JITTER_START\"" >> $resdir/qemu-cmd
 echo "# TORTURE_JITTER_STOP=\"$TORTURE_JITTER_STOP\"" >> $resdir/qemu-cmd
 echo "# TORTURE_TRUST_MAKE=\"$TORTURE_TRUST_MAKE\"; export TORTURE_TRUST_MAKE" >> $resdir/qemu-cmd
+echo "# TORTURE_CPU_COUNT=$cpu_count" >> $resdir/qemu-cmd
 
 if test -n "$TORTURE_BUILDONLY"
 then
@@ -214,3 +215,4 @@ then
 fi
 
 kvm-test-1-run-qemu.sh $resdir
+parse-console.sh $resdir/console.log $title
index b4ac4ee..f442d84 100755 (executable)
@@ -430,17 +430,10 @@ then
        git diff HEAD >> $resdir/$ds/testid.txt
 fi
 ___EOF___
-awk < $T/cfgcpu.pack \
-       -v TORTURE_BUILDONLY="$TORTURE_BUILDONLY" \
-       -v CONFIGDIR="$CONFIGFRAG/" \
-       -v KVM="$KVM" \
-       -v ncpus=$cpus \
-       -v jitter="$jitter" \
-       -v rd=$resdir/$ds/ \
-       -v dur=$dur \
-       -v TORTURE_QEMU_ARG="$TORTURE_QEMU_ARG" \
-       -v TORTURE_BOOTARGS="$TORTURE_BOOTARGS" \
-'BEGIN {
+kvm-assign-cpus.sh /sys/devices/system/node > $T/cpuarray.awk
+kvm-get-cpus-script.sh $T/cpuarray.awk $T/dumpbatches.awk
+cat << '___EOF___' >> $T/dumpbatches.awk
+BEGIN {
        i = 0;
 }
 
@@ -451,7 +444,7 @@ awk < $T/cfgcpu.pack \
 }
 
 # Dump out the scripting required to run one test batch.
-function dump(first, pastlast, batchnum)
+function dump(first, pastlast, batchnum,  affinitylist)
 {
        print "echo ----Start batch " batchnum ": `date` | tee -a " rd "log";
        print "needqemurun="
@@ -483,6 +476,14 @@ function dump(first, pastlast, batchnum)
                print "echo ", cfr[jn], cpusr[jn] ovf ": Starting build. `date` | tee -a " rd "log";
                print "mkdir " rd cfr[jn] " || :";
                print "touch " builddir ".wait";
+               affinitylist = "";
+               if (gotcpus()) {
+                       affinitylist = nextcpus(cpusr[jn]);
+               }
+               if (affinitylist ~ /^[0-9,-][0-9,-]*$/)
+                       print "export TORTURE_AFFINITY=" affinitylist;
+               else
+                       print "export TORTURE_AFFINITY=";
                print "kvm-test-1-run.sh " CONFIGDIR cf[j], rd cfr[jn], dur " \"" TORTURE_QEMU_ARG "\" \"" TORTURE_BOOTARGS "\" > " rd cfr[jn]  "/kvm-test-1-run.sh.out 2>&1 &"
                print "echo ", cfr[jn], cpusr[jn] ovf ": Waiting for build to complete. `date` | tee -a " rd "log";
                print "while test -f " builddir ".wait"
@@ -560,7 +561,19 @@ END {
        # Dump the last batch.
        if (ncpus != 0)
                dump(first, i, batchnum);
-}' >> $T/script
+}
+___EOF___
+awk < $T/cfgcpu.pack \
+       -v TORTURE_BUILDONLY="$TORTURE_BUILDONLY" \
+       -v CONFIGDIR="$CONFIGFRAG/" \
+       -v KVM="$KVM" \
+       -v ncpus=$cpus \
+       -v jitter="$jitter" \
+       -v rd=$resdir/$ds/ \
+       -v dur=$dur \
+       -v TORTURE_QEMU_ARG="$TORTURE_QEMU_ARG" \
+       -v TORTURE_BOOTARGS="$TORTURE_BOOTARGS" \
+       -f $T/dumpbatches.awk >> $T/script
 echo kvm-end-run-stats.sh "$resdir/$ds" "$starttime" >> $T/script
 
 # Extract the tests and their batches from the script.
index 53ec7c0..363f560 100755 (executable)
@@ -53,6 +53,7 @@ do_refscale=yes
 do_kvfree=yes
 do_kasan=yes
 do_kcsan=no
+do_clocksourcewd=yes
 
 # doyesno - Helper function for yes/no arguments
 function doyesno () {
@@ -72,6 +73,7 @@ usage () {
        echo "       --configs-scftorture \"config-file list w/ repeat factor (2*CFLIST)\""
        echo "       --doall"
        echo "       --doallmodconfig / --do-no-allmodconfig"
+       echo "       --do-clocksourcewd / --do-no-clocksourcewd"
        echo "       --do-kasan / --do-no-kasan"
        echo "       --do-kcsan / --do-no-kcsan"
        echo "       --do-kvfree / --do-no-kvfree"
@@ -109,7 +111,7 @@ do
                configs_scftorture="$configs_scftorture $2"
                shift
                ;;
-       --doall)
+       --do-all|--doall)
                do_allmodconfig=yes
                do_rcutorture=yes
                do_locktorture=yes
@@ -119,10 +121,14 @@ do
                do_kvfree=yes
                do_kasan=yes
                do_kcsan=yes
+               do_clocksourcewd=yes
                ;;
        --do-allmodconfig|--do-no-allmodconfig)
                do_allmodconfig=`doyesno "$1" --do-allmodconfig`
                ;;
+       --do-clocksourcewd|--do-no-clocksourcewd)
+               do_clocksourcewd=`doyesno "$1" --do-clocksourcewd`
+               ;;
        --do-kasan|--do-no-kasan)
                do_kasan=`doyesno "$1" --do-kasan`
                ;;
@@ -135,7 +141,7 @@ do
        --do-locktorture|--do-no-locktorture)
                do_locktorture=`doyesno "$1" --do-locktorture`
                ;;
-       --do-none)
+       --do-none|--donone)
                do_allmodconfig=no
                do_rcutorture=no
                do_locktorture=no
@@ -145,6 +151,7 @@ do
                do_kvfree=no
                do_kasan=no
                do_kcsan=no
+               do_clocksourcewd=no
                ;;
        --do-rcuscale|--do-no-rcuscale)
                do_rcuscale=`doyesno "$1" --do-rcuscale`
@@ -279,9 +286,9 @@ function torture_one {
 #      torture_bootargs="[ kernel boot arguments ]"
 #      torture_set flavor [ kvm.sh arguments ]
 #
-# Note that "flavor" is an arbitrary string.  Supply --torture if needed.
-# Note that quoting is problematic.  So on the command line, pass multiple
-# values with multiple kvm.sh argument instances.
+# Note that "flavor" is an arbitrary string that does not affect kvm.sh
+# in any way.  So also supply --torture if you need something other than
+# the default.
 function torture_set {
        local cur_kcsan_kmake_args=
        local kcsan_kmake_tag=
@@ -377,6 +384,22 @@ then
        torture_set "rcuscale-kvfree" tools/testing/selftests/rcutorture/bin/kvm.sh --torture rcuscale --allcpus --duration 10 --kconfig "CONFIG_NR_CPUS=$HALF_ALLOTED_CPUS" --memory 1G --trust-make
 fi
 
+if test "$do_clocksourcewd" = "yes"
+then
+       torture_bootargs="rcupdate.rcu_cpu_stall_suppress_at_boot=1 torture.disable_onoff_at_boot rcupdate.rcu_task_stall_timeout=30000"
+       torture_set "clocksourcewd-1" tools/testing/selftests/rcutorture/bin/kvm.sh --allcpus --duration 45s --configs TREE03 --kconfig "CONFIG_TEST_CLOCKSOURCE_WATCHDOG=y" --trust-make
+
+       torture_bootargs="rcupdate.rcu_cpu_stall_suppress_at_boot=1 torture.disable_onoff_at_boot rcupdate.rcu_task_stall_timeout=30000 clocksource.max_cswd_read_retries=1"
+       torture_set "clocksourcewd-2" tools/testing/selftests/rcutorture/bin/kvm.sh --allcpus --duration 45s --configs TREE03 --kconfig "CONFIG_TEST_CLOCKSOURCE_WATCHDOG=y" --trust-make
+
+       # In case our work is already done...
+       if test "$do_rcutorture" != "yes"
+       then
+               torture_bootargs="rcupdate.rcu_cpu_stall_suppress_at_boot=1 torture.disable_onoff_at_boot rcupdate.rcu_task_stall_timeout=30000"
+               torture_set "clocksourcewd-3" tools/testing/selftests/rcutorture/bin/kvm.sh --allcpus --duration 45s --configs TREE03 --trust-make
+       fi
+fi
+
 echo " --- " $scriptname $args
 echo " --- " Done `date` | tee -a $T/log
 ret=0
@@ -395,6 +418,10 @@ then
        nfailures="`wc -l "$T/failures" | awk '{ print $1 }'`"
        ret=2
 fi
+if test "$do_kcsan" = "yes"
+then
+       TORTURE_KCONFIG_KCSAN_ARG=1 tools/testing/selftests/rcutorture/bin/kcsan-collapse.sh tools/testing/selftests/rcutorture/res/$ds > tools/testing/selftests/rcutorture/res/$ds/kcsan.sum
+fi
 echo Started at $startdate, ended at `date`, duration `get_starttime_duration $starttime`. | tee -a $T/log
 echo Summary: Successes: $nsuccesses Failures: $nfailures. | tee -a $T/log
 tdir="`cat $T/successes $T/failures | head -1 | awk '{ print $NF }' | sed -e 's,/[^/]\+/*$,,'`"
index bafe94c..3ca1124 100644 (file)
@@ -1,5 +1,5 @@
 CONFIG_SMP=y
-CONFIG_NR_CPUS=2
+CONFIG_NR_CPUS=4
 CONFIG_HOTPLUG_CPU=y
 CONFIG_PREEMPT_NONE=n
 CONFIG_PREEMPT_VOLUNTARY=n
index bafe94c..3ca1124 100644 (file)
@@ -1,5 +1,5 @@
 CONFIG_SMP=y
-CONFIG_NR_CPUS=2
+CONFIG_NR_CPUS=4
 CONFIG_HOTPLUG_CPU=y
 CONFIG_PREEMPT_NONE=n
 CONFIG_PREEMPT_VOLUNTARY=n
index ea43990..dc02083 100644 (file)
@@ -1,5 +1,5 @@
 CONFIG_SMP=y
-CONFIG_NR_CPUS=2
+CONFIG_NR_CPUS=4
 CONFIG_PREEMPT_NONE=n
 CONFIG_PREEMPT_VOLUNTARY=n
 CONFIG_PREEMPT=y
index dee7a3d..92bbc5a 100644 (file)
@@ -55,10 +55,27 @@ static bool alloc_q1q2_ctx(const uint8_t *s, const uint8_t *m,
        return true;
 }
 
+static void reverse_bytes(void *data, int length)
+{
+       int i = 0;
+       int j = length - 1;
+       uint8_t temp;
+       uint8_t *ptr = data;
+
+       while (i < j) {
+               temp = ptr[i];
+               ptr[i] = ptr[j];
+               ptr[j] = temp;
+               i++;
+               j--;
+       }
+}
+
 static bool calc_q1q2(const uint8_t *s, const uint8_t *m, uint8_t *q1,
                      uint8_t *q2)
 {
        struct q1q2_ctx ctx;
+       int len;
 
        if (!alloc_q1q2_ctx(s, m, &ctx)) {
                fprintf(stderr, "Not enough memory for Q1Q2 calculation\n");
@@ -89,8 +106,10 @@ static bool calc_q1q2(const uint8_t *s, const uint8_t *m, uint8_t *q1,
                goto out;
        }
 
-       BN_bn2bin(ctx.q1, q1);
-       BN_bn2bin(ctx.q2, q2);
+       len = BN_bn2bin(ctx.q1, q1);
+       reverse_bytes(q1, len);
+       len = BN_bn2bin(ctx.q2, q2);
+       reverse_bytes(q2, len);
 
        free_q1q2_ctx(&ctx);
        return true;
@@ -152,22 +171,6 @@ static RSA *gen_sign_key(void)
        return key;
 }
 
-static void reverse_bytes(void *data, int length)
-{
-       int i = 0;
-       int j = length - 1;
-       uint8_t temp;
-       uint8_t *ptr = data;
-
-       while (i < j) {
-               temp = ptr[i];
-               ptr[i] = ptr[j];
-               ptr[j] = temp;
-               i++;
-               j--;
-       }
-}
-
 enum mrtags {
        MRECREATE = 0x0045544145524345,
        MREADD = 0x0000000044444145,
@@ -367,8 +370,6 @@ bool encl_measure(struct encl *encl)
        /* BE -> LE */
        reverse_bytes(sigstruct->signature, SGX_MODULUS_SIZE);
        reverse_bytes(sigstruct->modulus, SGX_MODULUS_SIZE);
-       reverse_bytes(sigstruct->q1, SGX_MODULUS_SIZE);
-       reverse_bytes(sigstruct->q2, SGX_MODULUS_SIZE);
 
        EVP_MD_CTX_destroy(ctx);
        RSA_free(key);
index b587b9a..0d7bbe4 100644 (file)
@@ -4,7 +4,8 @@ test: virtio_test vringh_test
 virtio_test: virtio_ring.o virtio_test.o
 vringh_test: vringh_test.o vringh.o virtio_ring.o
 
-CFLAGS += -g -O2 -Werror -Wall -I. -I../include/ -I ../../usr/include/ -Wno-pointer-sign -fno-strict-overflow -fno-strict-aliasing -fno-common -MMD -U_FORTIFY_SOURCE -include ../../include/linux/kconfig.h
+CFLAGS += -g -O2 -Werror -Wno-maybe-uninitialized -Wall -I. -I../include/ -I ../../usr/include/ -Wno-pointer-sign -fno-strict-overflow -fno-strict-aliasing -fno-common -MMD -U_FORTIFY_SOURCE -include ../../include/linux/kconfig.h
+LDFLAGS += -lpthread
 vpath %.c ../../drivers/virtio ../../drivers/vhost
 mod:
        ${MAKE} -C `pwd`/../.. M=`pwd`/vhost_test V=${V}
diff --git a/tools/virtio/linux/spinlock.h b/tools/virtio/linux/spinlock.h
new file mode 100644 (file)
index 0000000..028e3cd
--- /dev/null
@@ -0,0 +1,56 @@
+#ifndef SPINLOCK_H_STUB
+#define SPINLOCK_H_STUB
+
+#include <pthread.h>
+
+typedef pthread_spinlock_t  spinlock_t;
+
+static inline void spin_lock_init(spinlock_t *lock)
+{
+       int r = pthread_spin_init(lock, 0);
+       assert(!r);
+}
+
+static inline void spin_lock(spinlock_t *lock)
+{
+       int ret = pthread_spin_lock(lock);
+       assert(!ret);
+}
+
+static inline void spin_unlock(spinlock_t *lock)
+{
+       int ret = pthread_spin_unlock(lock);
+       assert(!ret);
+}
+
+static inline void spin_lock_bh(spinlock_t *lock)
+{
+       spin_lock(lock);
+}
+
+static inline void spin_unlock_bh(spinlock_t *lock)
+{
+       spin_unlock(lock);
+}
+
+static inline void spin_lock_irq(spinlock_t *lock)
+{
+       spin_lock(lock);
+}
+
+static inline void spin_unlock_irq(spinlock_t *lock)
+{
+       spin_unlock(lock);
+}
+
+static inline void spin_lock_irqsave(spinlock_t *lock, unsigned long f)
+{
+       spin_lock(lock);
+}
+
+static inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long f)
+{
+       spin_unlock(lock);
+}
+
+#endif
index 5d90254..363b982 100644 (file)
@@ -3,6 +3,7 @@
 #define LINUX_VIRTIO_H
 #include <linux/scatterlist.h>
 #include <linux/kernel.h>
+#include <linux/spinlock.h>
 
 struct device {
        void *parent;
@@ -12,6 +13,7 @@ struct virtio_device {
        struct device dev;
        u64 features;
        struct list_head vqs;
+       spinlock_t vqs_list_lock;
 };
 
 struct virtqueue {
index 9869598..b50dbe2 100644 (file)
@@ -892,6 +892,8 @@ static void kvm_destroy_vm_debugfs(struct kvm *kvm)
 
 static int kvm_create_vm_debugfs(struct kvm *kvm, int fd)
 {
+       static DEFINE_MUTEX(kvm_debugfs_lock);
+       struct dentry *dent;
        char dir_name[ITOA_MAX_LEN * 2];
        struct kvm_stat_data *stat_data;
        const struct _kvm_stats_desc *pdesc;
@@ -903,8 +905,20 @@ static int kvm_create_vm_debugfs(struct kvm *kvm, int fd)
                return 0;
 
        snprintf(dir_name, sizeof(dir_name), "%d-%d", task_pid_nr(current), fd);
-       kvm->debugfs_dentry = debugfs_create_dir(dir_name, kvm_debugfs_dir);
+       mutex_lock(&kvm_debugfs_lock);
+       dent = debugfs_lookup(dir_name, kvm_debugfs_dir);
+       if (dent) {
+               pr_warn_ratelimited("KVM: debugfs: duplicate directory %s\n", dir_name);
+               dput(dent);
+               mutex_unlock(&kvm_debugfs_lock);
+               return 0;
+       }
+       dent = debugfs_create_dir(dir_name, kvm_debugfs_dir);
+       mutex_unlock(&kvm_debugfs_lock);
+       if (IS_ERR(dent))
+               return 0;
 
+       kvm->debugfs_dentry = dent;
        kvm->debugfs_stat_data = kcalloc(kvm_debugfs_num_entries,
                                         sizeof(*kvm->debugfs_stat_data),
                                         GFP_KERNEL_ACCOUNT);
@@ -3110,6 +3124,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
                                        ++vcpu->stat.generic.halt_poll_invalid;
                                goto out;
                        }
+                       cpu_relax();
                        poll_end = cur = ktime_get();
                } while (kvm_vcpu_can_poll(cur, stop));
        }
@@ -4390,6 +4405,16 @@ struct compat_kvm_dirty_log {
        };
 };
 
+struct compat_kvm_clear_dirty_log {
+       __u32 slot;
+       __u32 num_pages;
+       __u64 first_page;
+       union {
+               compat_uptr_t dirty_bitmap; /* one bit per page */
+               __u64 padding2;
+       };
+};
+
 static long kvm_vm_compat_ioctl(struct file *filp,
                           unsigned int ioctl, unsigned long arg)
 {
@@ -4399,6 +4424,24 @@ static long kvm_vm_compat_ioctl(struct file *filp,
        if (kvm->mm != current->mm)
                return -EIO;
        switch (ioctl) {
+#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
+       case KVM_CLEAR_DIRTY_LOG: {
+               struct compat_kvm_clear_dirty_log compat_log;
+               struct kvm_clear_dirty_log log;
+
+               if (copy_from_user(&compat_log, (void __user *)arg,
+                                  sizeof(compat_log)))
+                       return -EFAULT;
+               log.slot         = compat_log.slot;
+               log.num_pages    = compat_log.num_pages;
+               log.first_page   = compat_log.first_page;
+               log.padding2     = compat_log.padding2;
+               log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap);
+
+               r = kvm_vm_ioctl_clear_dirty_log(kvm, &log);
+               break;
+       }
+#endif
        case KVM_GET_DIRTY_LOG: {
                struct compat_kvm_dirty_log compat_log;
                struct kvm_dirty_log log;
@@ -5172,7 +5215,7 @@ static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm)
        }
        add_uevent_var(env, "PID=%d", kvm->userspace_pid);
 
-       if (!IS_ERR_OR_NULL(kvm->debugfs_dentry)) {
+       if (kvm->debugfs_dentry) {
                char *tmp, *p = kmalloc(PATH_MAX, GFP_KERNEL_ACCOUNT);
 
                if (p) {